diff --git a/_attributes/attributes-microshift.adoc b/_attributes/attributes-microshift.adoc
deleted file mode 100644
index fa6197dbd83c..000000000000
--- a/_attributes/attributes-microshift.adoc
+++ /dev/null
@@ -1,19 +0,0 @@
-// common attributes
-:toc:
-:toc-title:
-:experimental:
-:imagesdir: images
-:OCP: OpenShift Container Platform
-:ocp-version: 4.13
-:oc-first: pass:quotes[OpenShift CLI (`oc`)]
-:product-registry: OpenShift image registry
-:rhel-major: rhel-9
-:op-system-base-full: Red Hat Enterprise Linux (RHEL)
-:op-system: RHEL
-:op-system-ostree-first: Red Hat Enterprise Linux (RHEL) for Edge
-:op-system-ostree: RHEL for Edge
-:op-system-version: 9.2
-:op-system-version-major: 9
-:op-system-bundle: Red Hat Device Edge
-:op-system-bundle-short: RHDE
-:VirtProductName: OpenShift Virtualization
diff --git a/_attributes/attributes-openshift-dedicated.adoc b/_attributes/attributes-openshift-dedicated.adoc
deleted file mode 100644
index e1f1750a5f71..000000000000
--- a/_attributes/attributes-openshift-dedicated.adoc
+++ /dev/null
@@ -1,35 +0,0 @@
-// common attributes
-:product-short-name: OpenShift Dedicated
-:toc:
-:toc-title:
-:experimental:
-:imagesdir: images
-:OCP: OpenShift Container Platform
-:op-system-first: Red Hat Enterprise Linux CoreOS (RHCOS)
-:cluster-manager-first: Red Hat OpenShift Cluster Manager
-:cluster-manager: OpenShift Cluster Manager
-:cluster-manager-url: link:https://console.redhat.com/openshift[OpenShift Cluster Manager Hybrid Cloud Console]
-:cluster-manager-url-pull: link:https://console.redhat.com/openshift/install/pull-secret[pull secret from the Red Hat OpenShift Cluster Manager]
-:hybrid-console: Red Hat Hybrid Cloud Console
-:hybrid-console-second: Hybrid Cloud Console
-:AWS: Amazon Web Services (AWS)
-:GCP: Google Cloud Platform (GCP)
-:product-registry: OpenShift image registry
-:kebab: image:kebab.png[title="Options menu"]
-:rhq-short: Red Hat Quay
-:SMProductName: Red Hat OpenShift Service Mesh
-:pipelines-title: Red Hat OpenShift Pipelines
-:logging-sd: Red Hat OpenShift Logging
-:ServerlessProductName: OpenShift Serverless
-:rhoda: Red Hat OpenShift Database Access
-:rhoda-short: RHODA
-:rhods: Red Hat OpenShift Data Science
-:osd: OpenShift Dedicated
-//Formerly known as CodeReady Containers and CodeReady Workspaces
-:openshift-local-productname: Red Hat OpenShift Local
-:openshift-dev-spaces-productname: Red Hat OpenShift Dev Spaces
-:hcp: hosted control planes
-:hcp-title: ROSA with HCP
-:hcp-title-first: {product-title} (ROSA) with {hcp} (HCP)
-//ROSA CLI variables
-:word: Testing this variable let's go www.google.com
\ No newline at end of file
diff --git a/_attributes/servicebinding-document-attributes.adoc b/_attributes/servicebinding-document-attributes.adoc
deleted file mode 100644
index 128980867238..000000000000
--- a/_attributes/servicebinding-document-attributes.adoc
+++ /dev/null
@@ -1,12 +0,0 @@
-// Standard document attributes to be used in the documentation
-//
-// The following are shared by all documents:
-:toc:
-:toclevels: 4
-:experimental:
-//
-// Product content attributes, that is, substitution variables in the files.
-// -:servicebinding-title: Service Binding Operator -:servicebinding-shortname: Service Binding -:servicebinding-ver: servicebinding-1.0 diff --git a/_distro_map.yml b/_distro_map.yml index ed9d1d8d34a5..e5cc8106d84c 100644 --- a/_distro_map.yml +++ b/_distro_map.yml @@ -1,295 +1,11 @@ --- -openshift-origin: - name: OKD - author: OKD Documentation Project - site: community - site_name: Documentation - site_url: https://docs.okd.io/ - branches: - main: - name: 4 - dir: latest - enterprise-4.6: - name: '4.6' - dir: '4.6' - enterprise-4.7: - name: '4.7' - dir: '4.7' - enterprise-4.8: - name: '4.8' - dir: '4.8' - enterprise-4.9: - name: '4.9' - dir: '4.9' - enterprise-4.10: - name: '4.10' - dir: '4.10' - enterprise-4.11: - name: '4.11' - dir: '4.11' - enterprise-4.12: - name: '4.12' - dir: '4.12' - enterprise-4.13: - name: '4.13' - dir: '4.13' - enterprise-3.6: - name: '3.6' - dir: '3.6' - enterprise-3.7: - name: '3.7' - dir: '3.7' - enterprise-3.9: - name: '3.9' - dir: '3.9' - enterprise-3.10: - name: '3.10' - dir: '3.10' - enterprise-3.11: - name: '3.11' - dir: '3.11' -openshift-online: - name: OpenShift Online - author: OpenShift Documentation Project - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - enterprise-3.11: - name: 'Pro' - dir: online/pro -openshift-enterprise: - name: OpenShift Container Platform - author: OpenShift Documentation Project - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - enterprise-3.0: - name: '3.0' - dir: enterprise/3.0 - distro-overrides: - name: OpenShift Enterprise - enterprise-3.1: - name: '3.1' - dir: enterprise/3.1 - distro-overrides: - name: OpenShift Enterprise - enterprise-3.2: - name: '3.2' - dir: enterprise/3.2 - distro-overrides: - name: OpenShift Enterprise - enterprise-3.3: - name: '3.3' - dir: container-platform/3.3 - enterprise-3.4: - name: '3.4' - dir: container-platform/3.4 - enterprise-3.5: - name: '3.5' - dir: container-platform/3.5 - enterprise-3.6: - name: '3.6' - dir: container-platform/3.6 - enterprise-3.7: - name: '3.7' - dir: container-platform/3.7 - enterprise-3.9: - name: '3.9' - dir: container-platform/3.9 - enterprise-3.10: - name: '3.10' - dir: container-platform/3.10 - enterprise-3.11: - name: '3.11' - dir: container-platform/3.11 - enterprise-4.1: - name: '4.1' - dir: container-platform/4.1 - enterprise-4.2: - name: '4.2' - dir: container-platform/4.2 - enterprise-4.3: - name: '4.3' - dir: container-platform/4.3 - enterprise-4.4: - name: '4.4' - dir: container-platform/4.4 - enterprise-4.5: - name: '4.5' - dir: container-platform/4.5 - enterprise-4.6: - name: '4.6' - dir: container-platform/4.6 - enterprise-4.7: - name: '4.7' - dir: container-platform/4.7 - enterprise-4.8: - name: '4.8' - dir: container-platform/4.8 - enterprise-4.9: - name: '4.9' - dir: container-platform/4.9 - enterprise-4.10: - name: '4.10' - dir: container-platform/4.10 - enterprise-4.11: - name: '4.11' - dir: container-platform/4.11 - enterprise-4.12: - name: '4.12' - dir: container-platform/4.12 - enterprise-4.13: - name: '4.13' - dir: container-platform/4.13 - enterprise-4.14: - name: '4.14' - dir: container-platform/4.14 -openshift-dedicated: - name: OpenShift Dedicated - author: OpenShift Documentation Project - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - enterprise-3.11: - name: '3' - dir: dedicated/3 - enterprise-4.13: - name: '' - dir: dedicated/ -openshift-aro: - name: Azure Red Hat OpenShift - 
author: OpenShift Documentation Project - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - enterprise-3.11: - name: '3' - dir: aro/3 - enterprise-4.3: - name: '4' - dir: aro/4 -openshift-rosa: - name: Red Hat OpenShift Service on AWS - author: OpenShift Documentation Project - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - enterprise-4.13: - name: '' - dir: rosa/ - rosa-preview: - name: '' - dir: rosa-preview/ -openshift-rosa-portal: - name: Red Hat OpenShift Service on AWS - author: OpenShift Documentation Project - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - enterprise-4.13: - name: '' - dir: rosa-portal/ -openshift-webscale: - name: OpenShift Container Platform - author: OpenShift Documentation Project - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - enterprise-4.4: - name: '4.4' - dir: container-platform-ocp/4.4 - enterprise-4.5: - name: '4.5' - dir: container-platform-ocp/4.5 - enterprise-4.7: - name: '4.7' - dir: container-platform-ocp/4.7 - enterprise-4.8: - name: '4.8' - dir: container-platform-ocp/4.8 -openshift-dpu: - name: OpenShift Container Platform - author: OpenShift Documentation Project - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - enterprise-4.10: - name: '4.10' - dir: container-platform-dpu/4.10 -openshift-acs: - name: Red Hat Advanced Cluster Security for Kubernetes - author: OpenShift documentation team - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - rhacs-docs-3.65: - name: '3.65' - dir: acs/3.65 - rhacs-docs-3.66: - name: '3.66' - dir: acs/3.66 - rhacs-docs-3.67: - name: '3.67' - dir: acs/3.67 - rhacs-docs-3.68: - name: '3.68' - dir: acs/3.68 - rhacs-docs-3.69: - name: '3.69' - dir: acs/3.69 - rhacs-docs-3.70: - name: '3.70' - dir: acs/3.70 - rhacs-docs-3.71: - name: '3.71' - dir: acs/3.71 - rhacs-docs-3.72: - name: '3.72' - dir: acs/3.72 - rhacs-docs-3.73: - name: '3.73' - dir: acs/3.73 - rhacs-docs-3.74: - name: '3.74' - dir: acs/3.74 - rhacs-docs-4.0: - name: '4.0' - dir: acs/4.0 - rhacs-docs-4.1: - name: '4.1' - dir: acs/4.1 -microshift: - name: MicroShift - author: OpenShift Documentation Project - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - enterprise-4.12: - name: '4.12' - dir: microshift/4.12 - enterprise-4.13: - name: '4.13' - dir: microshift/4.13 -openshift-serverless: - name: Red Hat OpenShift Serverless +openshift-builds: + name: Red Hat OpenShift Builds author: OpenShift documentation team site: commercial site_name: Documentation site_url: https://docs.openshift.com/ branches: - serverless-docs-1.28: - name: '1.28' - dir: serverless/1.28 - serverless-docs-1.29: - name: '1.29' - dir: serverless/1.29 + build-docs: + name: '' + dir: builds diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index e80d580574d4..3887aac73fd1 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -1,3877 +1,7 @@ -# This configuration file dictates the organization of the topic groups and -# topics on the main page of the doc site for this branch. 
Each record -# consists of the following: -# -# --- <= Record delimiter -# Name: Origin of the Species <= Display name of topic group -# Dir: origin_of_the_species <= Directory name of topic group -# Topics: -# - Name: The Majestic Marmoset <= Topic name -# File: the_majestic_marmoset <= Topic file under group dir +/- -# - Name: The Curious Crocodile <= Topic 2 name -# File: the_curious_crocodile <= Topic 2 file -# - Name: The Numerous Nematodes <= Sub-topic group name -# Dir: the_numerous_nematodes <= Sub-topic group dir -# Topics: -# - Name: The Wily Worm <= Sub-topic name -# File: the_wily_worm <= Sub-topic file under / -# - Name: The Acrobatic Ascarid <= Sub-topic 2 name -# File: the_acrobatic_ascarid <= Sub-topic 2 file under / -# -# The ordering of the records in this document determines the ordering of the -# topic groups and topics on the main page. - --- -Name: About -Dir: welcome -Distros: openshift-enterprise,openshift-webscale,openshift-origin,openshift-online,openshift-dpu +Name: About OpenShift Builds +Dir: builds +Distros: openshift-builds Topics: -- Name: Welcome - File: index -- Name: Learn more about OpenShift Container Platform - File: learn_more_about_openshift - Distros: openshift-enterprise -- Name: About OpenShift Kubernetes Engine - File: oke_about - Distros: openshift-enterprise -- Name: Legal notice - File: legal-notice - Distros: openshift-enterprise,openshift-online ---- -Name: What's new? -Dir: whats_new -Distros: openshift-origin -Topics: -- Name: New features and enhancements - File: new-features -- Name: Deprecated features - File: deprecated-features ---- -Name: Release notes -Dir: release_notes -Distros: openshift-enterprise -Topics: -- Name: OpenShift Container Platform 4.14 release notes - File: ocp-4-14-release-notes ---- - Name: Getting started - Dir: getting_started - Distros: openshift-enterprise - Topics: - - Name: Kubernetes overview - File: kubernetes-overview - - Name: OpenShift Container Platform overview - File: openshift-overview - - Name: Web console walkthrough - File: openshift-web-console - - Name: Command-line walkthrough - File: openshift-cli ---- -Name: Architecture -Dir: architecture -Distros: openshift-enterprise,openshift-origin,openshift-online -Topics: -- Name: Architecture overview - File: index -- Name: Product architecture - File: architecture -- Name: Installation and update - Distros: openshift-enterprise,openshift-origin - File: architecture-installation -- Name: Red Hat OpenShift Cluster Manager - Distros: openshift-enterprise - File: ocm-overview-ocp -- Name: About multicluster engine for Kubernetes operator - Distros: openshift-enterprise - File: mce-overview-ocp -- Name: Control plane architecture - File: control-plane - Distros: openshift-enterprise,openshift-origin,openshift-online -- Name: Understanding OpenShift development - File: understanding-development - Distros: openshift-enterprise -- Name: Understanding OKD development - File: understanding-development - Distros: openshift-origin -- Name: Fedora CoreOS - File: architecture-rhcos - Distros: openshift-origin -- Name: Red Hat Enterprise Linux CoreOS - File: architecture-rhcos - Distros: openshift-enterprise -- Name: Admission plugins - File: admission-plug-ins - Distros: openshift-enterprise,openshift-aro ---- -Name: Installing -Dir: installing -Distros: openshift-origin,openshift-enterprise,openshift-webscale -Topics: -- Name: Installation overview - File: index - Distros: openshift-origin,openshift-enterprise -- Name: Selecting an installation method and 
preparing a cluster - File: installing-preparing - Distros: openshift-origin,openshift-enterprise -- Name: Cluster capabilities - File: cluster-capabilities - Distros: openshift-origin,openshift-enterprise -- Name: Disconnected installation mirroring - Dir: disconnected_install - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: About disconnected installation mirroring - File: index - - Name: Creating a mirror registry with mirror registry for Red Hat OpenShift - File: installing-mirroring-creating-registry - - Name: Mirroring images for a disconnected installation - File: installing-mirroring-installation-images - - Name: Mirroring images for a disconnected installation using the oc-mirror plugin - File: installing-mirroring-disconnected -- Name: Installing on Alibaba - Dir: installing_alibaba - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on Alibaba Cloud - File: preparing-to-install-on-alibaba - - Name: Creating the required Alibaba Cloud resources - File: manually-creating-alibaba-ram - - Name: Installing a cluster quickly on Alibaba Cloud - File: installing-alibaba-default - - Name: Installing a cluster on Alibaba Cloud with customizations - File: installing-alibaba-customizations - - Name: Installing a cluster on Alibaba Cloud with network customizations - File: installing-alibaba-network-customizations - - Name: Installing a cluster on Alibaba Cloud into an existing VPC - File: installing-alibaba-vpc - - Name: Uninstalling a cluster on Alibaba Cloud - File: uninstall-cluster-alibaba -- Name: Installing on AWS - Dir: installing_aws - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on AWS - File: preparing-to-install-on-aws - - Name: Configuring an AWS account - File: installing-aws-account - - Name: Manually creating IAM - File: manually-creating-iam - - Name: Installing a cluster quickly on AWS - File: installing-aws-default - - Name: Installing a cluster on AWS with customizations - File: installing-aws-customizations - - Name: Installing a cluster on AWS with network customizations - File: installing-aws-network-customizations - - Name: Installing a cluster on AWS in a restricted network - File: installing-restricted-networks-aws-installer-provisioned - - Name: Installing a cluster on AWS into an existing VPC - File: installing-aws-vpc - - Name: Installing a private cluster on AWS - File: installing-aws-private - - Name: Installing a cluster on AWS into a government region - File: installing-aws-government-region - - Name: Installing a cluster on AWS into a Secret or Top Secret Region - File: installing-aws-secret-region - - Name: Installing a cluster on AWS into a China region - File: installing-aws-china - - Name: Installing a cluster on AWS using CloudFormation templates - File: installing-aws-user-infra - - Name: Installing a cluster using AWS Local Zones - File: installing-aws-localzone - - Name: Installing a cluster on AWS in a restricted network with user-provisioned infrastructure - File: installing-restricted-networks-aws - - Name: Installing a cluster on AWS with remote workers on AWS Outposts - File: installing-aws-outposts-remote-workers - - Name: Installing a three-node cluster on AWS - File: installing-aws-three-node - - Name: Expanding a cluster with on-premise bare metal nodes - File: installing-aws-expanding-a-cluster-with-on-premise-bare-metal-nodes - - Name: Uninstalling a cluster on AWS - File: uninstalling-cluster-aws -- Name: Installing on Azure - Dir: 
installing_azure - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on Azure - File: preparing-to-install-on-azure - - Name: Configuring an Azure account - File: installing-azure-account - - Name: Manually creating IAM - File: manually-creating-iam-azure - - Name: Enabling user-managed encryption on Azure - File: enabling-user-managed-encryption-azure - - Name: Installing a cluster quickly on Azure - File: installing-azure-default - - Name: Installing a cluster on Azure with customizations - File: installing-azure-customizations - - Name: Installing a cluster on Azure with network customizations - File: installing-azure-network-customizations - - Name: Installing a cluster on Azure into an existing VNet - File: installing-azure-vnet - - Name: Installing a private cluster on Azure - File: installing-azure-private - - Name: Installing a cluster on Azure into a government region - File: installing-azure-government-region - - Name: Installing a cluster on Azure using ARM templates - File: installing-azure-user-infra - - Name: Installing a three-node cluster on Azure - File: installing-azure-three-node - - Name: Uninstalling a cluster on Azure - File: uninstalling-cluster-azure -- Name: Installing on Azure Stack Hub - Dir: installing_azure_stack_hub - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on Azure Stack Hub - File: preparing-to-install-on-azure-stack-hub - - Name: Configuring an Azure Stack Hub account - File: installing-azure-stack-hub-account - - Name: Installing a cluster on Azure Stack Hub with an installer-provisioned infrastructure - File: installing-azure-stack-hub-default - - Name: Installing a cluster on Azure Stack Hub with network customizations - File: installing-azure-stack-hub-network-customizations - - Name: Installing a cluster on Azure Stack Hub using ARM templates - File: installing-azure-stack-hub-user-infra - - Name: Uninstalling a cluster on Azure Stack Hub - File: uninstalling-cluster-azure-stack-hub -- Name: Installing on GCP - Dir: installing_gcp - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on GCP - File: preparing-to-install-on-gcp - - Name: Configuring a GCP project - File: installing-gcp-account - - Name: Manually creating IAM - File: manually-creating-iam-gcp - - Name: Installing a cluster quickly on GCP - File: installing-gcp-default - - Name: Installing a cluster on GCP with customizations - File: installing-gcp-customizations - - Name: Installing a cluster on GCP with network customizations - File: installing-gcp-network-customizations - - Name: Installing a cluster on GCP in a restricted network - File: installing-restricted-networks-gcp-installer-provisioned - - Name: Installing a cluster on GCP into an existing VPC - File: installing-gcp-vpc - - Name: Installing a cluster on GCP into a shared VPC - File: installing-gcp-shared-vpc - - Name: Installing a private cluster on GCP - File: installing-gcp-private - - Name: Installing a cluster on GCP using Deployment Manager templates - File: installing-gcp-user-infra - - Name: Installing a cluster into a shared VPC on GCP using Deployment Manager templates - File: installing-gcp-user-infra-vpc - - Name: Installing a cluster on GCP in a restricted network with user-provisioned infrastructure - File: installing-restricted-networks-gcp - - Name: Installing a three-node cluster on GCP - File: installing-gcp-three-node - - Name: Installation configuration parameters for GCP - File: 
installation-config-parameters-gcp - - Name: Uninstalling a cluster on GCP - File: uninstalling-cluster-gcp -- Name: Installing on IBM Cloud VPC - Dir: installing_ibm_cloud_public - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on IBM Cloud VPC - File: preparing-to-install-on-ibm-cloud - - Name: Configuring an IBM Cloud account - File: installing-ibm-cloud-account - - Name: Configuring IAM for IBM Cloud VPC - File: configuring-iam-ibm-cloud - - Name: Installing a cluster on IBM Cloud VPC with customizations - File: installing-ibm-cloud-customizations - - Name: Installing a cluster on IBM Cloud VPC with network customizations - File: installing-ibm-cloud-network-customizations - - Name: Installing a cluster on IBM Cloud VPC into an existing VPC - File: installing-ibm-cloud-vpc - - Name: Installing a private cluster on IBM Cloud VPC - File: installing-ibm-cloud-private - - Name: Uninstalling a cluster on IBM Cloud VPC - File: uninstalling-cluster-ibm-cloud -- Name: Installing on Nutanix - Dir: installing_nutanix - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on Nutanix - File: preparing-to-install-on-nutanix - - Name: Installing a cluster on Nutanix - File: installing-nutanix-installer-provisioned - - Name: Installing a cluster on Nutanix in a restricted network - File: installing-restricted-networks-nutanix-installer-provisioned - - Name: Installing a three-node cluster on Nutanix - File: installing-nutanix-three-node - - Name: Uninstalling a cluster on Nutanix - File: uninstalling-cluster-nutanix -- Name: Installing on bare metal - Dir: installing_bare_metal - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on bare metal - File: preparing-to-install-on-bare-metal - - Name: Installing a user-provisioned cluster on bare metal - File: installing-bare-metal - - Name: Installing a user-provisioned bare metal cluster with network customizations - File: installing-bare-metal-network-customizations - - Name: Installing a user-provisioned bare metal cluster on a restricted network - File: installing-restricted-networks-bare-metal - - Name: Scaling a user-provisioned installation with the bare metal operator - File: scaling-a-user-provisioned-cluster-with-the-bare-metal-operator -- Name: Installing on-premise with Assisted Installer - Dir: installing_on_prem_assisted - Distros: openshift-enterprise - Topics: - - Name: Installing an on-premise cluster using the Assisted Installer - File: installing-on-prem-assisted -- Name: Installing an on-premise cluster with the Agent-based Installer - Dir: installing_with_agent_based_installer - Distros: openshift-enterprise - Topics: - - Name: Preparing to install with Agent-based Installer - File: preparing-to-install-with-agent-based-installer - - Name: Understanding disconnected installation mirroring - File: understanding-disconnected-installation-mirroring - - Name: Installing a cluster with Agent-based Installer - File: installing-with-agent-based-installer - - Name: Preparing an Agent-based installed cluster for the multicluster engine for Kubernetes - File: preparing-an-agent-based-installed-cluster-for-mce -- Name: Installing on a single node - Dir: installing_sno - Distros: openshift-enterprise - Topics: - - Name: Preparing to install OpenShift on a single node - File: install-sno-preparing-to-install-sno - - Name: Installing OpenShift on a single node - File: install-sno-installing-sno -- Name: Deploying installer-provisioned 
clusters on bare metal - Dir: installing_bare_metal_ipi - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Overview - File: ipi-install-overview - - Name: Prerequisites - File: ipi-install-prerequisites - - Name: Setting up the environment for an OpenShift installation - File: ipi-install-installation-workflow - - Name: Post-installation configuration - File: ipi-install-post-installation-configuration - - Name: Expanding the cluster - File: ipi-install-expanding-the-cluster - - Name: Troubleshooting - File: ipi-install-troubleshooting -- Name: Installing IBM Cloud Bare Metal (Classic) - Dir: installing_ibm_cloud - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Prerequisites - File: install-ibm-cloud-prerequisites - - Name: Installation workflow - File: install-ibm-cloud-installation-workflow -- Name: Installing with z/VM on IBM Z and IBM LinuxONE - Dir: installing_ibm_z - Distros: openshift-enterprise - Topics: - - Name: Preparing to install with z/VM on IBM Z and IBM LinuxONE - File: preparing-to-install-on-ibm-z - - Name: Installing a cluster with z/VM on IBM Z and IBM LinuxONE - File: installing-ibm-z - - Name: Restricted network IBM Z installation with z/VM - File: installing-restricted-networks-ibm-z -- Name: Installing with RHEL KVM on IBM Z and IBM LinuxONE - Dir: installing_ibm_z - Distros: openshift-enterprise - Topics: - - Name: Preparing to install with RHEL KVM on IBM Z and IBM LinuxONE - File: preparing-to-install-on-ibm-z-kvm - - Name: Installing a cluster with RHEL KVM on IBM Z and IBM LinuxONE - File: installing-ibm-z-kvm - - Name: Restricted network IBM Z installation with RHEL KVM - File: installing-restricted-networks-ibm-z-kvm -- Name: Installing on IBM Power - Dir: installing_ibm_power - Distros: openshift-enterprise - Topics: - - Name: Preparing to install on IBM Power - File: preparing-to-install-on-ibm-power - - Name: Installing a cluster on IBM Power - File: installing-ibm-power - - Name: Restricted network IBM Power installation - File: installing-restricted-networks-ibm-power -- Name: Installing on IBM Power Virtual Server - Dir: installing_ibm_powervs - Distros: openshift-enterprise - Topics: - - Name: Preparing to install on IBM Power Virtual Server - File: preparing-to-install-on-ibm-power-vs - - Name: Configuring an IBM Cloud account - File: installing-ibm-cloud-account-power-vs - - Name: Creating an IBM Power Virtual Server workspace - File: creating-ibm-power-vs-workspace - - Name: Installing a cluster on IBM Power Virtual Server with customizations - File: installing-ibm-power-vs-customizations - - Name: Installing a cluster on IBM Power Virtual Server into an existing VPC - File: installing-ibm-powervs-vpc - - Name: Installing a private cluster on IBM Power Virtual Server - File: installing-ibm-power-vs-private-cluster - - Name: Installing a cluster on IBM Power Virtual Server in a restricted network - File: installing-restricted-networks-ibm-power-vs - - Name: Uninstalling a cluster on IBM Power Virtual Server - File: uninstalling-cluster-ibm-power-vs -- Name: Installing on OpenStack - Dir: installing_openstack - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on OpenStack - File: preparing-to-install-on-openstack - - Name: Preparing to install a cluster that uses SR-IOV or OVS-DPDK on OpenStack - File: installing-openstack-nfv-preparing -# - Name: Installing a cluster on OpenStack -# File: installing-openstack-installer - - Name: Installing a cluster on OpenStack with 
customizations - File: installing-openstack-installer-custom - - Name: Installing a cluster on OpenStack with Kuryr - File: installing-openstack-installer-kuryr - - Name: Installing a cluster on OpenStack on your own infrastructure - File: installing-openstack-user - - Name: Installing a cluster on OpenStack with Kuryr on your own infrastructure - File: installing-openstack-user-kuryr - - Name: Installing a cluster on OpenStack in a restricted network - File: installing-openstack-installer-restricted - - Name: OpenStack Cloud Controller Manager reference guide - File: installing-openstack-cloud-config-reference - # - Name: Load balancing deployments on OpenStack - # File: installing-openstack-load-balancing - - Name: Uninstalling a cluster on OpenStack - File: uninstalling-cluster-openstack - - Name: Uninstalling a cluster on OpenStack from your own infrastructure - File: uninstalling-openstack-user -- Name: Installing on vSphere - Dir: installing_vsphere - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on vSphere - File: preparing-to-install-on-vsphere - - Name: Installing a cluster on vSphere - File: installing-vsphere-installer-provisioned - - Name: Installing a cluster on vSphere with customizations - File: installing-vsphere-installer-provisioned-customizations - - Name: Installing a cluster on vSphere with network customizations - File: installing-vsphere-installer-provisioned-network-customizations - - Name: Installing a cluster on vSphere with user-provisioned infrastructure - File: installing-vsphere - - Name: Installing a cluster on vSphere with user-provisioned infrastructure and network customizations - File: installing-vsphere-network-customizations - - Name: Installing a cluster on vSphere in a restricted network - File: installing-restricted-networks-installer-provisioned-vsphere - - Name: Installing a cluster on vSphere in a restricted network with user-provisioned infrastructure - File: installing-restricted-networks-vsphere - - Name: Installing a three-node cluster on vSphere - File: installing-vsphere-three-node - - Name: Configuring the vSphere connection settings after an installation - File: installing-vsphere-post-installation-configuration - - Name: Uninstalling a cluster on vSphere that uses installer-provisioned infrastructure - File: uninstalling-cluster-vsphere-installer-provisioned - - Name: Using the vSphere Problem Detector Operator - File: using-vsphere-problem-detector-operator - - Name: Installation configuration parameters for vSphere - File: installation-config-parameters-vsphere -- Name: Installing on any platform - Dir: installing_platform_agnostic - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Installing a cluster on any platform - File: installing-platform-agnostic -- Name: Installation configuration - Dir: install_config - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Customizing nodes - File: installing-customizing - - Name: Configuring your firewall - File: configuring-firewall - - Name: Enabling Linux control group version 2 (cgroup v2) - File: enabling-cgroup-v2 - Distros: openshift-enterprise -- Name: Validating an installation - File: validating-an-installation - Distros: openshift-origin,openshift-enterprise -- Name: Troubleshooting installation issues - File: installing-troubleshooting - Distros: openshift-origin,openshift-enterprise ---- -Name: Post-installation configuration -Dir: post_installation_configuration -Distros: 
openshift-origin,openshift-enterprise -Topics: -- Name: Post-installation configuration overview - Distros: openshift-enterprise - File: index -- Name: Configuring a private cluster - Distros: openshift-enterprise,openshift-origin - File: configuring-private-cluster -- Name: Bare metal configuration - File: bare-metal-configuration -- Name: Configuring multi-architecture compute machines on an OpenShift cluster - Distros: openshift-enterprise - File: multi-architecture-configuration -- Name: Enabling encryption on a vSphere cluster - File: vsphere-post-installation-encryption -- Name: Machine configuration tasks - File: machine-configuration-tasks -- Name: Cluster tasks - File: cluster-tasks -- Name: Node tasks - File: node-tasks -- Name: Network configuration - File: network-configuration -- Name: Storage configuration - File: storage-configuration -- Name: Preparing for users - File: preparing-for-users -- Name: Configuring alert notifications - File: configuring-alert-notifications -- Name: Converting a connected cluster to a disconnected cluster - File: connected-to-disconnected -- Name: Enabling cluster capabilities - File: enabling-cluster-capabilities - Distros: openshift-origin,openshift-enterprise -- Name: Configuring additional devices in an IBM Z or IBM LinuxONE environment - File: ibmz-post-install -- Name: Regions and zones for a VMware vCenter - File: post-install-vsphere-zones-regions-configuration -- Name: Red Hat Enterprise Linux CoreOS image layering - File: coreos-layering - Distros: openshift-enterprise -- Name: Fedora CoreOS (FCOS) image layering - File: coreos-layering - Distros: openshift-origin ---- -Name: Updating clusters -Dir: updating -Distros: openshift-origin,openshift-enterprise -Topics: -- Name: Updating clusters overview - File: index - Distros: openshift-origin -- Name: Understanding OpenShift updates - Dir: understanding_updates - Topics: - - Name: Introduction to OpenShift updates - File: intro-to-updates - - Name: How cluster updates work - File: how-updates-work - Distros: openshift-enterprise - - Name: Understanding update channels and releases - File: understanding-update-channels-release - Distros: openshift-enterprise - - Name: Understanding OpenShift update duration - File: understanding-openshift-update-duration -- Name: Preparing to update a cluster - Dir: preparing_for_updates - Topics: - - Name: Preparing to update to OpenShift Container Platform 4.13 - File: updating-cluster-prepare - Distros: openshift-enterprise - - Name: Preparing to update to OKD 4.13 - File: updating-cluster-prepare - Distros: openshift-origin - - Name: Preparing to update a cluster with manually maintained credentials - File: preparing-manual-creds-update - - Name: Preflight validation for Kernel Module Management (KMM) Modules - File: kmm-preflight-validation -- Name: Performing a cluster update - Dir: updating_a_cluster - Topics: - - Name: Updating a cluster using the CLI - File: updating-cluster-cli - - Name: Updating a cluster using the web console - File: updating-cluster-web-console - - Name: Performing an EUS-to-EUS update - File: eus-eus-update - Distros: openshift-enterprise - - Name: Performing a canary rollout update - File: update-using-custom-machine-config-pools - - Name: Updating a cluster that includes RHEL compute machines - File: updating-cluster-rhel-compute - Distros: openshift-enterprise - - Name: Updating a cluster in a disconnected environment - Dir: updating_disconnected_cluster - Topics: - - Name: About cluster updates in a disconnected 
environment - File: index - - Name: Mirroring OpenShift Container Platform images - File: mirroring-image-repository - - Name: Updating a cluster in a disconnected environment using OSUS - File: disconnected-update-osus - Distros: openshift-enterprise - - Name: Updating a cluster in a disconnected environment without OSUS - File: disconnected-update - Distros: openshift-enterprise - - Name: Updating a cluster in a disconnected environment by using the CLI - File: disconnected-update - Distros: openshift-origin - - Name: Uninstalling OSUS from a cluster - File: uninstalling-osus - Distros: openshift-enterprise - - Name: Updating hardware on nodes running on vSphere - File: updating-hardware-on-nodes-running-on-vsphere - - Name: Migrating to a cluster with multi-architecture compute machines - File: migrating-to-multi-payload - - Name: Updating hosted control planes - File: updating-hosted-control-planes -# - Name: Troubleshooting an update -# File: updating-troubleshooting ---- -Name: Support -Dir: support -Distros: openshift-enterprise,openshift-online,openshift-origin -Topics: -- Name: Support overview - File: index -- Name: Managing your cluster resources - File: managing-cluster-resources -- Name: Getting support - File: getting-support - Distros: openshift-enterprise -- Name: Remote health monitoring with connected clusters - Dir: remote_health_monitoring - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: About remote health monitoring - File: about-remote-health-monitoring - - Name: Showing data collected by remote health monitoring - File: showing-data-collected-by-remote-health-monitoring - - Name: Opting out of remote health reporting - File: opting-out-of-remote-health-reporting - - Name: Enabling remote health reporting - File: enabling-remote-health-reporting - - Name: Using Insights to identify issues with your cluster - File: using-insights-to-identify-issues-with-your-cluster - - Name: Using Insights Operator - File: using-insights-operator - - Name: Using remote health reporting in a restricted network - File: remote-health-reporting-from-restricted-network - - Name: Importing simple content access entitlements with Insights Operator - File: insights-operator-simple-access -- Name: Gathering data about your cluster - File: gathering-cluster-data - Distros: openshift-enterprise,openshift-origin -- Name: Summarizing cluster specifications - File: summarizing-cluster-specifications - Distros: openshift-enterprise,openshift-origin -- Name: Troubleshooting - Dir: troubleshooting - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Troubleshooting installations - File: troubleshooting-installations - - Name: Verifying node health - File: verifying-node-health - - Name: Troubleshooting CRI-O container runtime issues - File: troubleshooting-crio-issues - - Name: Troubleshooting operating system issues - File: troubleshooting-operating-system-issues - Distros: openshift-enterprise,openshift-origin - - Name: Troubleshooting network issues - File: troubleshooting-network-issues - Distros: openshift-enterprise,openshift-origin - - Name: Troubleshooting Operator issues - File: troubleshooting-operator-issues - - Name: Investigating pod issues - File: investigating-pod-issues - - Name: Troubleshooting the Source-to-Image process - File: troubleshooting-s2i - - Name: Troubleshooting storage issues - File: troubleshooting-storage-issues - - Name: Troubleshooting Windows container workload issues - File: troubleshooting-windows-container-workload-issues - - 
Name: Investigating monitoring issues - File: investigating-monitoring-issues - - Name: Diagnosing OpenShift CLI (oc) issues - File: diagnosing-oc-issues ---- -Name: Web console -Dir: web_console -Distros: openshift-enterprise,openshift-origin,openshift-online -Topics: -- Name: Web console overview - File: web-console-overview -- Name: Accessing the web console - File: web-console -- Name: Viewing cluster information - File: using-dashboard-to-get-cluster-information -- Name: Adding user preferences - File: adding-user-preferences - Distros: openshift-enterprise,openshift-origin -- Name: Configuring the web console - File: configuring-web-console - Distros: openshift-enterprise,openshift-origin -- Name: Customizing the web console - File: customizing-the-web-console - Distros: openshift-enterprise,openshift-origin -- Name: Dynamic plugins - Dir: dynamic-plugin - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Overview of dynamic plugins - File: overview-dynamic-plugin - - Name: Getting started with dynamic plugins - File: dynamic-plugins-get-started - - Name: Deploy your plugin on a cluster - File: deploy-plugin-cluster - - Name: Dynamic plugin example - File: dynamic-plugin-example - - Name: Dynamic plugin reference - File: dynamic-plugins-reference -- Name: Web terminal - Dir: web_terminal - Distros: openshift-enterprise,openshift-online - Topics: - - Name: Installing the web terminal - File: installing-web-terminal - - Name: Configuring the web terminal - File: configuring-web-terminal - - Name: Using the web terminal - File: odc-using-web-terminal - - Name: Troubleshooting the web terminal - File: troubleshooting-web-terminal - - Name: Uninstalling the web terminal - File: uninstalling-web-terminal -- Name: Disabling the web console - File: disabling-web-console - Distros: openshift-enterprise,openshift-origin -- Name: Creating quick start tutorials - File: creating-quick-start-tutorials - Distros: openshift-enterprise,openshift-origin ---- -Name: CLI tools -Dir: cli_reference -Distros: openshift-enterprise,openshift-origin,openshift-online -Topics: -- Name: CLI tools overview - File: index -- Name: OpenShift CLI (oc) - Dir: openshift_cli - Topics: - - Name: Getting started with the OpenShift CLI - File: getting-started-cli - - Name: Configuring the OpenShift CLI - File: configuring-cli - - Name: Usage of oc and kubectl commands - File: usage-oc-kubectl - - Name: Managing CLI profiles - File: managing-cli-profiles - - Name: Extending the OpenShift CLI with plugins - File: extending-cli-plugins - - Name: Managing CLI plugins with Krew - File: managing-cli-plugins-krew - Distros: openshift-enterprise,openshift-origin - - Name: OpenShift CLI developer command reference - File: developer-cli-commands - - Name: OpenShift CLI administrator command reference - File: administrator-cli-commands - Distros: openshift-enterprise,openshift-origin -- Name: Developer CLI (odo) - File: odo-important-update - # Dir: developer_cli_odo - Distros: openshift-enterprise,openshift-origin,openshift-online - # Topics: - # - Name: odo release notes - # File: odo-release-notes - # - Name: Understanding odo - # File: understanding-odo - # - Name: Installing odo - # File: installing-odo - # - Name: Configuring the odo CLI - # File: configuring-the-odo-cli - # - Name: odo CLI reference - # File: odo-cli-reference -- Name: Knative CLI (kn) for use with OpenShift Serverless - File: kn-cli-tools - Distros: openshift-enterprise,openshift-origin -- Name: Pipelines CLI (tkn) - Dir: tkn_cli - Distros: 
openshift-enterprise - Topics: - - Name: Installing tkn - File: installing-tkn - - Name: Configuring tkn - File: op-configuring-tkn - - Name: Basic tkn commands - File: op-tkn-reference -- Name: opm CLI - Dir: opm - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Installing the opm CLI - File: cli-opm-install - - Name: opm CLI reference - File: cli-opm-ref -- Name: Operator SDK - Dir: osdk - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Installing the Operator SDK CLI - File: cli-osdk-install - - Name: Operator SDK CLI reference - File: cli-osdk-ref ---- -Name: Security and compliance -Dir: security -Distros: openshift-enterprise,openshift-origin,openshift-aro -Topics: -- Name: Security and compliance overview - File: index -- Name: Container security - Dir: container_security - Topics: - - Name: Understanding container security - File: security-understanding - - Name: Understanding host and VM security - File: security-hosts-vms - - Name: Hardening Red Hat Enterprise Linux CoreOS - File: security-hardening - Distros: openshift-enterprise,openshift-aro - - Name: Container image signatures - File: security-container-signature - - Name: Hardening Fedora CoreOS - File: security-hardening - Distros: openshift-origin - - Name: Understanding compliance - File: security-compliance - - Name: Securing container content - File: security-container-content - - Name: Using container registries securely - File: security-registries - - Name: Securing the build process - File: security-build - - Name: Deploying containers - File: security-deploy - - Name: Securing the container platform - File: security-platform - - Name: Securing networks - File: security-network - - Name: Securing attached storage - File: security-storage - - Name: Monitoring cluster events and logs - File: security-monitoring -- Name: Configuring certificates - Dir: certificates - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Replacing the default ingress certificate - File: replacing-default-ingress-certificate - - Name: Adding API server certificates - File: api-server - - Name: Securing service traffic using service serving certificates - File: service-serving-certificate - - Name: Updating the CA bundle - File: updating-ca-bundle -- Name: Certificate types and descriptions - Dir: certificate_types_descriptions - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: User-provided certificates for the API server - File: user-provided-certificates-for-api-server - - Name: Proxy certificates - File: proxy-certificates - - Name: Service CA certificates - File: service-ca-certificates - - Name: Node certificates - File: node-certificates - - Name: Bootstrap certificates - File: bootstrap-certificates - - Name: etcd certificates - File: etcd-certificates - - Name: OLM certificates - File: olm-certificates - - Name: Aggregated API client certificates - File: aggregated-api-client-certificates - - Name: Machine Config Operator certificates - File: machine-config-operator-certificates - - Name: User-provided certificates for default ingress - File: user-provided-certificates-for-default-ingress - - Name: Ingress certificates - File: ingress-certificates - - Name: Monitoring and cluster logging Operator component certificates - File: monitoring-and-cluster-logging-operator-component-certificates - - Name: Control plane certificates - File: control-plane-certificates -- Name: Compliance Operator - Dir: compliance_operator - Topics: - - Name: Compliance Operator release 
notes - File: compliance-operator-release-notes - - Name: Supported compliance profiles - File: compliance-operator-supported-profiles - - Name: Installing the Compliance Operator - File: compliance-operator-installation - - Name: Updating the Compliance Operator - File: compliance-operator-updating - - Name: Compliance Operator scans - File: compliance-scans - - Name: Understanding the Compliance Operator - File: compliance-operator-understanding - - Name: Managing the Compliance Operator - File: compliance-operator-manage - - Name: Tailoring the Compliance Operator - File: compliance-operator-tailor - - Name: Retrieving Compliance Operator raw results - File: compliance-operator-raw-results - - Name: Managing Compliance Operator remediation - File: compliance-operator-remediation - - Name: Performing advanced Compliance Operator tasks - File: compliance-operator-advanced - - Name: Troubleshooting the Compliance Operator - File: compliance-operator-troubleshooting - - Name: Uninstalling the Compliance Operator - File: compliance-operator-uninstallation - - Name: Using the oc-compliance plugin - File: oc-compliance-plug-in-using - - Name: Understanding the Custom Resource Definitions - File: compliance-operator-crd - -- Name: File Integrity Operator - Dir: file_integrity_operator - Topics: - - Name: File Integrity Operator release notes - File: file-integrity-operator-release-notes - - Name: Installing the File Integrity Operator - File: file-integrity-operator-installation - - Name: Updating the File Integrity Operator - File: file-integrity-operator-updating - - Name: Understanding the File Integrity Operator - File: file-integrity-operator-understanding - - Name: Configuring the File Integrity Operator - File: file-integrity-operator-configuring - - Name: Performing advanced File Integrity Operator tasks - File: file-integrity-operator-advanced-usage - - Name: Troubleshooting the File Integrity Operator - File: file-integrity-operator-troubleshooting -- Name: Security Profiles Operator - Dir: security_profiles_operator - Topics: - - Name: Security Profiles Operator overview - File: spo-overview - - Name: Security Profiles Operator release notes - File: spo-release-notes - - Name: Understanding the Security Profiles Operator - File: spo-understanding - - Name: Enabling the Security Profiles Operator - File: spo-enabling - - Name: Managing seccomp profiles - File: spo-seccomp - - Name: Managing SELinux profiles - File: spo-selinux - - Name: Advanced Security Profiles Operator tasks - File: spo-advanced - - Name: Troubleshooting the Security Profiles Operator - File: spo-troubleshooting - - Name: Uninstalling the Security Profiles Operator - File: spo-uninstalling -- Name: cert-manager Operator for Red Hat OpenShift - Dir: cert_manager_operator - Distros: openshift-enterprise - Topics: - - Name: cert-manager Operator for Red Hat OpenShift overview - File: index - - Name: cert-manager Operator for Red Hat OpenShift release notes - File: cert-manager-operator-release-notes - - Name: Installing the cert-manager Operator for Red Hat OpenShift - File: cert-manager-operator-install - - Name: Managing certificates with an ACME issuer - File: cert-manager-operator-issuer-acme - - Name: Enabling monitoring for the cert-manager Operator for Red Hat OpenShift - File: cert-manager-monitoring - - Name: Configuring the egress proxy for the cert-manager Operator for Red Hat OpenShift - File: cert-manager-operator-proxy - - Name: Customizing cert-manager by using the cert-manager Operator API fields - 
File: cert-manager-customizing-api-fields - - Name: Authenticating the cert-manager Operator with AWS Security Token Service - File: cert-manager-authenticate-aws - - Name: Configuring log levels for cert-manager and the cert-manager Operator for Red Hat OpenShift - File: cert-manager-log-levels - - Name: Authenticating the cert-manager Operator for Red Hat OpenShift with GCP Workload Identity - File: cert-manager-authenticate-gcp - - Name: Authenticating the cert-manager Operator for Red Hat OpenShift on AWS - File: cert-manager-authentication-non-sts - - Name: Authenticating the cert-manager Operator for Red Hat OpenShift on GCP - File: cert-manager-authenticate-non-sts-gcp - - Name: Uninstalling the cert-manager Operator for Red Hat OpenShift - File: cert-manager-operator-uninstall -- Name: Viewing audit logs - File: audit-log-view -- Name: Configuring the audit log policy - File: audit-log-policy-config -- Name: Configuring TLS security profiles - File: tls-security-profiles -- Name: Configuring seccomp profiles - File: seccomp-profiles -- Name: Allowing JavaScript-based access to the API server from additional hosts - File: allowing-javascript-access-api-server - Distros: openshift-enterprise,openshift-origin -- Name: Encrypting etcd data - File: encrypting-etcd - Distros: openshift-enterprise,openshift-origin -- Name: Scanning pods for vulnerabilities - File: pod-vulnerability-scan - Distros: openshift-enterprise,openshift-origin -- Name: Network-Bound Disk Encryption (NBDE) - Dir: network_bound_disk_encryption - Topics: - - Name: About disk encryption technology - File: nbde-about-disk-encryption-technology - - Name: Tang server installation considerations - File: nbde-tang-server-installation-considerations - - Name: Tang server encryption key management - File: nbde-managing-encryption-keys - - Name: Disaster recovery considerations - File: nbde-disaster-recovery-considerations - Distros: openshift-enterprise,openshift-origin ---- -Name: Authentication and authorization -Dir: authentication -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Authentication and authorization overview - File: index -- Name: Understanding authentication - File: understanding-authentication - Distros: openshift-enterprise,openshift-origin,openshift-online -- Name: Configuring the internal OAuth server - File: configuring-internal-oauth -- Name: Configuring OAuth clients - File: configuring-oauth-clients -- Name: Managing user-owned OAuth access tokens - File: managing-oauth-access-tokens - Distros: openshift-enterprise,openshift-origin -- Name: Understanding identity provider configuration - File: understanding-identity-provider - Distros: openshift-enterprise,openshift-origin -- Name: Configuring identity providers - Dir: identity_providers - Topics: - - Name: Configuring an htpasswd identity provider - File: configuring-htpasswd-identity-provider - Distros: openshift-enterprise,openshift-origin - - Name: Configuring a Keystone identity provider - File: configuring-keystone-identity-provider - Distros: openshift-enterprise,openshift-origin - - Name: Configuring an LDAP identity provider - File: configuring-ldap-identity-provider - - Name: Configuring a basic authentication identity provider - File: configuring-basic-authentication-identity-provider - Distros: openshift-enterprise,openshift-origin - - Name: Configuring a request header identity provider - File: configuring-request-header-identity-provider - Distros: openshift-enterprise,openshift-origin - - Name: Configuring a GitHub or 
GitHub Enterprise identity provider - File: configuring-github-identity-provider - - Name: Configuring a GitLab identity provider - File: configuring-gitlab-identity-provider - Distros: openshift-enterprise,openshift-origin - - Name: Configuring a Google identity provider - File: configuring-google-identity-provider - - Name: Configuring an OpenID Connect identity provider - File: configuring-oidc-identity-provider -- Name: Using RBAC to define and apply permissions - File: using-rbac -- Name: Removing the kubeadmin user - File: remove-kubeadmin - Distros: openshift-enterprise,openshift-origin -#- Name: Configuring LDAP failover -# File: configuring-ldap-failover -- Name: Understanding and creating service accounts - File: understanding-and-creating-service-accounts -- Name: Using service accounts in applications - File: using-service-accounts-in-applications -- Name: Using a service account as an OAuth client - File: using-service-accounts-as-oauth-client -- Name: Scoping tokens - File: tokens-scoping -- Name: Using bound service account tokens - File: bound-service-account-tokens -- Name: Managing security context constraints - File: managing-security-context-constraints - Distros: openshift-enterprise,openshift-origin -- Name: Understanding and managing pod security admission - File: understanding-and-managing-pod-security-admission - Distros: openshift-enterprise,openshift-origin -- Name: Impersonating the system:admin user - File: impersonating-system-admin - Distros: openshift-enterprise,openshift-origin -- Name: Syncing LDAP groups - File: ldap-syncing - Distros: openshift-enterprise,openshift-origin -- Name: Managing cloud provider credentials - Dir: managing_cloud_provider_credentials - Topics: - - Name: About the Cloud Credential Operator - File: about-cloud-credential-operator - - Name: Using mint mode - File: cco-mode-mint - - Name: Using passthrough mode - File: cco-mode-passthrough - - Name: Using manual mode - File: cco-mode-manual - - Name: Using manual mode with AWS Security Token Service - File: cco-mode-sts - - Name: Using manual mode with GCP Workload Identity - File: cco-mode-gcp-workload-identity ---- -Name: Networking -Dir: networking -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: About networking - File: about-networking -- Name: Understanding networking - File: understanding-networking -- Name: Accessing hosts - File: accessing-hosts -- Name: Networking Operators overview - File: networking-operators-overview -- Name: Understanding the Cluster Network Operator - File: cluster-network-operator - Distros: openshift-enterprise,openshift-origin -- Name: Understanding the DNS Operator - File: dns-operator - Distros: openshift-enterprise,openshift-origin -- Name: Understanding the Ingress Operator - File: ingress-operator - Distros: openshift-enterprise,openshift-origin -- Name: Ingress sharding - File: ingress-sharding -- Name: Understanding the Ingress Node Firewall Operator - File: ingress-node-firewall-operator - Distros: openshift-enterprise,openshift-origin -- Name: Configuring the Ingress Controller for manual DNS management - File: ingress-controller-dnsmgt - Distros: openshift-enterprise,openshift-origin -- Name: Configuring the Ingress Controller endpoint publishing strategy - File: nw-ingress-controller-endpoint-publishing-strategies - Distros: openshift-enterprise,openshift-origin -- Name: Verifying connectivity to an endpoint - File: verifying-connectivity-endpoint -- Name: Changing the cluster network MTU - File: 
changing-cluster-network-mtu -- Name: Configuring the node port service range - File: configuring-node-port-service-range -- Name: Configuring the cluster network IP address range - File: configuring-cluster-network-range -- Name: Configuring IP failover - File: configuring-ipfailover -- Name: Configuring interface-level network sysctls - File: setting-interface-level-network-sysctls -- Name: Using SCTP - File: using-sctp - Distros: openshift-enterprise,openshift-origin -- Name: Using PTP hardware - File: using-ptp -- Name: Developing PTP events consumer applications - File: ptp-cloud-events-consumer-dev-reference -- Name: External DNS Operator - Dir: external_dns_operator - Topics: - - Name: Understanding the External DNS Operator - File: understanding-external-dns-operator - - Name: Installing the External DNS Operator - File: nw-installing-external-dns-operator-on-cloud-providers - - Name: External DNS Operator configuration parameters - File: nw-configuration-parameters - - Name: Creating DNS records on an public hosted zone for AWS - File: nw-creating-dns-records-on-aws - - Name: Creating DNS records on an public zone for Azure - File: nw-creating-dns-records-on-azure - - Name: Creating DNS records on an public managed zone for GCP - File: nw-creating-dns-records-on-gcp - - Name: Creating DNS records on a public DNS zone for Infoblox - File: nw-creating-dns-records-on-infoblox - - Name: Configuring the cluster-wide proxy on the External DNS Operator - File: nw-configuring-cluster-wide-egress-proxy -- Name: Network policy - Dir: network_policy - Topics: - - Name: About network policy - File: about-network-policy - - Name: Creating a network policy - File: creating-network-policy - - Name: Viewing a network policy - File: viewing-network-policy - - Name: Editing a network policy - File: editing-network-policy - - Name: Deleting a network policy - File: deleting-network-policy - - Name: Defining a default network policy for projects - File: default-network-policy - - Name: Configuring multitenant isolation with network policy - File: multitenant-network-policy -- Name: AWS Load Balancer Operator - Dir: aws_load_balancer_operator - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: AWS Load Balancer Operator release notes - File: aws-load-balancer-operator-release-notes - - Name: Understanding the AWS Load Balancer Operator - File: understanding-aws-load-balancer-operator - - Name: Installing the AWS Load Balancer Operator - File: install-aws-load-balancer-operator - - Name: Installing the AWS Load Balancer Operator on Security Token Service cluster - File: installing-albo-sts-cluster - - Name: Creating an instance of the AWS Load Balancer Controller - File: create-instance-aws-load-balancer-controller - - Name: Serving Multiple Ingresses through a single AWS Load Balancer - File: multiple-ingress-through-single-alb - - Name: Adding TLS termination on the AWS Load Balancer - File: add-tls-termination - - Name: Configuring cluster-wide proxy on the AWS Load Balancer Operator - File: configure-egress-proxy-aws-load-balancer-operator -- Name: Multiple networks - Dir: multiple_networks - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Understanding multiple networks - File: understanding-multiple-networks - - Name: Configuring an additional network - File: configuring-additional-network - - Name: About virtual routing and forwarding - File: about-virtual-routing-and-forwarding - - Name: Configuring multi-network policy - File: 
configuring-multi-network-policy - - Name: Attaching a pod to an additional network - File: attaching-pod - - Name: Removing a pod from an additional network - File: removing-pod - - Name: Editing an additional network - File: edit-additional-network - - Name: Removing an additional network - File: remove-additional-network - - Name: Assigning a secondary network to a VRF - File: assigning-a-secondary-network-to-a-vrf -- Name: Hardware networks - Dir: hardware_networks - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: About Single Root I/O Virtualization (SR-IOV) hardware networks - File: about-sriov - - Name: Installing the SR-IOV Operator - File: installing-sriov-operator - - Name: Configuring the SR-IOV Operator - File: configuring-sriov-operator - - Name: Configuring an SR-IOV network device - File: configuring-sriov-device - - Name: Configuring an SR-IOV Ethernet network attachment - File: configuring-sriov-net-attach - - Name: Configuring an SR-IOV InfiniBand network attachment - File: configuring-sriov-ib-attach - - Name: Adding a pod to an SR-IOV network - File: add-pod - - Name: Tuning sysctl settings on an SR-IOV network - File: configuring-interface-sysctl-sriov-device - - Name: Using high performance multicast - File: using-sriov-multicast - - Name: Using DPDK and RDMA - File: using-dpdk-and-rdma - - Name: Using pod-level bonding for secondary networks - File: using-pod-level-bonding - - Name: Configuring hardware offloading - File: configuring-hardware-offloading - - Name: Switching BlueField-2 from NIC to DPU mode - File: switching-bf2-nic-dpu - - Name: Uninstalling the SR-IOV Operator - File: uninstalling-sriov-operator -- Name: OVN-Kubernetes network plugin - Dir: ovn_kubernetes_network_provider - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: About the OVN-Kubernetes network plugin - File: about-ovn-kubernetes - - Name: OVN-Kubernetes architecture - File: ovn-kubernetes-architecture-assembly - - Name: OVN-Kubernetes troubleshooting - File: ovn-kubernetes-troubleshooting-sources - - Name: OVN-Kubernetes traffic tracing - File: ovn-kubernetes-tracing-using-ovntrace - - Name: Migrating from the OpenShift SDN network plugin - File: migrate-from-openshift-sdn - - Name: Rolling back to the OpenShift SDN network plugin - File: rollback-to-openshift-sdn - - Name: Migrating from Kuryr - File: migrate-from-kuryr-sdn - - Name: Converting to IPv4/IPv6 dual stack networking - File: converting-to-dual-stack - - Name: Logging for egress firewall and network policy rules - File: logging-network-policy - - Name: Configuring IPsec encryption - File: configuring-ipsec-ovn - - Name: Configuring an egress firewall for a project - File: configuring-egress-firewall-ovn - - Name: Viewing an egress firewall for a project - File: viewing-egress-firewall-ovn - - Name: Editing an egress firewall for a project - File: editing-egress-firewall-ovn - - Name: Removing an egress firewall from a project - File: removing-egress-firewall-ovn - - Name: Configuring an egress IP address - File: configuring-egress-ips-ovn - - Name: Assigning an egress IP address - File: assigning-egress-ips-ovn - - Name: Considerations for the use of an egress router pod - File: using-an-egress-router-ovn - - Name: Deploying an egress router pod in redirect mode - File: deploying-egress-router-ovn-redirection - - Name: Enabling multicast for a project - File: enabling-multicast - - Name: Disabling multicast for a project - File: disabling-multicast - - Name: Tracking network flows - File: 
tracking-network-flows - - Name: Configuring hybrid networking - File: configuring-hybrid-networking -- Name: OpenShift SDN network plugin - Dir: openshift_sdn - Topics: - - Name: About the OpenShift SDN network plugin - File: about-openshift-sdn - - Name: Migrating to the OpenShift SDN network plugin - File: migrate-to-openshift-sdn - - Name: Rolling back to the OVN-Kubernetes network plugin - File: rollback-to-ovn-kubernetes - - Name: Configuring egress IPs for a project - File: assigning-egress-ips - Distros: openshift-origin,openshift-enterprise - - Name: Configuring an egress firewall for a project - File: configuring-egress-firewall - - Name: Viewing an egress firewall for a project - File: viewing-egress-firewall - - Name: Editing an egress firewall for a project - File: editing-egress-firewall - - Name: Removing an egress firewall from a project - File: removing-egress-firewall - - Name: Considerations for the use of an egress router pod - File: using-an-egress-router - - Name: Deploying an egress router pod in redirect mode - File: deploying-egress-router-layer3-redirection - - Name: Deploying an egress router pod in HTTP proxy mode - File: deploying-egress-router-http-redirection - - Name: Deploying an egress router pod in DNS proxy mode - File: deploying-egress-router-dns-redirection - - Name: Configuring an egress router pod destination list from a config map - File: configuring-egress-router-configmap - - Name: Enabling multicast for a project - File: enabling-multicast - Distros: openshift-origin,openshift-enterprise - - Name: Disabling multicast for a project - File: disabling-multicast - Distros: openshift-origin,openshift-enterprise - - Name: Configuring multitenant isolation - File: multitenant-isolation - Distros: openshift-origin,openshift-enterprise - - Name: Configuring kube-proxy - File: configuring-kube-proxy - Distros: openshift-enterprise,openshift-origin -- Name: Configuring Routes - Dir: routes - Topics: - - Name: Route configuration - File: route-configuration - - Name: Secured routes - File: secured-routes -- Name: Configuring ingress cluster traffic - Dir: configuring_ingress_cluster_traffic - Topics: - - Name: Overview - File: overview-traffic - Distros: openshift-enterprise,openshift-origin - - Name: Configuring ExternalIPs for services - File: configuring-externalip - Distros: openshift-enterprise,openshift-origin - - Name: Configuring ingress cluster traffic using an Ingress Controller - File: configuring-ingress-cluster-traffic-ingress-controller - - Name: Configuring ingress cluster traffic using a load balancer - File: configuring-ingress-cluster-traffic-load-balancer - Distros: openshift-enterprise,openshift-origin - - Name: Configuring ingress cluster traffic on AWS - File: configuring-ingress-cluster-traffic-aws - Distros: openshift-enterprise,openshift-origin - - Name: Configuring ingress cluster traffic using a service external IP - File: configuring-ingress-cluster-traffic-service-external-ip - Distros: openshift-enterprise,openshift-origin - - Name: Configuring ingress cluster traffic using a NodePort - File: configuring-ingress-cluster-traffic-nodeport - Distros: openshift-enterprise,openshift-origin - - Name: Configuring ingress cluster traffic using load balancer allowed source ranges - File: configuring-ingress-cluster-traffic-load-balancer-allowed-source-ranges - Distros: openshift-enterprise,openshift-origin - # Kubernetes NMState (TECHNOLOGY PREVIEW) -- Name: Kubernetes NMState - Dir: k8s_nmstate - Topics: - - Name: About the Kubernetes 
NMState Operator - File: k8s-nmstate-about-the-k8s-nmstate-operator - - Name: Observing node network state - File: k8s-nmstate-observing-node-network-state - - Name: Updating node network configuration - File: k8s-nmstate-updating-node-network-config - - Name: Troubleshooting node network configuration - File: k8s-nmstate-troubleshooting-node-network -- Name: Configuring the cluster-wide proxy - File: enable-cluster-wide-proxy - Distros: openshift-enterprise,openshift-origin -- Name: Configuring a custom PKI - File: configuring-a-custom-pki - Distros: openshift-enterprise,openshift-origin -- Name: Load balancing on OpenStack - File: load-balancing-openstack -- Name: Load balancing with MetalLB - Dir: metallb - Topics: - - Name: About MetalLB and the MetalLB Operator - File: about-metallb - - Name: Installing the MetalLB Operator - File: metallb-operator-install - - Name: Upgrading the MetalLB Operator - File: metallb-upgrading-operator - - Name: Configuring MetalLB address pools - File: metallb-configure-address-pools - - Name: Advertising the IP address pools - File: about-advertising-ipaddresspool - - Name: Configuring MetalLB BGP peers - File: metallb-configure-bgp-peers - - Name: Advertising an IP address pool using the community alias - File: metallb-configure-community-alias - - Name: Configuring MetalLB BFD profiles - File: metallb-configure-bfd-profiles - - Name: Configuring services to use MetalLB - File: metallb-configure-services - - Name: MetalLB logging, troubleshooting, and support - File: metallb-troubleshoot-support -- Name: Associating secondary interfaces metrics to network attachments - File: associating-secondary-interfaces-metrics-to-network-attachments -- Name: Network Observability - Dir: network_observability - Topics: - - Name: Network Observability release notes - File: network-observability-operator-release-notes - - Name: Network Observability overview - File: network-observability-overview - - Name: Installing the Network Observability Operator - File: installing-operators - - Name: Understanding Network Observability Operator - File: understanding-network-observability-operator - - Name: Configuring the Network Observability Operator - File: configuring-operator - - Name: Network Policy - File: network-observability-network-policy - - Name: Observing the network traffic - File: observing-network-traffic - - Name: Monitoring the Network Observability Operator - File: network-observability-operator-monitoring - - Name: API reference - File: flowcollector-api - - Name: JSON flows format reference - File: json-flows-format-reference - - Name: Troubleshooting Network Observability - File: troubleshooting-network-observability ---- -Name: Storage -Dir: storage -Distros: openshift-enterprise,openshift-origin,openshift-online -Topics: -- Name: Storage overview - File: index - Distros: openshift-enterprise,openshift-origin,openshift-online -- Name: Understanding ephemeral storage - File: understanding-ephemeral-storage - Distros: openshift-enterprise,openshift-origin,openshift-online -- Name: Understanding persistent storage - File: understanding-persistent-storage - Distros: openshift-enterprise,openshift-origin,openshift-online -- Name: Configuring persistent storage - Dir: persistent_storage - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Persistent storage using AWS Elastic Block Store - File: persistent-storage-aws - - Name: Persistent storage using Azure Disk - File: persistent-storage-azure - - Name: Persistent storage using Azure File - 
File: persistent-storage-azure-file - - Name: Persistent storage using Cinder - File: persistent-storage-cinder - - Name: Persistent storage using Fibre Channel - File: persistent-storage-fibre - - Name: Persistent storage using FlexVolume - File: persistent-storage-flexvolume - - Name: Persistent storage using GCE Persistent Disk - File: persistent-storage-gce - - Name: Persistent Storage using iSCSI - File: persistent-storage-iscsi - - Name: Persistent storage using NFS - File: persistent-storage-nfs - - Name: Persistent storage using Red Hat OpenShift Data Foundation - File: persistent-storage-ocs - - Name: Persistent storage using VMware vSphere - File: persistent-storage-vsphere - - Name: Persistent storage using local storage - Dir: persistent_storage_local - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Persistent storage using local volumes - File: persistent-storage-local - - Name: Persistent storage using hostPath - File: persistent-storage-hostpath - - Name: Persistent storage using LVM Storage - File: persistent-storage-using-lvms -- Name: Using Container Storage Interface (CSI) - Dir: container_storage_interface - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Configuring CSI volumes - File: persistent-storage-csi - - Name: CSI inline ephemeral volumes - File: ephemeral-storage-csi-inline - - Name: Shared Resource CSI Driver Operator - File: ephemeral-storage-shared-resource-csi-driver-operator - - Name: CSI volume snapshots - File: persistent-storage-csi-snapshots - - Name: CSI volume cloning - File: persistent-storage-csi-cloning - - Name: Managing the default storage class - File: persistent-storage-csi-sc-manage - - Name: CSI automatic migration - File: persistent-storage-csi-migration - - Name: Detach CSI volumes after non-graceful node shutdown - File: persistent-storage-csi-vol-detach-non-graceful-shutdown - - Name: AliCloud Disk CSI Driver Operator - File: persistent-storage-csi-alicloud-disk - - Name: AWS Elastic Block Store CSI Driver Operator - File: persistent-storage-csi-ebs - - Name: AWS Elastic File Service CSI Driver Operator - File: persistent-storage-csi-aws-efs - - Name: Azure Disk CSI Driver Operator - File: persistent-storage-csi-azure - - Name: Azure File CSI Driver Operator - File: persistent-storage-csi-azure-file - - Name: Azure Stack Hub CSI Driver Operator - File: persistent-storage-csi-azure-stack-hub - - Name: GCP PD CSI Driver Operator - File: persistent-storage-csi-gcp-pd - - Name: GCP Filestore CSI Driver Operator - File: persistent-storage-csi-google-cloud-file - - Name: IBM VPC Block CSI Driver Operator - File: persistent-storage-csi-ibm-vpc-block - - Name: IBM Power Virtual Server Block CSI Driver Operator - File: persistent-storage-csi-ibm-powervs-block - - Name: OpenStack Cinder CSI Driver Operator - File: persistent-storage-csi-cinder - - Name: OpenStack Manila CSI Driver Operator - File: persistent-storage-csi-manila - - Name: VMware vSphere CSI Driver Operator - File: persistent-storage-csi-vsphere -- Name: Generic ephemeral volumes - File: generic-ephemeral-vols - Distros: openshift-enterprise,openshift-origin,openshift-online -- Name: Expanding persistent volumes - File: expanding-persistent-volumes - Distros: openshift-enterprise,openshift-origin -- Name: Dynamic provisioning - File: dynamic-provisioning - Distros: openshift-enterprise,openshift-origin ---- -Name: Registry -Dir: registry -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Registry overview - File: index -- Name: 
Image Registry Operator in OpenShift Container Platform - File: configuring-registry-operator - Distros: openshift-enterprise -- Name: Image Registry Operator in OKD - File: configuring-registry-operator - Distros: openshift-origin -- Name: Setting up and configuring the registry - Dir: configuring_registry_storage - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Configuring the registry for AWS user-provisioned infrastructure - File: configuring-registry-storage-aws-user-infrastructure - - Name: Configuring the registry for GCP user-provisioned infrastructure - File: configuring-registry-storage-gcp-user-infrastructure - - Name: Configuring the registry for OpenStack user-provisioned infrastructure - File: configuring-registry-storage-openstack-user-infrastructure - - Name: Configuring the registry for Azure user-provisioned infrastructure - File: configuring-registry-storage-azure-user-infrastructure - - Name: Configuring the registry for OpenStack - File: configuring-registry-storage-osp - - Name: Configuring the registry for bare metal - File: configuring-registry-storage-baremetal - - Name: Configuring the registry for vSphere - File: configuring-registry-storage-vsphere - - Name: Configuring the registry for OpenShift Data Foundation - File: configuring-registry-storage-rhodf - Distros: openshift-enterprise,openshift-origin -- Name: Accessing the registry - File: accessing-the-registry -- Name: Exposing the registry - File: securing-exposing-registry - Distros: openshift-enterprise,openshift-origin ---- -Name: Operators -Dir: operators -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Operators overview - File: index -- Name: Understanding Operators - Dir: understanding - Topics: - - Name: What are Operators? 
- File: olm-what-operators-are - - Name: Packaging format - File: olm-packaging-format - - Name: Common terms - File: olm-common-terms - - Name: Operator Lifecycle Manager (OLM) - Dir: olm - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Concepts and resources - File: olm-understanding-olm - - Name: Architecture - File: olm-arch - - Name: Workflow - File: olm-workflow - - Name: Dependency resolution - File: olm-understanding-dependency-resolution - - Name: Operator groups - File: olm-understanding-operatorgroups - - Name: Multitenancy and Operator colocation - File: olm-colocation - - Name: Operator conditions - File: olm-operatorconditions - - Name: Metrics - File: olm-understanding-metrics - - Name: Webhooks - File: olm-webhooks - - Name: OperatorHub - Distros: openshift-enterprise,openshift-origin - File: olm-understanding-operatorhub - - Name: Red Hat-provided Operator catalogs - Distros: openshift-enterprise - File: olm-rh-catalogs - - Name: Operators in multitenant clusters - Distros: openshift-enterprise,openshift-origin - File: olm-multitenancy - - Name: CRDs - Dir: crds - Topics: - - Name: Extending the Kubernetes API with CRDs - File: crd-extending-api-with-crds - Distros: openshift-origin,openshift-enterprise - - Name: Managing resources from CRDs - File: crd-managing-resources-from-crds - Distros: openshift-origin,openshift-enterprise -- Name: User tasks - Dir: user - Topics: - - Name: Creating applications from installed Operators - File: olm-creating-apps-from-installed-operators - Distros: openshift-enterprise,openshift-origin - - Name: Installing Operators in your namespace - File: olm-installing-operators-in-namespace - Distros: openshift-enterprise,openshift-origin -- Name: Administrator tasks - Dir: admin - Topics: - - Name: Adding Operators to a cluster - File: olm-adding-operators-to-cluster - Distros: openshift-enterprise,openshift-origin - - Name: Updating installed Operators - File: olm-upgrading-operators - Distros: openshift-enterprise,openshift-origin - - Name: Deleting Operators from a cluster - File: olm-deleting-operators-from-cluster - Distros: openshift-enterprise,openshift-origin - - Name: Configuring OLM features - File: olm-config - Distros: openshift-enterprise,openshift-origin - - Name: Configuring proxy support - File: olm-configuring-proxy-support - Distros: openshift-enterprise,openshift-origin - - Name: Viewing Operator status - File: olm-status - Distros: openshift-enterprise,openshift-origin - - Name: Managing Operator conditions - File: olm-managing-operatorconditions - Distros: openshift-origin,openshift-enterprise - - Name: Allowing non-cluster administrators to install Operators - File: olm-creating-policy - Distros: openshift-origin,openshift-enterprise - - Name: Managing custom catalogs - File: olm-managing-custom-catalogs - Distros: openshift-origin,openshift-enterprise - - Name: Using OLM on restricted networks - File: olm-restricted-networks - Distros: openshift-origin,openshift-enterprise - - Name: Catalog source pod scheduling - File: olm-cs-podsched - Distros: openshift-origin,openshift-enterprise - - Name: Managing platform Operators - File: olm-managing-po - Distros: openshift-enterprise,openshift-origin -- Name: Developing Operators - Dir: operator_sdk - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: About the Operator SDK - File: osdk-about - - Name: Installing the Operator SDK CLI - File: osdk-installing-cli - - Name: Go-based Operators - Dir: golang - Topics: - - Name: Getting started - 
File: osdk-golang-quickstart - - Name: Tutorial - File: osdk-golang-tutorial - - Name: Project layout - File: osdk-golang-project-layout - - Name: Updating Go-based projects - File: osdk-golang-updating-projects - - Name: Ansible-based Operators - Dir: ansible - Topics: - - Name: Getting started - File: osdk-ansible-quickstart - - Name: Tutorial - File: osdk-ansible-tutorial - - Name: Project layout - File: osdk-ansible-project-layout - - Name: Updating Ansible-based projects - File: osdk-ansible-updating-projects - - Name: Ansible support - File: osdk-ansible-support - - Name: Kubernetes Collection for Ansible - File: osdk-ansible-k8s-collection - - Name: Using Ansible inside an Operator - File: osdk-ansible-inside-operator - - Name: Custom resource status management - File: osdk-ansible-cr-status - - Name: Helm-based Operators - Dir: helm - Topics: - - Name: Getting started - File: osdk-helm-quickstart - - Name: Tutorial - File: osdk-helm-tutorial - - Name: Project layout - File: osdk-helm-project-layout - - Name: Updating Helm-based projects - File: osdk-helm-updating-projects - - Name: Helm support - File: osdk-helm-support - - Name: Hybrid Helm Operator - File: osdk-hybrid-helm - - Name: Updating Hybrid Helm-based projects - File: osdk-hybrid-helm-updating-projects - - Name: Java-based Operators - Dir: java - Topics: - - Name: Getting started - File: osdk-java-quickstart - - Name: Tutorial - File: osdk-java-tutorial - - Name: Project layout - File: osdk-java-project-layout - - Name: Updating Java-based projects - File: osdk-java-updating-projects - - Name: Defining cluster service versions (CSVs) - File: osdk-generating-csvs - - Name: Working with bundle images - File: osdk-working-bundle-images - - Name: Complying with pod security admission - File: osdk-complying-with-psa - - Name: Validating Operators using the scorecard - File: osdk-scorecard - - Name: Validating Operator bundles - File: osdk-bundle-validate - - Name: High-availability or single-node cluster detection and support - File: osdk-ha-sno - - Name: Configuring built-in monitoring with Prometheus - File: osdk-monitoring-prometheus - - Name: Configuring leader election - File: osdk-leader-election - - Name: Object pruning utility - File: osdk-pruning-utility - - Name: Migrating package manifest projects to bundle format - File: osdk-pkgman-to-bundle - - Name: Operator SDK CLI reference - File: osdk-cli-ref - - Name: Migrating to Operator SDK v0.1.0 - File: osdk-migrating-to-v0-1-0 - Distros: openshift-origin -- Name: Cluster Operators reference - File: operator-reference ---- -Name: CI/CD -Dir: cicd -Distros: openshift-enterprise,openshift-origin,openshift-online -Topics: -- Name: CI/CD overview - File: index -- Name: Builds - Dir: builds - Distros: openshift-enterprise,openshift-origin,openshift-online - Topics: - - Name: Understanding image builds - File: understanding-image-builds - - Name: Understanding build configurations - File: understanding-buildconfigs - - Name: Creating build inputs - File: creating-build-inputs - - Name: Managing build output - File: managing-build-output - - Name: Using build strategies - File: build-strategies - - Name: Custom image builds with Buildah - File: custom-builds-buildah - Distros: openshift-enterprise,openshift-origin - - Name: Performing and configuring basic builds - File: basic-build-operations - Distros: openshift-enterprise,openshift-origin,openshift-online - - Name: Triggering and modifying builds - File: triggering-builds-build-hooks - Distros: 
openshift-enterprise,openshift-origin,openshift-online - - Name: Performing advanced builds - File: advanced-build-operations - Distros: openshift-enterprise,openshift-origin - - Name: Using Red Hat subscriptions in builds - File: running-entitled-builds - Distros: openshift-enterprise,openshift-origin - - Name: Securing builds by strategy - File: securing-builds-by-strategy - Distros: openshift-enterprise,openshift-origin - - Name: Build configuration resources - File: build-configuration - Distros: openshift-enterprise,openshift-origin - - Name: Troubleshooting builds - File: troubleshooting-builds - Distros: openshift-enterprise,openshift-origin - - Name: Setting up additional trusted certificate authorities for builds - File: setting-up-trusted-ca - Distros: openshift-enterprise,openshift-origin -- Name: Pipelines - Dir: pipelines - Distros: openshift-enterprise - Topics: - - Name: OpenShift Pipelines release notes - File: op-release-notes - - Name: Understanding OpenShift Pipelines - File: understanding-openshift-pipelines - - Name: Installing OpenShift Pipelines - File: installing-pipelines - - Name: Uninstalling OpenShift Pipelines - File: uninstalling-pipelines - - Name: Creating CI/CD solutions for applications using OpenShift Pipelines - File: creating-applications-with-cicd-pipelines - - Name: Managing non-versioned and versioned cluster tasks - File: managing-nonversioned-and-versioned-cluster-tasks - - Name: Using Tekton Hub with OpenShift Pipelines - File: using-tekton-hub-with-openshift-pipelines - - Name: Specifying remote pipelines and tasks using resolvers - File: remote-pipelines-tasks-resolvers - - Name: Using Pipelines as Code - File: using-pipelines-as-code - - Name: Working with OpenShift Pipelines using the Developer perspective - File: working-with-pipelines-using-the-developer-perspective - - Name: Customizing configurations in the TektonConfig custom resource - File: customizing-configurations-in-the-tektonconfig-cr - - Name: Reducing resource consumption of OpenShift Pipelines - File: reducing-pipelines-resource-consumption - - Name: Setting compute resource quota for OpenShift Pipelines - File: setting-compute-resource-quota-for-openshift-pipelines - - Name: Using pods in a privileged security context - File: using-pods-in-a-privileged-security-context - - Name: Securing webhooks with event listeners - File: securing-webhooks-with-event-listeners - - Name: Authenticating pipelines using git secret - File: authenticating-pipelines-using-git-secret - - Name: Using Tekton Chains for OpenShift Pipelines supply chain security - File: using-tekton-chains-for-openshift-pipelines-supply-chain-security - - Name: Viewing pipeline logs using the OpenShift Logging Operator - File: viewing-pipeline-logs-using-the-openshift-logging-operator - - Name: Unprivileged building of container images using Buildah - File: unprivileged-building-of-container-images-using-buildah -- Name: GitOps - Dir: gitops - Distros: openshift-enterprise - Topics: - - Name: OpenShift GitOps release notes - File: gitops-release-notes - - Name: Understanding OpenShift GitOps - File: understanding-openshift-gitops - - Name: Installing OpenShift GitOps - File: installing-openshift-gitops - - Name: Uninstalling OpenShift GitOps - File: uninstalling-openshift-gitops - - Name: Setting up a new Argo CD instance - File: setting-up-argocd-instance - - Name: Configuring an OpenShift cluster by deploying an application with cluster configurations - File: 
configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations - - Name: Deploying a Spring Boot application with Argo CD - File: deploying-a-spring-boot-application-with-argo-cd - - Name: Argo CD custom resource properties - File: argo-cd-custom-resource-properties - - Name: Monitoring application health status - File: health-information-for-resources-deployment - - Name: Configuring SSO for Argo CD using Dex - File: configuring-sso-on-argo-cd-using-dex - - Name: Configuring SSO for Argo CD using Keycloak - File: configuring-sso-for-argo-cd-using-keycloak - - Name: Configuring Argo CD RBAC - File: configuring-argo-cd-rbac - - Name: Configuring Resource Quota - File: configuring-resource-quota - - Name: Monitoring Argo CD custom resource workloads - File: monitoring-argo-cd-custom-resource-workloads - - Name: Running Control Plane Workloads on Infra nodes - File: run-gitops-control-plane-workload-on-infra-nodes - - Name: Sizing requirements for GitOps Operator - File: about-sizing-requirements-gitops - - Name: Collecting debugging data for a support case - File: collecting-debugging-data-for-support - - Name: Troubleshooting issues in GitOps - File: troubleshooting-issues-in-GitOps -- Name: Jenkins - Dir: jenkins - Distros: openshift-enterprise - Topics: - - Name: Configuring Jenkins images - File: images-other-jenkins - - Name: Jenkins agent - File: images-other-jenkins-agent - - Name: Migrating from Jenkins to OpenShift Pipelines - File: migrating-from-jenkins-to-openshift-pipelines - - Name: Important changes to OpenShift Jenkins images - File: important-changes-to-openshift-jenkins-images ---- -Name: Images -Dir: openshift_images -Distros: openshift-enterprise,openshift-origin,openshift-online -Topics: -- Name: Overview of images - File: index -- Name: Configuring the Cluster Samples Operator - File: configuring-samples-operator - Distros: openshift-enterprise,openshift-origin -- Name: Using the Cluster Samples Operator with an alternate registry - File: samples-operator-alt-registry - Distros: openshift-enterprise,openshift-origin -- Name: Creating images - File: create-images -- Name: Managing images - Dir: managing_images - Topics: - - Name: Managing images overview - File: managing-images-overview - - Name: Tagging images - File: tagging-images - - Name: Image pull policy - File: image-pull-policy - - Name: Using image pull secrets - File: using-image-pull-secrets -- Name: Managing image streams - File: image-streams-manage - Distros: openshift-enterprise,openshift-origin -- Name: Using image streams with Kubernetes resources - File: using-imagestreams-with-kube-resources - Distros: openshift-enterprise,openshift-origin -- Name: Triggering updates on image stream changes - File: triggering-updates-on-imagestream-changes - Distros: openshift-enterprise,openshift-origin -- Name: Image configuration resources - File: image-configuration - Distros: openshift-enterprise,openshift-origin -- Name: Using templates - File: using-templates -- Name: Using Ruby on Rails - File: templates-using-ruby-on-rails -- Name: Using images - Dir: using_images - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Using images overview - File: using-images-overview - - Name: Source-to-image - File: using-s21-images - - Name: Customizing source-to-image images - File: customizing-s2i-images ---- -Name: Building applications -Dir: applications -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Building applications overview - File: index -- Name: 
Projects - Dir: projects - Topics: - - Name: Working with projects - File: working-with-projects - - Name: Creating a project as another user - File: creating-project-other-user - Distros: openshift-enterprise,openshift-origin - - Name: Configuring project creation - File: configuring-project-creation - Distros: openshift-enterprise,openshift-origin -- Name: Creating applications - Dir: creating_applications - Topics: - - Name: Creating applications using the Developer perspective - File: odc-creating-applications-using-developer-perspective - - Name: Creating applications from installed Operators - File: creating-apps-from-installed-operators - - Name: Creating applications using the CLI - File: creating-applications-using-cli -- Name: Viewing application composition using the Topology view - File: odc-viewing-application-composition-using-topology-view -- Name: Exporting applications - File: odc-exporting-applications -- Name: Connecting applications to services - Dir: connecting_applications_to_services - Topics: - - Name: Service Binding Operator release notes - File: sbo-release-notes - - Name: Understanding Service Binding Operator - File: understanding-service-binding-operator - - Name: Installing Service Binding Operator - File: installing-sbo - - Name: Getting started with service binding - File: getting-started-with-service-binding - - Name: Getting started with service binding on IBM Power, IBM Z, and IBM LinuxONE - File: getting-started-with-service-binding-ibm-power-ibm-z - - Name: Exposing binding data from a service - File: exposing-binding-data-from-a-service - - Name: Projecting binding data - File: projecting-binding-data - - Name: Binding workloads using Service Binding Operator - File: binding-workloads-using-sbo - - Name: Connecting an application to a service using the Developer perspective - File: odc-connecting-an-application-to-a-service-using-the-developer-perspective -- Name: Working with Helm charts - Dir: working_with_helm_charts - Topics: - - Name: Understanding Helm - File: understanding-helm - - Name: Installing Helm - File: installing-helm - - Name: Configuring custom Helm chart repositories - File: configuring-custom-helm-chart-repositories - - Name: Working with Helm releases - File: odc-working-with-helm-releases -- Name: Deployments - Dir: deployments - Topics: - - Name: Understanding Deployments and DeploymentConfigs - File: what-deployments-are - - Name: Managing deployment processes - File: managing-deployment-processes - - Name: Using deployment strategies - File: deployment-strategies - - Name: Using route-based deployment strategies - File: route-based-deployment-strategies -- Name: Quotas - Dir: quotas - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Resource quotas per project - File: quotas-setting-per-project - - Name: Resource quotas across multiple projects - File: quotas-setting-across-multiple-projects - Distros: openshift-enterprise,openshift-origin -- Name: Using config maps with applications - File: config-maps -- Name: Monitoring project and application metrics using the Developer perspective - File: odc-monitoring-project-and-application-metrics-using-developer-perspective -- Name: Monitoring application health - File: application-health -- Name: Editing applications - File: odc-editing-applications -- Name: Working with quotas - File: working-with-quotas - Distros: openshift-online -- Name: Pruning objects to reclaim resources - File: pruning-objects - Distros: openshift-origin,openshift-enterprise -- Name: Idling 
applications - File: idling-applications - Distros: openshift-origin,openshift-enterprise -- Name: Deleting applications - File: odc-deleting-applications -- Name: Using the Red Hat Marketplace - File: red-hat-marketplace - Distros: openshift-origin,openshift-enterprise ---- -Name: Machine management -Dir: machine_management -Distros: openshift-origin,openshift-enterprise -Topics: -- Name: Overview of machine management - File: index -- Name: Managing compute machines with the Machine API - Dir: creating_machinesets - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Creating a compute machine set on Alibaba Cloud - File: creating-machineset-alibaba - - Name: Creating a compute machine set on AWS - File: creating-machineset-aws - - Name: Creating a compute machine set on Azure - File: creating-machineset-azure - - Name: Creating a compute machine set on Azure Stack Hub - File: creating-machineset-azure-stack-hub - - Name: Creating a compute machine set on GCP - File: creating-machineset-gcp - - Name: Creating a compute machine set on IBM Cloud - File: creating-machineset-ibm-cloud - - Name: Creating a compute machine set on IBM Power Virtual Server - File: creating-machineset-ibm-power-vs - - Name: Creating a compute machine set on Nutanix - File: creating-machineset-nutanix - - Name: Creating a compute machine set on OpenStack - File: creating-machineset-osp - - Name: Creating a compute machine set on vSphere - File: creating-machineset-vsphere - - Name: Creating a compute machine set on bare metal - File: creating-machineset-bare-metal -- Name: Manually scaling a compute machine set - File: manually-scaling-machineset -- Name: Modifying a compute machine set - File: modifying-machineset -- Name: Deleting a machine - File: deleting-machine -- Name: Applying autoscaling to a cluster - File: applying-autoscaling -- Name: Creating infrastructure machine sets - File: creating-infrastructure-machinesets -- Name: Adding a RHEL compute machine - File: adding-rhel-compute - Distros: openshift-enterprise -- Name: Adding more RHEL compute machines - File: more-rhel-compute - Distros: openshift-enterprise -- Name: Managing user-provisioned infrastructure manually - Dir: user_infra - Topics: - - Name: Adding compute machines to clusters with user-provisioned infrastructure manually - File: adding-compute-user-infra-general - - Name: Adding compute machines to AWS using CloudFormation templates - File: adding-aws-compute-user-infra - - Name: Adding compute machines to vSphere manually - File: adding-vsphere-compute-user-infra - - Name: Adding compute machines to bare metal - File: adding-bare-metal-compute-user-infra -- Name: Managing machines with the Cluster API - File: capi-machine-management -- Name: Managing control plane machines - Dir: control_plane_machine_management - Topics: - - Name: About control plane machine sets - File: cpmso-about - - Name: Getting started with control plane machine sets - File: cpmso-getting-started - - Name: Control plane machine set configuration - File: cpmso-configuration - - Name: Using control plane machine sets - File: cpmso-using - - Name: Control plane resiliency and recovery - File: cpmso-resiliency - - Name: Troubleshooting the control plane machine set - File: cpmso-troubleshooting - - Name: Disabling the control plane machine set - File: cpmso-disabling -- Name: Deploying machine health checks - File: deploying-machine-health-checks ---- -Name: Hosted control planes -Dir: hosted_control_planes -Distros: openshift-enterprise, 
openshift-origin -Topics: -- Name: Hosted control planes overview - File: index -- Name: Configuring hosted control planes - File: hcp-configuring -- Name: Managing hosted control planes - File: hcp-managing -- Name: Backup, restore, and disaster recovery for hosted control planes - File: hcp-backup-restore-dr ---- -Name: Nodes -Dir: nodes -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Overview of nodes - File: index -- Name: Working with pods - Dir: pods - Topics: - - Name: About pods - File: nodes-pods-using - - Name: Viewing pods - File: nodes-pods-viewing - - Name: Configuring a cluster for pods - File: nodes-pods-configuring - Distros: openshift-enterprise,openshift-origin - - Name: Automatically scaling pods with the horizontal pod autoscaler - File: nodes-pods-autoscaling - - Name: Automatically adjust pod resource levels with the vertical pod autoscaler - File: nodes-pods-vertical-autoscaler - - Name: Providing sensitive data to pods - File: nodes-pods-secrets - - Name: Creating and using config maps - File: nodes-pods-configmaps - - Name: Using Device Manager to make devices available to nodes - File: nodes-pods-plugins - Distros: openshift-enterprise,openshift-origin - - Name: Including pod priority in pod scheduling decisions - File: nodes-pods-priority - Distros: openshift-enterprise,openshift-origin - - Name: Placing pods on specific nodes using node selectors - File: nodes-pods-node-selectors - Distros: openshift-enterprise,openshift-origin - - Name: Run Once Duration Override Operator - Dir: run_once_duration_override - Distros: openshift-enterprise - Topics: - - Name: Run Once Duration Override Operator overview - File: index - - Name: Run Once Duration Override Operator release notes - File: run-once-duration-override-release-notes - - Name: Overriding the active deadline for run-once pods - File: run-once-duration-override-install - - Name: Uninstalling the Run Once Duration Override Operator - File: run-once-duration-override-uninstall -- Name: Automatically scaling pods with the Custom Metrics Autoscaler Operator - Dir: cma - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Custom Metrics Autoscaler Operator overview - File: nodes-cma-autoscaling-custom - - Name: Custom Metrics Autoscaler Operator release notes - File: nodes-cma-autoscaling-custom-rn - - Name: Installing the custom metrics autoscaler - File: nodes-cma-autoscaling-custom-install - - Name: Understanding the custom metrics autoscaler triggers - File: nodes-cma-autoscaling-custom-trigger - - Name: Understanding the custom metrics autoscaler trigger authentications - File: nodes-cma-autoscaling-custom-trigger-auth - - Name: Pausing the custom metrics autoscaler - File: nodes-cma-autoscaling-custom-pausing - - Name: Gathering audit logs - File: nodes-cma-autoscaling-custom-audit-log - - Name: Gathering debugging data - File: nodes-cma-autoscaling-custom-debugging - - Name: Viewing Operator metrics - File: nodes-cma-autoscaling-custom-metrics - - Name: Understanding how to add custom metrics autoscalers - File: nodes-cma-autoscaling-custom-adding - - Name: Removing the Custom Metrics Autoscaler Operator - File: nodes-cma-autoscaling-custom-removing -- Name: Controlling pod placement onto nodes (scheduling) - Dir: scheduling - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: About pod placement using the scheduler - File: nodes-scheduler-about - - Name: Scheduling pods using a scheduler profile - File: nodes-scheduler-profiles - - Name: Placing pods 
relative to other pods using pod affinity and anti-affinity rules - File: nodes-scheduler-pod-affinity - - Name: Controlling pod placement on nodes using node affinity rules - File: nodes-scheduler-node-affinity - - Name: Placing pods onto overcommitted nodes - File: nodes-scheduler-overcommit - - Name: Controlling pod placement using node taints - File: nodes-scheduler-taints-tolerations - - Name: Placing pods on specific nodes using node selectors - File: nodes-scheduler-node-selectors - - Name: Controlling pod placement using pod topology spread constraints - File: nodes-scheduler-pod-topology-spread-constraints -# - Name: Placing a pod on a specific node by name -# File: nodes-scheduler-node-names -# - Name: Placing a pod in a specific project -# File: nodes-scheduler-node-projects -# - Name: Keeping your cluster balanced using the descheduler -# File: nodes-scheduler-descheduler - - Name: Evicting pods using the descheduler - File: nodes-descheduler - - Name: Secondary scheduler - Dir: secondary_scheduler - Distros: openshift-enterprise - Topics: - - Name: Secondary scheduler overview - File: index - - Name: Secondary Scheduler Operator release notes - File: nodes-secondary-scheduler-release-notes - - Name: Scheduling pods using a secondary scheduler - File: nodes-secondary-scheduler-configuring - - Name: Uninstalling the Secondary Scheduler Operator - File: nodes-secondary-scheduler-uninstalling -- Name: Using Jobs and DaemonSets - Dir: jobs - Topics: - - Name: Running background tasks on nodes automatically with daemonsets - File: nodes-pods-daemonsets - Distros: openshift-enterprise,openshift-origin - - Name: Running tasks in pods using jobs - File: nodes-nodes-jobs -- Name: Working with nodes - Dir: nodes - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Viewing and listing the nodes in your cluster - File: nodes-nodes-viewing - - Name: Working with nodes - File: nodes-nodes-working - - Name: Managing nodes - File: nodes-nodes-managing - - Name: Managing graceful node shutdown - File: nodes-nodes-graceful-shutdown - - Name: Managing the maximum number of pods per node - File: nodes-nodes-managing-max-pods - - Name: Using the Node Tuning Operator - File: nodes-node-tuning-operator - - Name: Remediating, fencing, and maintaining nodes - File: nodes-remediating-fencing-maintaining-rhwa - - Name: Understanding node rebooting - File: nodes-nodes-rebooting - - Name: Freeing node resources using garbage collection - File: nodes-nodes-garbage-collection - - Name: Allocating resources for nodes - File: nodes-nodes-resources-configuring - - Name: Allocating specific CPUs for nodes in a cluster - File: nodes-nodes-resources-cpus - - Name: Enabling TLS security profiles for the kubelet - File: nodes-nodes-tls - Distros: openshift-enterprise,openshift-origin -# - Name: Monitoring for problems in your nodes -# File: nodes-nodes-problem-detector - - Name: Machine Config Daemon metrics - File: nodes-nodes-machine-config-daemon-metrics - - Name: Creating infrastructure nodes - File: nodes-nodes-creating-infrastructure-nodes -- Name: Working with containers - Dir: containers - Topics: - - Name: Understanding containers - File: nodes-containers-using - - Name: Using Init Containers to perform tasks before a pod is deployed - File: nodes-containers-init - Distros: openshift-enterprise,openshift-origin - - Name: Using volumes to persist container data - File: nodes-containers-volumes - - Name: Mapping volumes using projected volumes - File: nodes-containers-projected-volumes - - 
Name: Allowing containers to consume API objects - File: nodes-containers-downward-api - - Name: Copying files to or from a container - File: nodes-containers-copying-files - - Name: Executing remote commands in a container - File: nodes-containers-remote-commands - - Name: Using port forwarding to access applications in a container - File: nodes-containers-port-forwarding - - Name: Using sysctls in containers - File: nodes-containers-sysctls -- Name: Working with clusters - Dir: clusters - Topics: - - Name: Viewing system event information in a cluster - File: nodes-containers-events - - Name: Analyzing cluster resource levels - File: nodes-cluster-resource-levels - Distros: openshift-enterprise,openshift-origin - - Name: Setting limit ranges - File: nodes-cluster-limit-ranges - - Name: Configuring cluster memory to meet container memory and risk requirements - File: nodes-cluster-resource-configure - Distros: openshift-enterprise,openshift-origin - - Name: Configuring your cluster to place pods on overcommitted nodes - File: nodes-cluster-overcommit - Distros: openshift-enterprise,openshift-origin - - Name: Configuring the Linux cgroup version on your nodes - File: nodes-cluster-cgroups-2 - Distros: openshift-enterprise - - Name: Configuring the Linux cgroup version on your nodes - File: nodes-cluster-cgroups-okd - Distros: openshift-origin - - Name: Enabling features using FeatureGates - File: nodes-cluster-enabling-features - Distros: openshift-enterprise,openshift-origin - - Name: Improving cluster stability in high latency environments using worker latency profiles - File: nodes-cluster-worker-latency-profiles - Distros: openshift-enterprise,openshift-origin -- Name: Remote worker nodes on the network edge - Dir: edge - Distros: openshift-enterprise - Topics: - - Name: Using remote worker nodes at the network edge - File: nodes-edge-remote-workers -- Name: Worker nodes for single-node OpenShift clusters - Dir: nodes - Distros: openshift-enterprise - Topics: - - Name: Adding worker nodes to single-node OpenShift clusters - File: nodes-sno-worker-nodes ---- -Name: Windows Container Support for OpenShift -Dir: windows_containers -Distros: openshift-origin,openshift-enterprise -Topics: -- Name: Red Hat OpenShift support for Windows Containers overview - File: index -- Name: Red Hat OpenShift support for Windows Containers release notes - File: windows-containers-release-notes-6-x -- Name: Understanding Windows container workloads - File: understanding-windows-container-workloads -- Name: Enabling Windows container workloads - File: enabling-windows-container-workloads -- Name: Creating Windows machine sets - Dir: creating_windows_machinesets - Topics: - - Name: Creating a Windows machine set on AWS - File: creating-windows-machineset-aws - - Name: Creating a Windows machine set on Azure - File: creating-windows-machineset-azure - - Name: Creating a Windows machine set on vSphere - File: creating-windows-machineset-vsphere - - Name: Creating a Windows machine set on GCP - File: creating-windows-machineset-gcp -- Name: Scheduling Windows container workloads - File: scheduling-windows-workloads -- Name: Windows node upgrades - File: windows-node-upgrades -- Name: Using Bring-Your-Own-Host Windows instances as nodes - File: byoh-windows-instance -- Name: Removing Windows nodes - File: removing-windows-nodes -- Name: Disabling Windows container workloads - File: disabling-windows-container-workloads ---- -Name: Sandboxed Containers Support for OpenShift -Dir: sandboxed_containers -Distros: 
openshift-enterprise -Topics: -- Name: OpenShift sandboxed containers documentation has been moved - File: sandboxed-containers-moved ---- -Name: Logging -Dir: logging -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Release notes - File: cluster-logging-release-notes -- Name: Logging 5.7 - Dir: v5_7 - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Logging 5.7 Release Notes - File: logging-5-7-release-notes - - Name: Getting started with logging - File: logging-5-7-getting-started - - Name: Understanding Logging - File: logging-5-7-architecture - - Name: Configuring Logging - File: logging-5-7-configuration - - Name: Administering Logging - File: logging-5-7-administration -# Name: Logging Reference -# File: logging-5-7-reference -- Name: Logging 5.6 - Dir: v5_6 - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Logging 5.6 Release Notes - File: logging-5-6-release-notes - - Name: Getting started with logging - File: logging-5-6-getting-started - - Name: Understanding Logging - File: logging-5-6-architecture - - Name: Configuring Logging - File: logging-5-6-configuration - - Name: Administering Logging - File: logging-5-6-administration - - Name: Logging Reference - File: logging-5-6-reference -- Name: Logging 5.5 - Dir: v5_5 - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Logging 5.5 Release Notes - File: logging-5-5-release-notes - - Name: Getting started with logging - File: logging-5-5-getting-started - - Name: Understanding Logging - File: logging-5-5-architecture - - Name: Administering Logging - File: logging-5-5-administration -# - Name: Configuring Logging -# File: logging-5-5-configuration -# - Name: Logging Reference -# File: logging-5-5-reference -- Name: About Logging - File: cluster-logging -- Name: Installing Logging - File: cluster-logging-deploying - Distros: openshift-enterprise,openshift-origin -- Name: Configuring your Logging deployment - Dir: config - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: About the Cluster Logging custom resource - File: cluster-logging-configuring-cr - - Name: Configuring the logging collector - File: cluster-logging-collector - - Name: Configuring the log store - File: cluster-logging-log-store - - Name: Configuring the log visualizer - File: cluster-logging-visualizer - - Name: Configuring Logging storage - File: cluster-logging-storage-considerations - - Name: Configuring CPU and memory limits for Logging components - File: cluster-logging-memory - - Name: Using tolerations to control Logging pod placement - File: cluster-logging-tolerations - - Name: Moving the Logging resources with node selectors - File: cluster-logging-moving-nodes - - Name: Configuring systemd-journald for Logging - File: cluster-logging-systemd - - Name: Maintenance and support - File: cluster-logging-maintenance-support -- Name: Logging with the LokiStack - File: cluster-logging-loki -- Name: Viewing logs for a specific resource - File: viewing-resource-logs -- Name: Viewing cluster logs in Kibana - File: cluster-logging-visualizer - Distros: openshift-enterprise,openshift-origin -- Name: Forwarding logs to third party systems - File: cluster-logging-external - Distros: openshift-enterprise,openshift-origin -- Name: Enabling JSON logging - File: cluster-logging-enabling-json-logging -- Name: Collecting and storing Kubernetes events - File: cluster-logging-eventrouter - Distros: openshift-enterprise,openshift-origin -# - Name: Forwarding logs using ConfigMaps -# File: 
cluster-logging-external-configmap -# Distros: openshift-enterprise,openshift-origin -- Name: Updating Logging - File: cluster-logging-upgrading -- Name: Viewing cluster dashboards - File: cluster-logging-dashboards -- Name: Troubleshooting Logging - Dir: troubleshooting - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Viewing Logging status - File: cluster-logging-cluster-status - - Name: Viewing the status of the log store - File: cluster-logging-log-store-status - - Name: Understanding Logging alerts - File: cluster-logging-alerts - - Name: Collecting logging data for Red Hat Support - File: cluster-logging-must-gather - - Name: Troubleshooting for Critical Alerts - File: cluster-logging-troubleshooting-for-critical-alerts -- Name: Uninstalling Logging - File: cluster-logging-uninstall -- Name: Exported fields - File: cluster-logging-exported-fields - Distros: openshift-enterprise,openshift-origin ---- -Name: Monitoring -Dir: monitoring -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Monitoring overview - File: monitoring-overview -- Name: Configuring the monitoring stack - File: configuring-the-monitoring-stack -- Name: Enabling monitoring for user-defined projects - File: enabling-monitoring-for-user-defined-projects -- Name: Enabling alert routing for user-defined projects - File: enabling-alert-routing-for-user-defined-projects -- Name: Managing metrics - File: managing-metrics -- Name: Managing alerts - File: managing-alerts -- Name: Reviewing monitoring dashboards - File: reviewing-monitoring-dashboards -- Name: The NVIDIA GPU administration dashboard - File: nvidia-gpu-admin-dashboard -- Name: Monitoring bare-metal events - File: using-rfhe -- Name: Accessing third-party monitoring APIs - File: accessing-third-party-monitoring-apis -- Name: Troubleshooting monitoring issues - File: troubleshooting-monitoring-issues -- Name: Config map reference for the Cluster Monitoring Operator - File: config-map-reference-for-the-cluster-monitoring-operator ---- -Name: Scalability and performance -Dir: scalability_and_performance -Distros: openshift-origin,openshift-enterprise,openshift-webscale,openshift-dpu -Topics: -- Name: Recommended performance and scalability practices - Dir: recommended-performance-scale-practices - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Recommended control plane practices - File: recommended-control-plane-practices - - Name: Recommended infrastructure practices - File: recommended-infrastructure-practices - - Name: Recommended etcd practices - File: recommended-etcd-practices -- Name: Planning your environment according to object maximums - File: planning-your-environment-according-to-object-maximums - Distros: openshift-origin,openshift-enterprise -- Name: Recommended host practices for IBM Z & IBM LinuxONE environments - File: ibm-z-recommended-host-practices - Distros: openshift-enterprise -- Name: Using the Node Tuning Operator - File: using-node-tuning-operator - Distros: openshift-origin,openshift-enterprise -- Name: Using CPU Manager and Topology Manager - File: using-cpu-manager - Distros: openshift-origin,openshift-enterprise -- Name: Scheduling NUMA-aware workloads - File: cnf-numa-aware-scheduling - Distros: openshift-origin,openshift-enterprise -- Name: Scalability and performance optimization - Dir: optimization - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Optimizing storage - File: optimizing-storage - - Name: Optimizing routing - File: routing-optimization - - 
Name: Optimizing networking - File: optimizing-networking - - Name: Optimizing CPU usage - File: optimizing-cpu-usage -- Name: Managing bare metal hosts - File: managing-bare-metal-hosts - Distros: openshift-origin,openshift-enterprise -- Name: What huge pages do and how they are consumed by apps - File: what-huge-pages-do-and-how-they-are-consumed-by-apps - Distros: openshift-origin,openshift-enterprise -- Name: Low latency tuning - File: cnf-low-latency-tuning - Distros: openshift-origin,openshift-enterprise -- Name: Performing latency tests for platform verification - File: cnf-performing-platform-verification-latency-tests -- Name: Improving cluster stability in high latency environments using worker latency profiles - File: scaling-worker-latency-profiles -- Name: Topology Aware Lifecycle Manager for cluster updates - File: cnf-talm-for-cluster-upgrades - Distros: openshift-origin,openshift-enterprise -- Name: Creating a performance profile - File: cnf-create-performance-profiles - Distros: openshift-origin,openshift-enterprise -- Name: Workload partitioning - File: enabling-workload-partitioning - Distros: openshift-origin,openshift-enterprise -- Name: Requesting CRI-O and Kubelet profiling data by using the Node Observability Operator - File: node-observability-operator - Distros: openshift-origin,openshift-enterprise -- Name: Clusters at the network far edge - Dir: ztp_far_edge - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Challenges of the network far edge - File: ztp-deploying-far-edge-clusters-at-scale - - Name: Preparing the hub cluster for ZTP - File: ztp-preparing-the-hub-cluster - - Name: Installing managed clusters with RHACM and SiteConfig resources - File: ztp-deploying-far-edge-sites - - Name: Configuring managed clusters with policies and PolicyGenTemplate resources - File: ztp-configuring-managed-clusters-policies - - Name: Manually installing a single-node OpenShift cluster with ZTP - File: ztp-manual-install - - Name: Recommended single-node OpenShift cluster configuration for vDU application workloads - File: ztp-reference-cluster-configuration-for-vdu - - Name: Validating cluster tuning for vDU application workloads - File: ztp-vdu-validating-cluster-tuning - - Name: Advanced managed cluster configuration with SiteConfig resources - File: ztp-advanced-install-ztp - - Name: Advanced managed cluster configuration with PolicyGenTemplate resources - File: ztp-advanced-policy-config - - Name: Updating managed clusters with the Topology Aware Lifecycle Manager - File: ztp-talm-updating-managed-policies - - Name: Updating GitOps ZTP - File: ztp-updating-gitops - - Name: Expanding single-node OpenShift clusters with GitOps ZTP - File: ztp-sno-additional-worker-node - - Name: Pre-caching images for single-node OpenShift deployments - File: ztp-precaching-tool ---- -Name: Specialized hardware and driver enablement -Dir: hardware_enablement -Distros: openshift-origin,openshift-enterprise -Topics: -- Name: About specialized hardware and driver enablement - File: about-hardware-enablement -- Name: Driver Toolkit - File: psap-driver-toolkit -- Name: Node Feature Discovery Operator - File: psap-node-feature-discovery-operator -- Name: Kernel Module Management Operator - File: kmm-kernel-module-management ---- -Name: Backup and restore -Dir: backup_and_restore -Distros: openshift-origin,openshift-enterprise -Topics: -- Name: Overview of backup and restore operations - File: index -- Name: Shutting down a cluster gracefully - File: graceful-cluster-shutdown 
-- Name: Restarting a cluster gracefully - File: graceful-cluster-restart -- Name: Application backup and restore - Dir: application_backup_and_restore - Topics: - - Name: OADP release notes - File: oadp-release-notes - - Name: OADP features and plugins - File: oadp-features-plugins - - Name: Installing and configuring OADP - Dir: installing - Topics: - - Name: About installing OADP - File: about-installing-oadp - - Name: Installing and configuring OADP with AWS - File: installing-oadp-aws - - Name: Installing and configuring OADP with Azure - File: installing-oadp-azure - - Name: Installing and configuring OADP with GCP - File: installing-oadp-gcp - - Name: Installing and configuring OADP with MCG - File: installing-oadp-mcg - - Name: Installing and configuring OADP with ODF - File: installing-oadp-ocs - - Name: Uninstalling OADP - File: uninstalling-oadp - - Name: Backing up and restoring - Dir: backing_up_and_restoring - Topics: - - Name: Backing up applications - File: backing-up-applications - - Name: Restoring applications - File: restoring-applications - - Name: Troubleshooting - File: troubleshooting - - Name: OADP API - File: oadp-api - - Name: Advanced OADP features and functionalities - File: oadp-advanced-topics -- Name: Control plane backup and restore - Dir: control_plane_backup_and_restore - Topics: - - Name: Backing up etcd data - File: backing-up-etcd - - Name: Replacing an unhealthy etcd member - File: replacing-unhealthy-etcd-member - - Name: Disaster recovery - Dir: disaster_recovery - Topics: - - Name: About disaster recovery - File: about-disaster-recovery - - Name: Restoring to a previous cluster state - File: scenario-2-restoring-cluster-state - - Name: Recovering from expired control plane certificates - File: scenario-3-expired-certs ---- -Name: Migrating from version 3 to 4 -Dir: migrating_from_ocp_3_to_4 -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Migrating from version 3 to 4 overview - File: index -- Name: About migrating from OpenShift Container Platform 3 to 4 - File: about-migrating-from-3-to-4 - Distros: openshift-enterprise -- Name: About migrating from OKD 3 to 4 - File: about-migrating-from-3-to-4 - Distros: openshift-origin -- Name: Differences between OpenShift Container Platform 3 and 4 - File: planning-migration-3-4 - Distros: openshift-enterprise -- Name: Differences between OKD 3 and 4 - File: planning-migration-3-4 - Distros: openshift-origin -- Name: Network considerations - File: planning-considerations-3-4 -- Name: About MTC - File: about-mtc-3-4 -- Name: Installing MTC - File: installing-3-4 -- Name: Installing MTC in a restricted network environment - File: installing-restricted-3-4 -- Name: Upgrading MTC - File: upgrading-3-4 -- Name: Premigration checklists - File: premigration-checklists-3-4 -- Name: Migrating your applications - File: migrating-applications-3-4 -- Name: Advanced migration options - File: advanced-migration-options-3-4 -- Name: Troubleshooting - File: troubleshooting-3-4 ---- -Name: Migration Toolkit for Containers -Dir: migration_toolkit_for_containers -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: About MTC - File: about-mtc -- Name: MTC release notes - File: mtc-release-notes -- Name: Installing MTC - File: installing-mtc -- Name: Installing MTC in a restricted network environment - File: installing-mtc-restricted -- Name: Upgrading MTC - File: upgrading-mtc -- Name: Premigration checklists - File: premigration-checklists-mtc -- Name: Network considerations - File: 
network-considerations-mtc -- Name: Migrating your applications - File: migrating-applications-with-mtc -- Name: Advanced migration options - File: advanced-migration-options-mtc -- Name: Troubleshooting - File: troubleshooting-mtc ---- -Name: API reference -Dir: rest_api -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Understanding API tiers - File: understanding-api-support-tiers -- Name: API compatibility guidelines - File: understanding-compatibility-guidelines -- Name: Editing kubelet log level verbosity and gathering logs - File: editing-kubelet-log-level-verbosity -- Name: API list - File: index -- Name: Common object reference - Dir: objects - Topics: - - Name: Index - File: index -- Name: Authorization APIs - Dir: authorization_apis - Topics: - - Name: About Authorization APIs - File: authorization-apis-index - - Name: 'LocalResourceAccessReview [authorization.openshift.io/v1]' - File: localresourceaccessreview-authorization-openshift-io-v1 - - Name: 'LocalSubjectAccessReview [authorization.openshift.io/v1]' - File: localsubjectaccessreview-authorization-openshift-io-v1 - - Name: 'ResourceAccessReview [authorization.openshift.io/v1]' - File: resourceaccessreview-authorization-openshift-io-v1 - - Name: 'SelfSubjectRulesReview [authorization.openshift.io/v1]' - File: selfsubjectrulesreview-authorization-openshift-io-v1 - - Name: 'SubjectAccessReview [authorization.openshift.io/v1]' - File: subjectaccessreview-authorization-openshift-io-v1 - - Name: 'SubjectRulesReview [authorization.openshift.io/v1]' - File: subjectrulesreview-authorization-openshift-io-v1 - - Name: 'TokenRequest [authentication.k8s.io/v1]' - File: tokenrequest-authentication-k8s-io-v1 - - Name: 'TokenReview [authentication.k8s.io/v1]' - File: tokenreview-authentication-k8s-io-v1 - - Name: 'LocalSubjectAccessReview [authorization.k8s.io/v1]' - File: localsubjectaccessreview-authorization-k8s-io-v1 - - Name: 'SelfSubjectAccessReview [authorization.k8s.io/v1]' - File: selfsubjectaccessreview-authorization-k8s-io-v1 - - Name: 'SelfSubjectRulesReview [authorization.k8s.io/v1]' - File: selfsubjectrulesreview-authorization-k8s-io-v1 - - Name: 'SubjectAccessReview [authorization.k8s.io/v1]' - File: subjectaccessreview-authorization-k8s-io-v1 -- Name: Autoscale APIs - Dir: autoscale_apis - Topics: - - Name: About Autoscale APIs - File: autoscale-apis-index - - Name: 'ClusterAutoscaler [autoscaling.openshift.io/v1]' - File: clusterautoscaler-autoscaling-openshift-io-v1 - - Name: 'MachineAutoscaler [autoscaling.openshift.io/v1beta1]' - File: machineautoscaler-autoscaling-openshift-io-v1beta1 - - Name: 'HorizontalPodAutoscaler [autoscaling/v2]' - File: horizontalpodautoscaler-autoscaling-v2 - - Name: 'Scale [autoscaling/v1]' - File: scale-autoscaling-v1 -- Name: Config APIs - Dir: config_apis - Topics: - - Name: About Config APIs - File: config-apis-index - - Name: 'APIServer [config.openshift.io/v1]' - File: apiserver-config-openshift-io-v1 - - Name: 'Authentication [config.openshift.io/v1]' - File: authentication-config-openshift-io-v1 - - Name: 'Build [config.openshift.io/v1]' - File: build-config-openshift-io-v1 - - Name: 'ClusterOperator [config.openshift.io/v1]' - File: clusteroperator-config-openshift-io-v1 - - Name: 'ClusterVersion [config.openshift.io/v1]' - File: clusterversion-config-openshift-io-v1 - - Name: 'Console [config.openshift.io/v1]' - File: console-config-openshift-io-v1 - - Name: 'DNS [config.openshift.io/v1]' - File: dns-config-openshift-io-v1 - - Name: 'FeatureGate 
[config.openshift.io/v1]' - File: featuregate-config-openshift-io-v1 - - Name: 'HelmChartRepository [helm.openshift.io/v1beta1]' - File: helmchartrepository-helm-openshift-io-v1beta1 - - Name: 'Image [config.openshift.io/v1]' - File: image-config-openshift-io-v1 - - Name: 'ImageDigestMirrorSet [config.openshift.io/v1]' - File: imagedigestmirrorset-config-openshift-io-v1 - - Name: 'ImageContentPolicy [config.openshift.io/v1]' - File: imagecontentpolicy-config-openshift-io-v1 - - Name: 'ImageTagMirrorSet [config.openshift.io/v1]' - File: imagetagmirrorset-config-openshift-io-v1 - - Name: 'Infrastructure [config.openshift.io/v1]' - File: infrastructure-config-openshift-io-v1 - - Name: 'Ingress [config.openshift.io/v1]' - File: ingress-config-openshift-io-v1 - - Name: 'Network [config.openshift.io/v1]' - File: network-config-openshift-io-v1 - - Name: 'Node [config.openshift.io/v1]' - File: node-config-openshift-io-v1 - - Name: 'OAuth [config.openshift.io/v1]' - File: oauth-config-openshift-io-v1 - - Name: 'OperatorHub [config.openshift.io/v1]' - File: operatorhub-config-openshift-io-v1 - - Name: 'Project [config.openshift.io/v1]' - File: project-config-openshift-io-v1 - - Name: 'ProjectHelmChartRepository [helm.openshift.io/v1beta1]' - File: projecthelmchartrepository-helm-openshift-io-v1beta1 - - Name: 'Proxy [config.openshift.io/v1]' - File: proxy-config-openshift-io-v1 - - Name: 'Scheduler [config.openshift.io/v1]' - File: scheduler-config-openshift-io-v1 -- Name: Console APIs - Dir: console_apis - Topics: - - Name: About Console APIs - File: console-apis-index - - Name: 'ConsoleCLIDownload [console.openshift.io/v1]' - File: consoleclidownload-console-openshift-io-v1 - - Name: 'ConsoleExternalLogLink [console.openshift.io/v1]' - File: consoleexternalloglink-console-openshift-io-v1 - - Name: 'ConsoleLink [console.openshift.io/v1]' - File: consolelink-console-openshift-io-v1 - - Name: 'ConsoleNotification [console.openshift.io/v1]' - File: consolenotification-console-openshift-io-v1 - - Name: 'ConsolePlugin [console.openshift.io/v1]' - File: consoleplugin-console-openshift-io-v1 - - Name: 'ConsoleQuickStart [console.openshift.io/v1]' - File: consolequickstart-console-openshift-io-v1 - - Name: 'ConsoleYAMLSample [console.openshift.io/v1]' - File: consoleyamlsample-console-openshift-io-v1 -- Name: Extension APIs - Dir: extension_apis - Topics: - - Name: About Extension APIs - File: extension-apis-index - - Name: 'APIService [apiregistration.k8s.io/v1]' - File: apiservice-apiregistration-k8s-io-v1 - - Name: 'CustomResourceDefinition [apiextensions.k8s.io/v1]' - File: customresourcedefinition-apiextensions-k8s-io-v1 - - Name: 'MutatingWebhookConfiguration [admissionregistration.k8s.io/v1]' - File: mutatingwebhookconfiguration-admissionregistration-k8s-io-v1 - - Name: 'ValidatingWebhookConfiguration [admissionregistration.k8s.io/v1]' - File: validatingwebhookconfiguration-admissionregistration-k8s-io-v1 -- Name: Image APIs - Dir: image_apis - Topics: - - Name: About Image APIs - File: image-apis-index - - Name: 'Image [image.openshift.io/v1]' - File: image-image-openshift-io-v1 - - Name: 'ImageSignature [image.openshift.io/v1]' - File: imagesignature-image-openshift-io-v1 - - Name: 'ImageStreamImage [image.openshift.io/v1]' - File: imagestreamimage-image-openshift-io-v1 - - Name: 'ImageStreamImport [image.openshift.io/v1]' - File: imagestreamimport-image-openshift-io-v1 - - Name: 'ImageStreamLayers [image.openshift.io/v1]' - File: imagestreamlayers-image-openshift-io-v1 - - Name: 
'ImageStreamMapping [image.openshift.io/v1]' - File: imagestreammapping-image-openshift-io-v1 - - Name: 'ImageStream [image.openshift.io/v1]' - File: imagestream-image-openshift-io-v1 - - Name: 'ImageStreamTag [image.openshift.io/v1]' - File: imagestreamtag-image-openshift-io-v1 - - Name: 'ImageTag [image.openshift.io/v1]' - File: imagetag-image-openshift-io-v1 - - Name: 'SecretList [image.openshift.io/v1]' - File: secretlist-image-openshift-io-v1 -- Name: Machine APIs - Dir: machine_apis - Topics: - - Name: About Machine APIs - File: machine-apis-index - - Name: 'ContainerRuntimeConfig [machineconfiguration.openshift.io/v1]' - File: containerruntimeconfig-machineconfiguration-openshift-io-v1 - - Name: 'ControllerConfig [machineconfiguration.openshift.io/v1]' - File: controllerconfig-machineconfiguration-openshift-io-v1 - - Name: 'ControlPlaneMachineSet [machine.openshift.io/v1]' - File: controlplanemachineset-machine-openshift-io-v1 - - Name: 'KubeletConfig [machineconfiguration.openshift.io/v1]' - File: kubeletconfig-machineconfiguration-openshift-io-v1 - - Name: 'MachineConfigPool [machineconfiguration.openshift.io/v1]' - File: machineconfigpool-machineconfiguration-openshift-io-v1 - - Name: 'MachineConfig [machineconfiguration.openshift.io/v1]' - File: machineconfig-machineconfiguration-openshift-io-v1 - - Name: 'MachineHealthCheck [machine.openshift.io/v1beta1]' - File: machinehealthcheck-machine-openshift-io-v1beta1 - - Name: 'Machine [machine.openshift.io/v1beta1]' - File: machine-machine-openshift-io-v1beta1 - - Name: 'MachineSet [machine.openshift.io/v1beta1]' - File: machineset-machine-openshift-io-v1beta1 -- Name: Metadata APIs - Dir: metadata_apis - Topics: - - Name: About Metadata APIs - File: metadata-apis-index - - Name: 'APIRequestCount [apiserver.openshift.io/v1]' - File: apirequestcount-apiserver-openshift-io-v1 - - Name: 'Binding [undefined/v1]' - File: binding-v1 - - Name: 'ComponentStatus [undefined/v1]' - File: componentstatus-v1 - - Name: 'ConfigMap [undefined/v1]' - File: configmap-v1 - - Name: 'ControllerRevision [apps/v1]' - File: controllerrevision-apps-v1 - - Name: 'Event [events.k8s.io/v1]' - File: event-events-k8s-io-v1 - - Name: 'Event [undefined/v1]' - File: event-v1 - - Name: 'Lease [coordination.k8s.io/v1]' - File: lease-coordination-k8s-io-v1 - - Name: 'Namespace [undefined/v1]' - File: namespace-v1 -- Name: Monitoring APIs - Dir: monitoring_apis - Topics: - - Name: About Monitoring APIs - File: monitoring-apis-index - - Name: 'Alertmanager [monitoring.coreos.com/v1]' - File: alertmanager-monitoring-coreos-com-v1 - - Name: 'AlertmanagerConfig [monitoring.coreos.com/v1beta1]' - File: alertmanagerconfig-monitoring-coreos-com-v1beta1 - - Name: 'PodMonitor [monitoring.coreos.com/v1]' - File: podmonitor-monitoring-coreos-com-v1 - - Name: 'Probe [monitoring.coreos.com/v1]' - File: probe-monitoring-coreos-com-v1 - - Name: 'Prometheus [monitoring.coreos.com/v1]' - File: prometheus-monitoring-coreos-com-v1 - - Name: 'PrometheusRule [monitoring.coreos.com/v1]' - File: prometheusrule-monitoring-coreos-com-v1 - - Name: 'ServiceMonitor [monitoring.coreos.com/v1]' - File: servicemonitor-monitoring-coreos-com-v1 - - Name: 'ThanosRuler [monitoring.coreos.com/v1]' - File: thanosruler-monitoring-coreos-com-v1 -- Name: Network APIs - Dir: network_apis - Topics: - - Name: About Network APIs - File: network-apis-index - - Name: 'CloudPrivateIPConfig [cloud.network.openshift.io/v1]' - File: cloudprivateipconfig-cloud-network-openshift-io-v1 - - Name: 'EgressFirewall 
[k8s.ovn.org/v1]' - File: egressfirewall-k8s-ovn-org-v1 - - Name: 'EgressIP [k8s.ovn.org/v1]' - File: egressip-k8s-ovn-org-v1 - - Name: 'EgressQoS [k8s.ovn.org/v1]' - File: egressqos-k8s-ovn-org-v1 - - Name: 'Endpoints [undefined/v1]' - File: endpoints-v1 - - Name: 'EndpointSlice [discovery.k8s.io/v1]' - File: endpointslice-discovery-k8s-io-v1 - - Name: 'EgressRouter [network.operator.openshift.io/v1]' - File: egressrouter-network-operator-openshift-io-v1 - - Name: 'Ingress [networking.k8s.io/v1]' - File: ingress-networking-k8s-io-v1 - - Name: 'IngressClass [networking.k8s.io/v1]' - File: ingressclass-networking-k8s-io-v1 - - Name: 'IPPool [whereabouts.cni.cncf.io/v1alpha1]' - File: ippool-whereabouts-cni-cncf-io-v1alpha1 - - Name: 'NetworkAttachmentDefinition [k8s.cni.cncf.io/v1]' - File: networkattachmentdefinition-k8s-cni-cncf-io-v1 - - Name: 'NetworkPolicy [networking.k8s.io/v1]' - File: networkpolicy-networking-k8s-io-v1 - - Name: 'OverlappingRangeIPReservation [whereabouts.cni.cncf.io/v1alpha1]' - File: overlappingrangeipreservation-whereabouts-cni-cncf-io-v1alpha1 - - Name: 'PodNetworkConnectivityCheck [controlplane.operator.openshift.io/v1alpha1]' - File: podnetworkconnectivitycheck-controlplane-operator-openshift-io-v1alpha1 - - Name: 'Route [route.openshift.io/v1]' - File: route-route-openshift-io-v1 - - Name: 'Service [undefined/v1]' - File: service-v1 -- Name: Node APIs - Dir: node_apis - Topics: - - Name: About Node APIs - File: node-apis-index - - Name: 'Node [undefined/v1]' - File: node-v1 - - Name: 'PerformanceProfile [performance.openshift.io/v2]' - File: performanceprofile-performance-openshift-io-v2 - - Name: 'Profile [tuned.openshift.io/v1]' - File: profile-tuned-openshift-io-v1 - - Name: 'RuntimeClass [node.k8s.io/v1]' - File: runtimeclass-node-k8s-io-v1 - - Name: 'Tuned [tuned.openshift.io/v1]' - File: tuned-tuned-openshift-io-v1 -- Name: OAuth APIs - Dir: oauth_apis - Topics: - - Name: About OAuth APIs - File: oauth-apis-index - - Name: 'OAuthAccessToken [oauth.openshift.io/v1]' - File: oauthaccesstoken-oauth-openshift-io-v1 - - Name: 'OAuthAuthorizeToken [oauth.openshift.io/v1]' - File: oauthauthorizetoken-oauth-openshift-io-v1 - - Name: 'OAuthClientAuthorization [oauth.openshift.io/v1]' - File: oauthclientauthorization-oauth-openshift-io-v1 - - Name: 'OAuthClient [oauth.openshift.io/v1]' - File: oauthclient-oauth-openshift-io-v1 - - Name: 'UserOAuthAccessToken [oauth.openshift.io/v1]' - File: useroauthaccesstoken-oauth-openshift-io-v1 -- Name: Operator APIs - Dir: operator_apis - Topics: - - Name: About Operator APIs - File: operator-apis-index - - Name: 'Authentication [operator.openshift.io/v1]' - File: authentication-operator-openshift-io-v1 - - Name: 'CloudCredential [operator.openshift.io/v1]' - File: cloudcredential-operator-openshift-io-v1 - - Name: 'ClusterCSIDriver [operator.openshift.io/v1]' - File: clustercsidriver-operator-openshift-io-v1 - - Name: 'Console [operator.openshift.io/v1]' - File: console-operator-openshift-io-v1 - - Name: 'Config [operator.openshift.io/v1]' - File: config-operator-openshift-io-v1 - - Name: 'Config [imageregistry.operator.openshift.io/v1]' - File: config-imageregistry-operator-openshift-io-v1 - - Name: 'Config [samples.operator.openshift.io/v1]' - File: config-samples-operator-openshift-io-v1 - - Name: 'CSISnapshotController [operator.openshift.io/v1]' - File: csisnapshotcontroller-operator-openshift-io-v1 - - Name: 'DNS [operator.openshift.io/v1]' - File: dns-operator-openshift-io-v1 - - Name: 'DNSRecord 
[ingress.operator.openshift.io/v1]' - File: dnsrecord-ingress-operator-openshift-io-v1 - - Name: 'Etcd [operator.openshift.io/v1]' - File: etcd-operator-openshift-io-v1 - - Name: 'ImageContentSourcePolicy [operator.openshift.io/v1alpha1]' - File: imagecontentsourcepolicy-operator-openshift-io-v1alpha1 - - Name: 'ImagePruner [imageregistry.operator.openshift.io/v1]' - File: imagepruner-imageregistry-operator-openshift-io-v1 - - Name: 'IngressController [operator.openshift.io/v1]' - File: ingresscontroller-operator-openshift-io-v1 - - Name: 'InsightsOperator [operator.openshift.io/v1]' - File: insightsoperator-operator-openshift-io-v1 - - Name: 'KubeAPIServer [operator.openshift.io/v1]' - File: kubeapiserver-operator-openshift-io-v1 - - Name: 'KubeControllerManager [operator.openshift.io/v1]' - File: kubecontrollermanager-operator-openshift-io-v1 - - Name: 'KubeScheduler [operator.openshift.io/v1]' - File: kubescheduler-operator-openshift-io-v1 - - Name: 'KubeStorageVersionMigrator [operator.openshift.io/v1]' - File: kubestorageversionmigrator-operator-openshift-io-v1 - - Name: 'Network [operator.openshift.io/v1]' - File: network-operator-openshift-io-v1 - - Name: 'OpenShiftAPIServer [operator.openshift.io/v1]' - File: openshiftapiserver-operator-openshift-io-v1 - - Name: 'OpenShiftControllerManager [operator.openshift.io/v1]' - File: openshiftcontrollermanager-operator-openshift-io-v1 - - Name: 'OperatorPKI [network.operator.openshift.io/v1]' - File: operatorpki-network-operator-openshift-io-v1 - - Name: 'ServiceCA [operator.openshift.io/v1]' - File: serviceca-operator-openshift-io-v1 - - Name: 'Storage [operator.openshift.io/v1]' - File: storage-operator-openshift-io-v1 -- Name: OperatorHub APIs - Dir: operatorhub_apis - Topics: - - Name: About OperatorHub APIs - File: operatorhub-apis-index - - Name: 'CatalogSource [operators.coreos.com/v1alpha1]' - File: catalogsource-operators-coreos-com-v1alpha1 - - Name: 'ClusterServiceVersion [operators.coreos.com/v1alpha1]' - File: clusterserviceversion-operators-coreos-com-v1alpha1 - - Name: 'InstallPlan [operators.coreos.com/v1alpha1]' - File: installplan-operators-coreos-com-v1alpha1 - - Name: 'OLMConfig [operators.coreos.com/v1]' - File: olmconfig-operators-coreos-com-v1 - - Name: 'Operator [operators.coreos.com/v1]' - File: operator-operators-coreos-com-v1 - - Name: 'OperatorCondition [operators.coreos.com/v2]' - File: operatorcondition-operators-coreos-com-v2 - - Name: 'OperatorGroup [operators.coreos.com/v1]' - File: operatorgroup-operators-coreos-com-v1 - - Name: 'PackageManifest [packages.operators.coreos.com/v1]' - File: packagemanifest-packages-operators-coreos-com-v1 - - Name: 'Subscription [operators.coreos.com/v1alpha1]' - File: subscription-operators-coreos-com-v1alpha1 -- Name: Policy APIs - Dir: policy_apis - Topics: - - Name: About Policy APIs - File: policy-apis-index - - Name: 'Eviction [policy/v1]' - File: eviction-policy-v1 - - Name: 'PodDisruptionBudget [policy/v1]' - File: poddisruptionbudget-policy-v1 -- Name: Project APIs - Dir: project_apis - Topics: - - Name: About Project APIs - File: project-apis-index - - Name: 'Project [project.openshift.io/v1]' - File: project-project-openshift-io-v1 - - Name: 'ProjectRequest [project.openshift.io/v1]' - File: projectrequest-project-openshift-io-v1 -- Name: Provisioning APIs - Dir: provisioning_apis - Topics: - - Name: About Provisioning APIs - File: provisioning-apis-index - - Name: 'BMCEventSubscription [metal3.io/v1alpha1]' - File: bmceventsubscription-metal3-io-v1alpha1 - - Name: 
'BareMetalHost [metal3.io/v1alpha1]' - File: baremetalhost-metal3-io-v1alpha1 - - Name: 'FirmwareSchema [metal3.io/v1alpha1]' - File: firmwareschema-metal3-io-v1alpha1 - - Name: 'HardwareData [metal3.io/v1alpha1]' - File: hardwaredata-metal3-io-v1alpha1 - - Name: 'HostFirmwareSettings [metal3.io/v1alpha1]' - File: hostfirmwaresettings-metal3-io-v1alpha1 - - Name: 'Metal3Remediation [infrastructure.cluster.x-k8s.io/v1beta1]' - File: metal3remediation-infrastructure-cluster-x-k8s-io-v1beta1 - - Name: 'Metal3RemediationTemplate [infrastructure.cluster.x-k8s.io/v1beta1]' - File: metal3remediationtemplate-infrastructure-cluster-x-k8s-io-v1beta1 - - Name: 'PreprovisioningImage [metal3.io/v1alpha1]' - File: preprovisioningimage-metal3-io-v1alpha1 - - Name: 'Provisioning [metal3.io/v1alpha1]' - File: provisioning-metal3-io-v1alpha1 -- Name: RBAC APIs - Dir: rbac_apis - Topics: - - Name: About RBAC APIs - File: rbac-apis-index - - Name: 'ClusterRoleBinding [rbac.authorization.k8s.io/v1]' - File: clusterrolebinding-rbac-authorization-k8s-io-v1 - - Name: 'ClusterRole [rbac.authorization.k8s.io/v1]' - File: clusterrole-rbac-authorization-k8s-io-v1 - - Name: 'RoleBinding [rbac.authorization.k8s.io/v1]' - File: rolebinding-rbac-authorization-k8s-io-v1 - - Name: 'Role [rbac.authorization.k8s.io/v1]' - File: role-rbac-authorization-k8s-io-v1 -- Name: Role APIs - Dir: role_apis - Topics: - - Name: About Role APIs - File: role-apis-index - - Name: 'ClusterRoleBinding [authorization.openshift.io/v1]' - File: clusterrolebinding-authorization-openshift-io-v1 - - Name: 'ClusterRole [authorization.openshift.io/v1]' - File: clusterrole-authorization-openshift-io-v1 - - Name: 'RoleBindingRestriction [authorization.openshift.io/v1]' - File: rolebindingrestriction-authorization-openshift-io-v1 - - Name: 'RoleBinding [authorization.openshift.io/v1]' - File: rolebinding-authorization-openshift-io-v1 - - Name: 'Role [authorization.openshift.io/v1]' - File: role-authorization-openshift-io-v1 -- Name: Schedule and quota APIs - Dir: schedule_and_quota_apis - Topics: - - Name: About Schedule and quota APIs - File: schedule-and-quota-apis-index - - Name: 'AppliedClusterResourceQuota [quota.openshift.io/v1]' - File: appliedclusterresourcequota-quota-openshift-io-v1 - - Name: 'ClusterResourceQuota [quota.openshift.io/v1]' - File: clusterresourcequota-quota-openshift-io-v1 - - Name: 'FlowSchema [flowcontrol.apiserver.k8s.io/v1beta1]' - File: flowschema-flowcontrol-apiserver-k8s-io-v1beta1 - - Name: 'LimitRange [undefined/v1]' - File: limitrange-v1 - - Name: 'PriorityClass [scheduling.k8s.io/v1]' - File: priorityclass-scheduling-k8s-io-v1 - - Name: 'PriorityLevelConfiguration [flowcontrol.apiserver.k8s.io/v1beta1]' - File: prioritylevelconfiguration-flowcontrol-apiserver-k8s-io-v1beta1 - - Name: 'ResourceQuota [undefined/v1]' - File: resourcequota-v1 -- Name: Security APIs - Dir: security_apis - Topics: - - Name: About Security APIs - File: security-apis-index - - Name: 'CertificateSigningRequest [certificates.k8s.io/v1]' - File: certificatesigningrequest-certificates-k8s-io-v1 - - Name: 'CredentialsRequest [cloudcredential.openshift.io/v1]' - File: credentialsrequest-cloudcredential-openshift-io-v1 - - Name: 'PodSecurityPolicyReview [security.openshift.io/v1]' - File: podsecuritypolicyreview-security-openshift-io-v1 - - Name: 'PodSecurityPolicySelfSubjectReview [security.openshift.io/v1]' - File: podsecuritypolicyselfsubjectreview-security-openshift-io-v1 - - Name: 'PodSecurityPolicySubjectReview [security.openshift.io/v1]' - 
File: podsecuritypolicysubjectreview-security-openshift-io-v1 - - Name: 'RangeAllocation [security.openshift.io/v1]' - File: rangeallocation-security-openshift-io-v1 - - Name: 'Secret [undefined/v1]' - File: secret-v1 - - Name: 'SecurityContextConstraints [security.openshift.io/v1]' - File: securitycontextconstraints-security-openshift-io-v1 - - Name: 'ServiceAccount [undefined/v1]' - File: serviceaccount-v1 -- Name: Storage APIs - Dir: storage_apis - Topics: - - Name: About Storage APIs - File: storage-apis-index - - Name: 'CSIDriver [storage.k8s.io/v1]' - File: csidriver-storage-k8s-io-v1 - - Name: 'CSINode [storage.k8s.io/v1]' - File: csinode-storage-k8s-io-v1 - - Name: 'CSIStorageCapacity [storage.k8s.io/v1]' - File: csistoragecapacity-storage-k8s-io-v1 - - Name: 'PersistentVolume [undefined/v1]' - File: persistentvolume-v1 - - Name: 'PersistentVolumeClaim [undefined/v1]' - File: persistentvolumeclaim-v1 - - Name: 'StorageClass [storage.k8s.io/v1]' - File: storageclass-storage-k8s-io-v1 - - Name: 'StorageState [migration.k8s.io/v1alpha1]' - File: storagestate-migration-k8s-io-v1alpha1 - - Name: 'StorageVersionMigration [migration.k8s.io/v1alpha1]' - File: storageversionmigration-migration-k8s-io-v1alpha1 - - Name: 'VolumeAttachment [storage.k8s.io/v1]' - File: volumeattachment-storage-k8s-io-v1 - - Name: 'VolumeSnapshot [snapshot.storage.k8s.io/v1]' - File: volumesnapshot-snapshot-storage-k8s-io-v1 - - Name: 'VolumeSnapshotClass [snapshot.storage.k8s.io/v1]' - File: volumesnapshotclass-snapshot-storage-k8s-io-v1 - - Name: 'VolumeSnapshotContent [snapshot.storage.k8s.io/v1]' - File: volumesnapshotcontent-snapshot-storage-k8s-io-v1 -- Name: Template APIs - Dir: template_apis - Topics: - - Name: About Template APIs - File: template-apis-index - - Name: 'BrokerTemplateInstance [template.openshift.io/v1]' - File: brokertemplateinstance-template-openshift-io-v1 - - Name: 'PodTemplate [undefined/v1]' - File: podtemplate-v1 - - Name: 'Template [template.openshift.io/v1]' - File: template-template-openshift-io-v1 - - Name: 'TemplateInstance [template.openshift.io/v1]' - File: templateinstance-template-openshift-io-v1 -- Name: User and group APIs - Dir: user_and_group_apis - Topics: - - Name: About User and group APIs - File: user-and-group-apis-index - - Name: 'Group [user.openshift.io/v1]' - File: group-user-openshift-io-v1 - - Name: 'Identity [user.openshift.io/v1]' - File: identity-user-openshift-io-v1 - - Name: 'UserIdentityMapping [user.openshift.io/v1]' - File: useridentitymapping-user-openshift-io-v1 - - Name: 'User [user.openshift.io/v1]' - File: user-user-openshift-io-v1 -- Name: Workloads APIs - Dir: workloads_apis - Topics: - - Name: About Workloads APIs - File: workloads-apis-index - - Name: 'BuildConfig [build.openshift.io/v1]' - File: buildconfig-build-openshift-io-v1 - - Name: 'Build [build.openshift.io/v1]' - File: build-build-openshift-io-v1 - - Name: 'BuildLog [build.openshift.io/v1]' - File: buildlog-build-openshift-io-v1 - - Name: 'BuildRequest [build.openshift.io/v1]' - File: buildrequest-build-openshift-io-v1 - - Name: 'CronJob [batch/v1]' - File: cronjob-batch-v1 - - Name: 'DaemonSet [apps/v1]' - File: daemonset-apps-v1 - - Name: 'Deployment [apps/v1]' - File: deployment-apps-v1 - - Name: 'DeploymentConfig [apps.openshift.io/v1]' - File: deploymentconfig-apps-openshift-io-v1 - - Name: 'DeploymentConfigRollback [apps.openshift.io/v1]' - File: deploymentconfigrollback-apps-openshift-io-v1 - - Name: 'DeploymentLog [apps.openshift.io/v1]' - File: 
deploymentlog-apps-openshift-io-v1 - - Name: 'DeploymentRequest [apps.openshift.io/v1]' - File: deploymentrequest-apps-openshift-io-v1 - - Name: 'Job [batch/v1]' - File: job-batch-v1 - - Name: 'Pod [undefined/v1]' - File: pod-v1 - - Name: 'ReplicationController [undefined/v1]' - File: replicationcontroller-v1 - - Name: 'ReplicaSet [apps/v1]' - File: replicaset-apps-v1 - - Name: 'StatefulSet [apps/v1]' - File: statefulset-apps-v1 ---- -Name: Service Mesh -Dir: service_mesh -Distros: openshift-enterprise -Topics: -- Name: Service Mesh 2.x - Dir: v2x - Topics: - - Name: About OpenShift Service Mesh - File: ossm-about - - Name: Service Mesh 2.x release notes - File: servicemesh-release-notes - - Name: Service Mesh architecture - File: ossm-architecture - - Name: Service Mesh deployment models - File: ossm-deployment-models - - Name: Service Mesh and Istio differences - File: ossm-vs-community - - Name: Preparing to install Service Mesh - File: preparing-ossm-installation - - Name: Installing the Operators - File: installing-ossm - - Name: Creating the ServiceMeshControlPlane - File: ossm-create-smcp - - Name: Adding workloads to a service mesh - File: ossm-create-mesh - - Name: Enabling sidecar injection - File: prepare-to-deploy-applications-ossm - - Name: Upgrading Service Mesh - File: upgrading-ossm - - Name: Managing users and profiles - File: ossm-profiles-users - - Name: Security - File: ossm-security - - Name: Traffic management - File: ossm-traffic-manage - - Name: Metrics, logs, and traces - File: ossm-observability - - Name: Performance and scalability - File: ossm-performance-scalability - - Name: Deploying to production - File: ossm-deploy-production - - Name: Federation - File: ossm-federation - - Name: Extensions - File: ossm-extensions - - Name: 3scale WebAssembly for 2.1 - File: ossm-threescale-webassembly-module - - Name: 3scale Istio adapter for 2.0 - File: threescale-adapter - - Name: Troubleshooting Service Mesh - File: ossm-troubleshooting-istio - - Name: Control plane configuration reference - File: ossm-reference-smcp - - Name: Kiali configuration reference - File: ossm-reference-kiali - - Name: Jaeger configuration reference - File: ossm-reference-jaeger - - Name: Uninstalling Service Mesh - File: removing-ossm -- Name: Service Mesh 1.x - Dir: v1x - Topics: - - Name: Service Mesh 1.x release notes - File: servicemesh-release-notes - - Name: Service Mesh architecture - File: ossm-architecture - - Name: Service Mesh and Istio differences - File: ossm-vs-community - - Name: Preparing to install Service Mesh - File: preparing-ossm-installation - - Name: Installing Service Mesh - File: installing-ossm - - Name: Security - File: ossm-security - - Name: Traffic management - File: ossm-traffic-manage - - Name: Deploying applications on Service Mesh - File: prepare-to-deploy-applications-ossm - - Name: Data visualization and observability - File: ossm-observability - - Name: Custom resources - File: ossm-custom-resources - - Name: 3scale Istio adapter for 1.x - File: threescale-adapter - - Name: Removing Service Mesh - File: removing-ossm ---- -Name: Distributed tracing -Dir: distr_tracing -Distros: openshift-enterprise -Topics: -- Name: Distributed tracing release notes - File: distributed-tracing-release-notes -- Name: Distributed tracing architecture - Dir: distr_tracing_arch - Topics: - - Name: Distributed tracing architecture - File: distr-tracing-architecture -- Name: Distributed tracing installation - Dir: distr_tracing_install - Topics: - - Name: Installing distributed 
tracing - File: distr-tracing-installing - - Name: Configuring the distributed tracing platform - File: distr-tracing-deploying-jaeger - - Name: Configuring distributed tracing data collection - File: distr-tracing-deploying-otel - - Name: Upgrading distributed tracing - File: distr-tracing-updating - - Name: Removing distributed tracing - File: distr-tracing-removing ---- -Name: Virtualization -Dir: virt -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: About - Dir: about_virt - Topics: - - Name: About OpenShift Virtualization - File: about-virt - Distros: openshift-enterprise - - Name: About OKD Virtualization - File: about-virt - Distros: openshift-origin - - Name: Security policies - File: virt-security-policies - - Name: Architecture - File: virt-architecture - Distros: openshift-enterprise -- Name: Release notes - Dir: release_notes - Topics: - - Name: OpenShift Virtualization release notes - File: virt-release-notes-placeholder - Distros: openshift-enterprise - # - Name: OKD Virtualization release notes - # File: virt-release-notes-placeholder - # Distros: openshift-origin -- Name: Getting started - Dir: getting_started - Topics: - - Name: Getting started with OpenShift Virtualization - File: virt-getting-started - Distros: openshift-enterprise - - Name: Getting started with OKD Virtualization - File: virt-getting-started - Distros: openshift-origin - - Name: Using the virtctl and libguestfs CLI tools - File: virt-using-the-cli-tools - - Name: Web console overview - File: virt-web-console-overview - Distros: openshift-enterprise -- Name: Installing - Dir: install - Topics: - - Name: Preparing your cluster - File: preparing-cluster-for-virt - - Name: Installing OpenShift Virtualization - File: installing-virt - Distros: openshift-enterprise - - Name: Installing OKD Virtualization - File: installing-virt - Distros: openshift-origin - - Name: Specifying nodes for OpenShift Virtualization components - File: virt-specifying-nodes-for-virtualization-components - Distros: openshift-enterprise - - Name: Specifying nodes for OKD Virtualization components - File: virt-specifying-nodes-for-virtualization-components - Distros: openshift-origin - - Name: Uninstalling OpenShift Virtualization - File: uninstalling-virt - Distros: openshift-enterprise - - Name: Uninstalling OKD Virtualization - File: uninstalling-virt - Distros: openshift-origin -- Name: Updating - Dir: updating - Topics: - - Name: Updating OpenShift Virtualization - File: upgrading-virt - Distros: openshift-enterprise - - Name: Updating OKD Virtualization - File: upgrading-virt - Distros: openshift-origin -- Name: Virtual machines - Dir: virtual_machines - Topics: -###VIRTUAL MACHINE CHESS SALAD (silly name to highlight that the commented out assemblies need to be checked against merged filenams) - - Name: Creating virtual machines - File: virt-create-vms - - Name: Connecting to VM consoles - File: virt-accessing-vm-consoles - - Name: Configuring SSH access to VMs - File: virt-accessing-vm-ssh - - Name: Editing virtual machines - File: virt-edit-vms - - Name: Editing boot order - File: virt-edit-boot-order - - Name: Deleting virtual machines - File: virt-delete-vms - - Name: Exporting virtual machines - File: virt-exporting-vms - - Name: Managing virtual machine instances - File: virt-manage-vmis - - Name: Controlling virtual machine states - File: virt-controlling-vm-states - - Name: Automating Windows installation with sysprep - File: virt-automating-windows-sysprep - - Name: Installing the QEMU guest agent and 
VirtIO drivers - File: virt-installing-qemu-guest-agent - - Name: Viewing the QEMU guest agent information for virtual machines - File: virt-viewing-qemu-guest-agent-web - - Name: Using virtual Trusted Platform Module devices - File: virt-using-vtpm-devices - - Name: Managing virtual machines with OpenShift Pipelines - File: virt-managing-vms-openshift-pipelines - - Name: Advanced virtual machine management - Dir: advanced_vm_management - Topics: -#Advanced virtual machine configuration - - Name: Working with resource quotas for virtual machines - File: virt-working-with-resource-quotas-for-vms - - Name: Specifying nodes for virtual machines - File: virt-specifying-nodes-for-vms - - Name: Configuring certificate rotation - File: virt-configuring-certificate-rotation - - Name: Configuring the default CPU model - File: virt-configuring-default-cpu-model - - Name: UEFI mode for virtual machines - File: virt-uefi-mode-for-vms - - Name: Configuring PXE booting for virtual machines - File: virt-configuring-pxe-booting - - Name: Using huge pages with virtual machines - File: virt-using-huge-pages-with-vms - - Name: Enabling dedicated resources for a virtual machine - File: virt-dedicated-resources-vm - - Name: Scheduling virtual machines - File: virt-schedule-vms - - Name: Configuring PCI passthrough - File: virt-configuring-pci-passthrough - - Name: Configuring vGPU passthrough - File: virt-configuring-vgpu-passthrough - - Name: Configuring mediated devices - File: virt-configuring-mediated-devices - - Name: Enabling descheduler evictions on virtual machines - File: virt-enabling-descheduler-evictions - - Name: About high availability for virtual machines - File: virt-high-availability-for-vms -# Importing virtual machines - - Name: Importing virtual machines - Dir: importing_vms - Topics: - - Name: TLS certificates for data volume imports - File: virt-tls-certificates-for-dv-imports - - Name: Importing virtual machine images with data volumes - File: virt-importing-virtual-machine-images-datavolumes -# Cloning virtual machines - - Name: Cloning virtual machines - Dir: cloning_vms - Topics: - - Name: Enabling user permissions to clone data volumes across namespaces - File: virt-enabling-user-permissions-to-clone-datavolumes - - Name: Cloning a virtual machine disk into a new data volume - File: virt-cloning-vm-disk-into-new-datavolume - - Name: Cloning a virtual machine by using a data volume template - File: virt-cloning-vm-using-datavolumetemplate - - Name: Cloning a virtual machine disk into a new block storage persistent volume claim - File: virt-cloning-vm-disk-into-new-block-storage-pvc -# Virtual machine networking - - Name: Virtual machine networking - Dir: vm_networking - Topics: - - Name: Configuring a virtual machine for the default pod network - File: virt-using-the-default-pod-network-with-virt - Distros: openshift-enterprise - - Name: Configuring a virtual machine for the default pod network with OKD Virtualization - File: virt-using-the-default-pod-network-with-virt - Distros: openshift-origin - - Name: Creating a service to expose a virtual machine - File: virt-creating-service-vm - - Name: Connecting a virtual machine to a Linux bridge network - File: virt-attaching-vm-multiple-networks - - Name: Connecting a virtual machine to an SR-IOV network - File: virt-attaching-vm-to-sriov-network - - Name: Connecting a virtual machine to a service mesh - File: virt-connecting-vm-to-service-mesh - - Name: Configuring IP addresses for virtual machines - File: virt-configuring-ip-for-vms - 
- Name: Viewing the IP address of NICs on a virtual machine - File: virt-viewing-ip-of-vm-nic - - Name: Accessing a virtual machine on a secondary network by using the cluster domain name - File: virt-accessing-vm-secondary-network-fqdn - - Name: Using a MAC address pool for virtual machines - File: virt-using-mac-address-pool-for-vms -#A BETTER NAME THAN 'STORAGE 4 U' - - Name: Virtual machine disks - Dir: virtual_disks - Topics: - - Name: Configuring local storage for virtual machines - File: virt-configuring-local-storage-for-vms - - Name: Creating data volumes - File: virt-creating-data-volumes - - Name: Reserving PVC space for file system overhead - File: virt-reserving-pvc-space-fs-overhead - - Name: Configuring CDI to work with namespaces that have a compute resource quota - File: virt-configuring-cdi-for-namespace-resourcequota - - Name: Managing data volume annotations - File: virt-managing-data-volume-annotations - - Name: Using preallocation for data volumes - File: virt-using-preallocation-for-datavolumes - - Name: Uploading local disk images by using the web console - File: virt-uploading-local-disk-images-web - - Name: Uploading local disk images by using the virtctl tool - File: virt-uploading-local-disk-images-virtctl - - Name: Uploading a local disk image to a block storage persistent volume claim - File: virt-uploading-local-disk-images-block - - Name: Managing virtual machine snapshots - File: virt-managing-vm-snapshots - - Name: Moving a local virtual machine disk to a different node - File: virt-moving-local-vm-disk-to-different-node - - Name: Expanding virtual storage by adding blank disk images - File: virt-expanding-virtual-storage-with-blank-disk-images - - Name: Cloning a data volume using smart-cloning - File: virt-cloning-a-datavolume-using-smart-cloning - - Name: Hot plugging virtual disks - File: virt-hot-plugging-virtual-disks - - Name: Using container disks with virtual machines - File: virt-using-container-disks-with-vms - - Name: Preparing CDI scratch space - File: virt-preparing-cdi-scratch-space - - Name: Re-using statically provisioned persistent volumes - File: virt-reusing-statically-provisioned-persistent-volumes - - Name: Expanding a virtual machine disk - File: virt-expanding-vm-disk -# Templates -- Name: Virtual machine templates - Dir: vm_templates - Topics: - - Name: Creating virtual machine templates - File: virt-creating-vm-template - - Name: Editing virtual machine templates - File: virt-editing-vm-template - - Name: Enabling dedicated resources for a virtual machine template - File: virt-dedicated-resources-vm-template - - Name: Deploying a virtual machine template to a custom namespace - File: virt-deploying-vm-template-to-custom-namespace - - Name: Deleting a virtual machine template - File: virt-deleting-vm-template - - Name: Creating and using boot sources - File: virt-creating-and-using-boot-sources - - Name: Managing automatic boot source updates - File: virt-automatic-bootsource-updates - Distros: openshift-enterprise -# Virtual machine live migration -- Name: Live migration - Dir: live_migration - Topics: - - Name: Virtual machine live migration - File: virt-live-migration - - Name: Live migration limits and timeouts - File: virt-live-migration-limits - - Name: Migrating a virtual machine instance to another node - File: virt-migrate-vmi - - Name: Migrating a virtual machine over a dedicated additional network - File: virt-migrating-vm-on-secondary-network - - Name: Cancelling the live migration of a virtual machine instance - File: 
virt-cancel-vmi-migration - - Name: Configuring virtual machine eviction strategy - File: virt-configuring-vmi-eviction-strategy - - Name: Configuring live migration policies - File: virt-configuring-live-migration-policies -# Node maintenance mode -- Name: Node maintenance - Dir: node_maintenance - Topics: - - Name: About node maintenance - File: virt-about-node-maintenance - - Name: Automatic renewal of TLS certificates - File: virt-automatic-certificates - - Name: Managing node labeling for obsolete CPU models - File: virt-managing-node-labeling-obsolete-cpu-models - - Name: Preventing node reconciliation - File: virt-preventing-node-reconciliation - - Name: Deleting a failed node to trigger virtual machine failover - File: virt-triggering-vm-failover-resolving-failed-node -- Name: Monitoring - Dir: monitoring - Topics: - - Name: Monitoring overview - File: virt-monitoring-overview - - Name: OpenShift cluster checkup framework - File: virt-running-cluster-checkups - - Name: Prometheus queries for virtual resources - File: virt-prometheus-queries - - Name: Virtual machine custom metrics - File: virt-exposing-custom-metrics-for-vms - - Name: Virtual machine health checks - File: virt-monitoring-vm-health - - Name: Runbooks - File: virt-runbooks -- Name: Support - Dir: support - Topics: - - Name: Support overview - File: virt-support-overview - - Name: Collecting data for Red Hat Support - File: virt-collecting-virt-data - Distros: openshift-enterprise - - Name: Troubleshooting - File: virt-troubleshooting -- Name: Backup and restore - Dir: backup_restore - Topics: - - Name: Installing and configuring OADP - File: virt-installing-configuring-oadp - - Name: Backing up and restoring virtual machines - File: virt-backup-restore-overview - - Name: Backing up virtual machines - File: virt-backing-up-vms - - Name: Restoring virtual machines - File: virt-restoring-vms -# - Name: Collecting OKD Virtualization data for community report -# File: virt-collecting-virt-data -# Distros: openshift-origin ---- -Name: Serverless -Dir: serverless -Distros: openshift-enterprise -Topics: -- Name: About Serverless - Dir: about - Topics: - - Name: Serverless overview - File: about-serverless +- Name: Understanding image builds + File: understanding-image-builds diff --git a/_topic_maps/_topic_map_ms.yml b/_topic_maps/_topic_map_ms.yml deleted file mode 100644 index b3afced7775e..000000000000 --- a/_topic_maps/_topic_map_ms.yml +++ /dev/null @@ -1,164 +0,0 @@ -# This configuration file dictates the organization of the topic groups and -# topics on the main page of the doc site for this branch. Each record -# consists of the following: -# -# --- <= Record delimiter -# Name: Origin of the Species <= Display name of topic group -# Dir: origin_of_the_species <= Directory name of topic group -# Topics: -# - Name: The Majestic Marmoset <= Topic name -# File: the_majestic_marmoset <= Topic file under group dir +/- -# - Name: The Curious Crocodile <= Topic 2 name -# File: the_curious_crocodile <= Topic 2 file -# - Name: The Numerous Nematodes <= Sub-topic group name -# Dir: the_numerous_nematodes <= Sub-topic group dir -# Topics: -# - Name: The Wily Worm <= Sub-topic name -# File: the_wily_worm <= Sub-topic file under / -# - Name: The Acrobatic Ascarid <= Sub-topic 2 name -# File: the_acrobatic_ascarid <= Sub-topic 2 file under / -# -# The ordering of the records in this document determines the ordering of the -# topic groups and topics on the main page. 
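The header comment that closes just above describes the layout every file under _topic_maps/ follows: a stream of `---`-separated YAML records, each with a display Name, a Dir, and a nested Topics list that can recurse into sub-topic groups, with an optional Distros filter at any level. As an illustration only — this is not the repository's actual build tooling, and the file path, the "microshift" distro value, and the use of PyYAML are assumptions taken from the deleted files in this diff — a minimal sketch that loads one topic map and prints the navigation tree it defines might look like this:

import yaml  # assumes PyYAML is available


def walk(topics, distro, depth=0):
    # Print each topic Name, skipping entries whose optional Distros value
    # does not include the requested distro; recurse into sub-topic groups.
    for topic in topics:
        distros = topic.get("Distros")
        if distros and distro not in [d.strip() for d in distros.split(",")]:
            continue
        print("  " * depth + topic["Name"])
        walk(topic.get("Topics", []), distro, depth + 1)


# Hypothetical usage: the path and distro name come from the deleted
# _topic_map_ms.yml shown in this diff; substitute any other map or distro.
with open("_topic_maps/_topic_map_ms.yml") as f:
    for record in yaml.safe_load_all(f):  # one record per `---` document
        if not record:
            continue  # skip any empty document produced by the leading comments
        distros = record.get("Distros")
        if distros and "microshift" not in [d.strip() for d in distros.split(",")]:
            continue
        print(record["Name"])
        walk(record.get("Topics", []), "microshift", depth=1)

Nothing in the deleted files depends on this script; it is only meant to make the record/Topics/Distros nesting that the comment describes concrete.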
- ---- -Name: About -Dir: microshift_welcome -Distros: microshift -Topics: -- Name: Welcome - File: index ---- -Name: Release notes -Dir: microshift_release_notes -Distros: microshift -Topics: -- Name: MicroShift 4.14 release notes - File: microshift-4-14-release-notes ---- -Name: Getting started -Dir: microshift_getting_started -Distros: microshift -Topics: -- Name: Understanding MicroShift - File: microshift-understanding -- Name: Architecture - File: microshift-architecture ---- -Name: Installing -Dir: microshift_install -Distros: microshift -Topics: -- Name: Installing from RPM - File: microshift-install-rpm -- Name: Embedding in a RHEL for Edge image - File: microshift-embed-in-rpm-ostree -- Name: Greenboot health check - File: microshift-greenboot ---- -Name: Updating clusters -Dir: microshift_updating -Distros: microshift -Topics: -- Name: About MicroShift updates - File: microshift-about-updates ---- -Name: Support -Dir: microshift_support -Distros: microshift -Topics: -- Name: MicroShift etcd - File: microshift-etcd -- Name: MicroShift sos report - File: microshift-sos-report ---- -Name: API reference -Dir: microshift_rest_api -Distros: microshift -Topics: -- Name: Understanding API tiers - File: understanding-api-support-tiers -- Name: API compatibility guidelines - File: understanding-compatibility-guidelines -- Name: Network APIs - Dir: network_apis - Topics: - - Name: Route [route.openshift.io/v1] - File: route-route-openshift-io-v1 -- Name: Security APIs - Dir: security_apis - Topics: - - Name: SecurityContextConstraints [security.openshift.io/v1] - File: securitycontextconstraints-security-openshift-io-v1 ---- -Name: CLI tools -Dir: microshift_cli_ref -Distros: microshift -Topics: -- Name: CLI tools introduction - File: microshift-cli-tools-introduction -- Name: Installing the OpenShift CLI - File: microshift-oc-cli-install -- Name: Configuring the OpenShift CLI - File: microshift-oc-config -- Name: Using the OpenShift CLI - File: microshift-cli-using-oc -- Name: Using oc and kubectl - File: microshift-usage-oc-kubectl -- Name: List of oc CLI commands - File: microshift-oc-cli-commands-list ---- -Name: Configuring -Dir: microshift_configuring -Distros: microshift -Topics: -- Name: Using configuration tools - File: microshift-using-config-tools -- Name: Cluster access with kubeconfig - File: microshift-cluster-access-kubeconfig ---- -Name: Networking -Dir: microshift_networking -Distros: microshift -Topics: -- Name: Networking settings - File: microshift-networking -- Name: Firewall configuration - File: microshift-firewall ---- -Name: Storage -Dir: microshift_storage -Distros: microshift -Topics: -- Name: MicroShift storage overview - File: index -- Name: Understanding ephemeral storage for MicroShift - File: understanding-ephemeral-storage-microshift -- Name: Generic ephemeral volumes for MicroShift - File: generic-ephemeral-volumes-microshift -- Name: Understanding persistent storage for MicroShift - File: understanding-persistent-storage-microshift -- Name: Expanding persistent volumes for MicroShift - File: expanding-persistent-volumes-microshift -- Name: Dynamic storage using the LVMS plugin - File: microshift-storage-plugin-overview ---- -Name: Running applications -Dir: microshift_running_apps -Distros: microshift -Topics: -- Name: Application deployment - File: microshift-applications -- Name: Operators - File: microshift-operators -- Name: Greenboot workload health check scripts - File: microshift-greenboot-workload-scripts ---- -Name: Troubleshooting -Dir: 
microshift_troubleshooting -Distros: microshift -Topics: -- Name: Checking your version - File: microshift-version -- Name: Additional information - File: microshift-things-to-know diff --git a/_topic_maps/_topic_map_osd.yml b/_topic_maps/_topic_map_osd.yml deleted file mode 100644 index 3ac8ea3e5c4d..000000000000 --- a/_topic_maps/_topic_map_osd.yml +++ /dev/null @@ -1,411 +0,0 @@ -# This configuration file dictates the organization of the topic groups and -# topics on the main page of the doc site for this branch. Each record -# consists of the following: -# -# --- <= Record delimiter -# Name: Origin of the Species <= Display name of topic group -# Dir: origin_of_the_species <= Directory name of topic group -# Topics: -# - Name: The Majestic Marmoset <= Topic name -# File: the_majestic_marmoset <= Topic file under group dir +/- -# - Name: The Curious Crocodile <= Topic 2 name -# File: the_curious_crocodile <= Topic 2 file -# - Name: The Numerous Nematodes <= Sub-topic group name -# Dir: the_numerous_nematodes <= Sub-topic group dir -# Topics: -# - Name: The Wily Worm <= Sub-topic name -# File: the_wily_worm <= Sub-topic file under / -# - Name: The Acrobatic Ascarid <= Sub-topic 2 name -# File: the_acrobatic_ascarid <= Sub-topic 2 file under / -# -# The ordering of the records in this document determines the ordering of the -# topic groups and topics on the main page. - ---- -Name: About -Dir: welcome -Distros: openshift-dedicated -Topics: -- Name: Welcome - File: index -- Name: Legal notice - File: legal-notice - Distros: openshift-dedicated ---- -Name: Introduction to OpenShift Dedicated -Dir: osd_architecture -Distros: openshift-dedicated -Topics: -- Name: Understanding OpenShift Dedicated - File: osd-understanding -- Name: Architecture concepts - File: osd-architecture -- Name: Policies and service definition - Dir: osd_policy - Distros: openshift-dedicated - Topics: - - Name: OpenShift Dedicated service definition - File: osd-service-definition - - Name: Responsibility assignment matrix - File: policy-responsibility-matrix - - Name: Understanding process and security for OpenShift Dedicated - File: policy-process-security - - Name: About availability for OpenShift Dedicated - File: policy-understand-availability - - Name: Update life cycle - File: osd-life-cycle -- Name: Support for OpenShift Dedicated - File: osd-support - Distros: openshift-dedicated ---- -Name: Red Hat OpenShift Cluster Manager -Dir: ocm -Distros: openshift-dedicated -Topics: -- Name: Red Hat OpenShift Cluster Manager - File: ocm-overview ---- -Name: Planning your environment -Dir: osd_planning -Distros: openshift-dedicated -Topics: -- Name: Limits and scalability - File: osd-limits-scalability -- Name: Customer Cloud Subscriptions on AWS - File: aws-ccs -- Name: Customer Cloud Subscriptions on GCP - File: gcp-ccs ---- -Name: Getting started -Dir: osd_getting_started -Distros: openshift-dedicated -Topics: -- Name: Understanding your cloud deployment options - File: osd-understanding-your-cloud-deployment-options -- Name: Getting started with OpenShift Dedicated - File: osd-getting-started ---- -Name: Installing, accessing, and deleting OpenShift Dedicated clusters -Dir: osd_install_access_delete_cluster -Distros: openshift-dedicated -Topics: -- Name: Creating a cluster on AWS - File: creating-an-aws-cluster -- Name: Creating a cluster on GCP - File: creating-a-gcp-cluster -- Name: Configuring your identity providers - File: config-identity-providers -- Name: Revoking privileges and access to an OpenShift Dedicated 
cluster - File: osd-revoking-cluster-privileges -- Name: Deleting an OpenShift Dedicated cluster - File: osd-deleting-a-cluster ---- -Name: Cluster administration -Dir: osd_cluster_admin -Distros: openshift-dedicated -Topics: -- Name: Managing administration roles and users - File: osd-admin-roles -- Name: Configuring private connections - Dir: osd_private_connections - Distros: openshift-dedicated - Topics: - - Name: Configuring private connections for AWS - File: aws-private-connections - - Name: Configuring a private cluster - File: private-cluster -- Name: Nodes - Dir: osd_nodes - Distros: openshift-dedicated - Topics: - - Name: About machine pools - File: osd-nodes-machinepools-about - - Name: Managing compute nodes - File: osd-managing-worker-nodes - - Name: About autoscaling nodes on a cluster - File: osd-nodes-about-autoscaling-nodes -- Name: Logging - Dir: osd_logging - Distros: openshift-dedicated - Topics: - - Name: Accessing the service logs - File: osd-accessing-the-service-logs ---- -# Name: Security and compliance -# Dir: security -# Distros: openshift-dedicated -# Topics: -# - Name: Viewing audit logs -# File: audit-log-view -# --- -Name: Authentication and authorization -Dir: authentication -Distros: openshift-dedicated -Topics: -- Name: Managing security context constraints - File: managing-security-context-constraints ---- -Name: Upgrading -Dir: upgrading -Distros: openshift-dedicated -Topics: -- Name: Preparing to upgrade OpenShift Dedicated to 4.9 - File: osd-upgrading-cluster-prepare - Distros: openshift-dedicated -- Name: Upgrading OpenShift Dedicated - File: osd-upgrades - Distros: openshift-dedicated ---- -Name: CI/CD -Dir: cicd -Distros: openshift-dedicated -Topics: -- Name: Builds - Dir: builds - Distros: openshift-dedicated - Topics: - - Name: Setting up additional trusted certificate authorities for builds - File: setting-up-trusted-ca - Distros: openshift-dedicated ---- -Name: Add-on services -Dir: adding_service_cluster -Distros: openshift-dedicated -Topics: -- Name: Adding services to a cluster - File: adding-service -- Name: Available services - File: available-services - Distros: openshift-dedicated ---- -Name: Storage -Dir: storage -Distros: openshift-dedicated -Topics: -- Name: Storage overview - File: index -- Name: Understanding ephemeral storage - File: understanding-ephemeral-storage -- Name: Understanding persistent storage - File: understanding-persistent-storage -- Name: Configuring persistent storage - Dir: persistent_storage - Topics: - - Name: Persistent storage using AWS Elastic Block Store - File: persistent-storage-aws - - Name: Persistent storage using GCE Persistent Disk - File: persistent-storage-gce -- Name: Using Container Storage Interface (CSI) - Dir: container_storage_interface - Topics: - - Name: Configuring CSI volumes - File: persistent-storage-csi - - Name: Managing the default storage class - File: persistent-storage-csi-sc-manage - - Name: AWS Elastic Block Store CSI Driver Operator - File: persistent-storage-csi-ebs - - Name: AWS Elastic File Service CSI Driver Operator - File: osd-persistent-storage-aws-efs-csi - - Name: GCP PD CSI Driver Operator - File: persistent-storage-csi-gcp-pd - #- Name: GCP Filestore CSI Driver Operator - # File: persistent-storage-csi-google-cloud-file -- Name: Generic ephemeral volumes - File: generic-ephemeral-vols -- Name: Dynamic provisioning - File: dynamic-provisioning ---- -Name: Registry -Dir: registry -Distros: openshift-dedicated -Topics: -- Name: Registry overview - File: index -- Name: 
Image Registry Operator in OpenShift Dedicated - File: configuring-registry-operator -- Name: Accessing the registry - File: accessing-the-registry -- Name: Exposing the registry - File: securing-exposing-registry ---- -Name: Networking -Dir: networking -Distros: openshift-dedicated -Topics: -- Name: Understanding the DNS Operator - File: dns-operator -- Name: Understanding the Ingress Operator - File: ingress-operator -- Name: OpenShift SDN default CNI network provider - Dir: openshift_sdn - Topics: - - Name: Enabling multicast for a project - File: enabling-multicast -- Name: Network verification - File: network-verification -- Name: Configuring a cluster-wide proxy during installation - File: configuring-cluster-wide-proxy -- Name: CIDR range definitions - File: cidr-range-definitions -- Name: Network policy - Dir: network_policy - Topics: - - Name: About network policy - File: about-network-policy - - Name: Creating a network policy - File: creating-network-policy - - Name: Viewing a network policy - File: viewing-network-policy - - Name: Deleting a network policy - File: deleting-network-policy - - Name: Configuring multitenant isolation with network policy - File: multitenant-network-policy -- Name: Configuring Routes - Dir: routes - Topics: - - Name: Route configuration - File: route-configuration - - Name: Secured routes - File: secured-routes ---- -Name: Applications -Dir: applications -Distros: openshift-dedicated -Topics: -- Name: Deployments - Dir: deployments - Distros: openshift-dedicated - Topics: - - Name: Custom domains for applications - File: osd-config-custom-domains-applications ---- -Name: Logging -Dir: logging -Distros: openshift-dedicated -Topics: -- Name: Release notes - File: cluster-logging-release-notes -- Name: About Logging - File: cluster-logging -- Name: Installing Logging - File: cluster-logging-deploying -- Name: Accessing the service logs - File: sd-accessing-the-service-logs -- Name: Configuring your Logging deployment - Dir: config - Topics: - - Name: About the Cluster Logging custom resource - File: cluster-logging-configuring-cr - - Name: Configuring the logging collector - File: cluster-logging-collector - - Name: Configuring the log store - File: cluster-logging-log-store - - Name: Configuring the log visualizer - File: cluster-logging-visualizer - - Name: Configuring Logging storage - File: cluster-logging-storage-considerations - - Name: Configuring CPU and memory limits for Logging components - File: cluster-logging-memory - - Name: Using tolerations to control Logging pod placement - File: cluster-logging-tolerations - - Name: Moving the Logging resources with node selectors - File: cluster-logging-moving-nodes - #- Name: Configuring systemd-journald and Fluentd - # File: cluster-logging-systemd - - Name: Maintenance and support - File: cluster-logging-maintenance-support -- Name: Logging with the LokiStack - File: cluster-logging-loki -- Name: Viewing logs for a specific resource - File: viewing-resource-logs -- Name: Viewing cluster logs in Kibana - File: cluster-logging-visualizer - Distros: openshift-dedicated -- Name: Forwarding logs to third party systems - File: cluster-logging-external -- Name: Enabling JSON logging - File: cluster-logging-enabling-json-logging -- Name: Collecting and storing Kubernetes events - File: cluster-logging-eventrouter -# - Name: Forwarding logs using ConfigMaps -# File: cluster-logging-external-configmap -# Distros: openshift-dedicated -- Name: Updating Logging - File: cluster-logging-upgrading -- Name: 
Viewing cluster dashboards - File: cluster-logging-dashboards -- Name: Troubleshooting Logging - Dir: troubleshooting - Topics: - - Name: Viewing Logging status - File: cluster-logging-cluster-status - - Name: Viewing the status of the log store - File: cluster-logging-log-store-status - - Name: Understanding Logging alerts - File: cluster-logging-alerts - - Name: Collecting logging data for Red Hat Support - File: cluster-logging-must-gather - - Name: Troubleshooting for Critical Alerts - File: cluster-logging-troubleshooting-for-critical-alerts -- Name: Uninstalling Logging - File: cluster-logging-uninstall -- Name: Exported fields - File: cluster-logging-exported-fields ---- -Name: Monitoring -Dir: monitoring -Distros: openshift-dedicated -Topics: -- Name: Monitoring overview - File: monitoring-overview -- Name: Accessing monitoring for user-defined projects - File: sd-accessing-monitoring-for-user-defined-projects -- Name: Configuring the monitoring stack - File: configuring-the-monitoring-stack -- Name: Disabling monitoring for user-defined projects - File: sd-disabling-monitoring-for-user-defined-projects -- Name: Enabling alert routing for user-defined projects - File: enabling-alert-routing-for-user-defined-projects -- Name: Managing metrics - File: managing-metrics -- Name: Managing alerts - File: managing-alerts -- Name: Reviewing monitoring dashboards - File: reviewing-monitoring-dashboards -- Name: Accessing third-party monitoring APIs - File: accessing-third-party-monitoring-apis -- Name: Troubleshooting monitoring issues - File: troubleshooting-monitoring-issues -- Name: Config map reference for the Cluster Monitoring Operator - File: config-map-reference-for-the-cluster-monitoring-operator ---- -Name: Serverless -Dir: serverless -Distros: openshift-dedicated -Topics: -- Name: About Serverless - Dir: about - Topics: - - Name: Serverless overview - File: about-serverless ---- -Name: Troubleshooting -Dir: sd_support -Distros: openshift-dedicated -Topics: -- Name: Remote health monitoring with connected clusters - Dir: remote_health_monitoring - Distros: openshift-dedicated - Topics: - - Name: About remote health monitoring - File: about-remote-health-monitoring - - Name: Showing data collected by remote health monitoring - File: showing-data-collected-by-remote-health-monitoring - - Name: Using Insights to identify issues with your cluster - File: using-insights-to-identify-issues-with-your-cluster -- Name: Summarizing cluster specifications - File: osd-summarizing-cluster-specifications - Distros: openshift-dedicated -- Name: OpenShift Dedicated managed resources - File: osd-managed-resources - Distros: openshift-dedicated diff --git a/_topic_maps/_topic_map_rosa.yml b/_topic_maps/_topic_map_rosa.yml deleted file mode 100644 index cf1802dc1e3c..000000000000 --- a/_topic_maps/_topic_map_rosa.yml +++ /dev/null @@ -1,617 +0,0 @@ -# This configuration file dictates the organization of the topic groups and -# topics on the main page of the doc site for this branch. 
Each record -# consists of the following: -# -# --- <= Record delimiter -# Name: Origin of the Species <= Display name of topic group -# Dir: origin_of_the_species <= Directory name of topic group -# Topics: -# - Name: The Majestic Marmoset <= Topic name -# File: the_majestic_marmoset <= Topic file under group dir +/- -# - Name: The Curious Crocodile <= Topic 2 name -# File: the_curious_crocodile <= Topic 2 file -# - Name: The Numerous Nematodes <= Sub-topic group name -# Dir: the_numerous_nematodes <= Sub-topic group dir -# Topics: -# - Name: The Wily Worm <= Sub-topic name -# File: the_wily_worm <= Sub-topic file under / -# - Name: The Acrobatic Ascarid <= Sub-topic 2 name -# File: the_acrobatic_ascarid <= Sub-topic 2 file under / -# -# The ordering of the records in this document determines the ordering of the -# topic groups and topics on the main page. - ---- -Name: About -Dir: welcome -Distros: openshift-rosa -Topics: -- Name: Welcome - File: index -- Name: Legal notice - File: legal-notice - Distros: openshift-rosa ---- -Name: What's new -Dir: rosa_release_notes -Distros: openshift-rosa -Topics: -- Name: What's new with ROSA - File: rosa-release-notes ---- -Name: Introduction to ROSA -Dir: rosa_architecture -Distros: openshift-rosa -Topics: -- Name: Understanding ROSA - File: rosa-understanding -- Name: ROSA architecture - Dir: rosa_architecture_sub - Distros: openshift-rosa - Topics: - - Name: Architecture concepts - File: rosa-basic-architecture-concepts - - Name: Architecture models - File: rosa-architecture-models -- Name: Policies and service definition - Dir: rosa_policy_service_definition - Distros: openshift-rosa - Topics: - - Name: About availability for ROSA - File: rosa-policy-understand-availability - - Name: Overview of responsibilities for ROSA - File: rosa-policy-responsibility-matrix - - Name: ROSA service definition - File: rosa-service-definition - - Name: ROSA update life cycle - File: rosa-life-cycle - - Name: Understanding process and security for ROSA - File: rosa-policy-process-security -- Name: About IAM resources for ROSA with STS - File: rosa-sts-about-iam-resources -- Name: OpenID Connect Overview - File: rosa-oidc-overview -- Name: Support for ROSA - File: rosa-getting-support -# - Name: Training for ROSA -# File: rosa-training ---- -Name: Getting started -Dir: rosa_getting_started -Distros: openshift-rosa -Topics: -- Name: ROSA quickstart guide - File: rosa-quickstart-guide-ui -- Name: Comprehensive guide to getting started with ROSA - File: rosa-getting-started -- Name: Understanding the ROSA with STS deployment workflow - File: rosa-sts-getting-started-workflow ---- -Name: Prepare your environment -Dir: rosa_planning -Distros: openshift-rosa -Topics: -- Name: AWS prerequisites for ROSA with STS - File: rosa-sts-aws-prereqs -- Name: ROSA IAM role resources - File: rosa-sts-ocm-role -- Name: Limits and scalability - File: rosa-limits-scalability -- Name: Planning your environment - File: rosa-planning-environment -- Name: Required AWS service quotas - File: rosa-sts-required-aws-service-quotas -- Name: Setting up your environment - File: rosa-sts-setting-up-environment ---- -Name: Install ROSA with HCP clusters -Dir: rosa_hcp -Distros: openshift-rosa -Topics: -- Name: Creating ROSA with HCP clusters using the default options - File: rosa-hcp-sts-creating-a-cluster-quickly -- Name: Using the Node Tuning Operator on ROSA with HCP - File: rosa-tuning-config ---- -Name: Install ROSA Classic clusters -Dir: rosa_install_access_delete_clusters -Distros: 
openshift-rosa -Topics: -- Name: Creating a ROSA cluster with STS using the default options - File: rosa-sts-creating-a-cluster-quickly -- Name: Creating a ROSA cluster with STS using customizations - File: rosa-sts-creating-a-cluster-with-customizations -- Name: Interactive cluster creation mode reference - File: rosa-sts-interactive-mode-reference -- Name: Creating an AWS PrivateLink cluster on ROSA - File: rosa-aws-privatelink-creating-cluster -- Name: Accessing a ROSA cluster - File: rosa-sts-accessing-cluster -- Name: Configuring identity providers using Red Hat OpenShift Cluster Manager - File: rosa-sts-config-identity-providers -- Name: Revoking access to a ROSA cluster - File: rosa-sts-deleting-access-cluster -- Name: Deleting a ROSA cluster - File: rosa-sts-deleting-cluster -- Name: Deploying ROSA without AWS STS - Dir: rosa_getting_started_iam - Distros: openshift-rosa - Topics: - - Name: AWS prerequisites for ROSA - File: rosa-aws-prereqs - - Name: Understanding the ROSA deployment workflow - File: rosa-getting-started-workflow - - Name: Required AWS service quotas - File: rosa-required-aws-service-quotas - - Name: Configuring your AWS account - File: rosa-config-aws-account - - Name: Installing the ROSA CLI - File: rosa-installing-rosa - - Name: Creating a ROSA cluster without AWS STS - File: rosa-creating-cluster - - Name: Configuring a private cluster - File: rosa-private-cluster -# - Name: Creating a ROSA cluster using the web console -# File: rosa-creating-cluster-console -# - Name: Accessing a ROSA cluster -# File: rosa-accessing-cluster -# - Name: Configuring identity providers using the Red Hat OpenShift Cluster Manager -# File: rosa-config-identity-providers - - Name: Deleting access to a ROSA cluster - File: rosa-deleting-access-cluster - - Name: Deleting a ROSA cluster - File: rosa-deleting-cluster - - Name: Command quick reference for creating clusters and users - File: rosa-quickstart ---- -Name: ROSA CLI -Dir: rosa_cli -Distros: openshift-rosa -Topics: -# - Name: CLI and web console -# File: rosa-cli-openshift-console -- Name: Getting started with the ROSA CLI - File: rosa-get-started-cli -- Name: Managing objects with the ROSA CLI - File: rosa-manage-objects-cli -- Name: Checking account and version information with the ROSA CLI - File: rosa-checking-acct-version-cli -- Name: Checking logs with the ROSA CLI - File: rosa-checking-logs-cli ---- -Name: Red Hat OpenShift Cluster Manager -Dir: ocm -Distros: openshift-rosa -Topics: -- Name: Red Hat OpenShift Cluster Manager - File: ocm-overview -# - Name: Red Hat OpenShift Cluster Manager -# File: ocm-overview -# - Name: Using the OpenShift web console -# File: rosa-using-openshift-console ---- -Name: Cluster administration -Dir: rosa_cluster_admin -Distros: openshift-rosa -Topics: -# - Name: Cluster configurations -# File: rosa-cluster-config -# - Name: Cluster authentication -# File: rosa-cluster-auth -# - Name: Authorization and RBAC -# File: rosa-auth-rbac -- Name: Configuring private connections - Dir: cloud_infrastructure_access - Distros: openshift-rosa - Topics: - - Name: Configuring private connections - File: rosa-configuring-private-connections - - Name: Configuring AWS VPC peering - File: dedicated-aws-peering - - Name: Configuring AWS VPN - File: dedicated-aws-vpn - - Name: Configuring AWS Direct Connect - File: dedicated-aws-dc -- Name: Nodes - Dir: rosa_nodes - Distros: openshift-rosa - Topics: - - Name: About machine pools - File: rosa-nodes-machinepools-about - - Name: Managing compute nodes - File: 
rosa-managing-worker-nodes - - Name: Configuring machine pools in Local Zones - File: rosa-nodes-machinepools-configuring - Distros: openshift-rosa - - Name: About autoscaling nodes on a cluster - File: rosa-nodes-about-autoscaling-nodes ---- -# Name: Security and compliance -# Dir: security -# Distros: openshift-rosa -# Topics: -# - Name: Viewing audit logs -# File: audit-log-view -# # - Name: Security -# # File: rosa-security -# # - Name: Application and cluster compliance -# # File: rosa-app-security-compliance -# --- -Name: Authentication and authorization -Dir: authentication -Distros: openshift-rosa -Topics: -- Name: Assuming an AWS IAM role for a service account - File: assuming-an-aws-iam-role-for-a-service-account -- Name: Managing security context constraints - File: managing-security-context-constraints ---- -Name: Upgrading -Dir: upgrading -Distros: openshift-rosa -Topics: -- Name: Preparing to upgrade ROSA to 4.9 - File: rosa-upgrading-cluster-prepare - Distros: openshift-rosa -- Name: Upgrading ROSA with STS - File: rosa-upgrading-sts -- Name: Upgrading ROSA - File: rosa-upgrading -- Name: Upgrading ROSA with HCP - File: rosa-hcp-upgrading ---- -Name: CI/CD -Dir: cicd -Distros: openshift-rosa -Topics: -- Name: Builds - Dir: builds - Distros: openshift-rosa - Topics: - - Name: Setting up additional trusted certificate authorities for builds - File: setting-up-trusted-ca - Distros: openshift-rosa ---- - Name: Add-on services - Dir: adding_service_cluster - Distros: openshift-rosa - Topics: - - Name: Adding services to a cluster - File: adding-service - - Name: Available services - File: rosa-available-services ---- -Name: Storage -Dir: storage -Distros: openshift-rosa -Topics: -- Name: Storage overview - File: index -- Name: Understanding ephemeral storage - File: understanding-ephemeral-storage -- Name: Understanding persistent storage - File: understanding-persistent-storage -- Name: Configuring persistent storage - Dir: persistent_storage - Topics: - - Name: Persistent storage using AWS Elastic Block Store - File: persistent-storage-aws -- Name: Using Container Storage Interface (CSI) - Dir: container_storage_interface - Topics: - - Name: Configuring CSI volumes - File: persistent-storage-csi - - Name: Managing the default storage class - File: persistent-storage-csi-sc-manage - - Name: AWS Elastic Block Store CSI Driver Operator - File: persistent-storage-csi-ebs - - Name: AWS Elastic File Service CSI Driver Operator - File: osd-persistent-storage-aws-efs-csi -- Name: Generic ephemeral volumes - File: generic-ephemeral-vols -- Name: Dynamic provisioning - File: dynamic-provisioning ---- -Name: Registry -Dir: registry -Distros: openshift-rosa -Topics: -- Name: Registry overview - File: index -- Name: Image Registry Operator in Red Hat OpenShift Service on AWS - File: configuring-registry-operator -- Name: Accessing the registry - File: accessing-the-registry -- Name: Exposing the registry - File: securing-exposing-registry ---- -Name: Networking -Dir: networking -Distros: openshift-rosa -Topics: -- Name: Understanding the DNS Operator - File: dns-operator -- Name: Understanding the Ingress Operator - File: ingress-operator -- Name: OpenShift SDN default CNI network provider - Dir: openshift_sdn - Topics: - - Name: Enabling multicast for a project - File: enabling-multicast -- Name: Network verification - File: network-verification -- Name: Configuring a cluster-wide proxy during installation - File: configuring-cluster-wide-proxy -- Name: CIDR range definitions - File: 
cidr-range-definitions -- Name: Network policy - Dir: network_policy - Topics: - - Name: About network policy - File: about-network-policy - - Name: Creating a network policy - File: creating-network-policy - - Name: Viewing a network policy - File: viewing-network-policy - - Name: Deleting a network policy - File: deleting-network-policy - - Name: Configuring multitenant isolation with network policy - File: multitenant-network-policy -- Name: Configuring Routes - Dir: routes - Topics: - - Name: Route configuration - File: route-configuration - - Name: Secured routes - File: secured-routes ---- -Name: Application development -Dir: applications -Distros: openshift-rosa -Topics: -- Name: Deployments - Dir: deployments - Distros: openshift-rosa - Topics: - - Name: Custom domains for applications - File: osd-config-custom-domains-applications -# - Name: Application GitOps workflows -# File: rosa-app-gitops-workflows -# - Name: Application logging -# File: rosa-app-logging -# - Name: Applications -# File: rosa-apps -# - Name: Application metrics and alerts -# File: rosa-app-metrics and alerts -# - Name: Projects -# File: rosa-projects -# - Name: Using the internal registry -# File: rosa-using-internal-registry ---- -Name: Backing up and restoring applications -Dir: rosa_backing_up_and_restoring_applications -Distros: openshift-rosa -Topics: -- Name: Installing OADP on ROSA with STS - File: backing-up-applications ---- -Name: Logging -Dir: logging -Distros: openshift-rosa -Topics: -- Name: Release notes - File: cluster-logging-release-notes -- Name: About Logging - File: cluster-logging -- Name: Installing Logging - File: cluster-logging-deploying -- Name: Accessing the service logs - File: sd-accessing-the-service-logs -- Name: Viewing cluster logs in the AWS Console - File: rosa-viewing-logs -- Name: Configuring your Logging deployment - Dir: config - Topics: - - Name: About the Cluster Logging custom resource - File: cluster-logging-configuring-cr - - Name: Configuring the logging collector - File: cluster-logging-collector - - Name: Configuring the log store - File: cluster-logging-log-store - - Name: Configuring the log visualizer - File: cluster-logging-visualizer - - Name: Configuring Logging storage - File: cluster-logging-storage-considerations - - Name: Configuring CPU and memory limits for Logging components - File: cluster-logging-memory - - Name: Using tolerations to control Logging pod placement - File: cluster-logging-tolerations - - Name: Moving the Logging resources with node selectors - File: cluster-logging-moving-nodes - #- Name: Configuring systemd-journald and Fluentd - # File: cluster-logging-systemd - - Name: Maintenance and support - File: cluster-logging-maintenance-support -- Name: Logging with the LokiStack - File: cluster-logging-loki -- Name: Viewing logs for a specific resource - File: viewing-resource-logs -- Name: Viewing cluster logs in Kibana - File: cluster-logging-visualizer -- Name: Forwarding logs to third party systems - File: cluster-logging-external -- Name: Enabling JSON logging - File: cluster-logging-enabling-json-logging -- Name: Collecting and storing Kubernetes events - File: cluster-logging-eventrouter -# - Name: Forwarding logs using ConfigMaps -# File: cluster-logging-external-configmap -- Name: Updating Logging - File: cluster-logging-upgrading -- Name: Viewing cluster dashboards - File: cluster-logging-dashboards -- Name: Troubleshooting Logging - Dir: troubleshooting - Topics: - - Name: Viewing Logging status - File: 
cluster-logging-cluster-status - - Name: Viewing the status of the log store - File: cluster-logging-log-store-status - - Name: Understanding Logging alerts - File: cluster-logging-alerts - - Name: Collecting logging data for Red Hat Support - File: cluster-logging-must-gather - - Name: Troubleshooting for Critical Alerts - File: cluster-logging-troubleshooting-for-critical-alerts -- Name: Uninstalling Logging - File: cluster-logging-uninstall -- Name: Exported fields - File: cluster-logging-exported-fields ---- -Name: Monitoring -Dir: monitoring -Distros: openshift-rosa -Topics: -- Name: Monitoring overview - File: monitoring-overview -- Name: Accessing monitoring for user-defined projects - File: sd-accessing-monitoring-for-user-defined-projects -- Name: Configuring the monitoring stack - File: configuring-the-monitoring-stack -- Name: Disabling monitoring for user-defined projects - File: sd-disabling-monitoring-for-user-defined-projects -- Name: Enabling alert routing for user-defined projects - File: enabling-alert-routing-for-user-defined-projects -- Name: Managing metrics - File: managing-metrics -- Name: Managing alerts - File: managing-alerts -- Name: Reviewing monitoring dashboards - File: reviewing-monitoring-dashboards -- Name: Accessing third-party monitoring APIs - File: accessing-third-party-monitoring-apis -- Name: Troubleshooting monitoring issues - File: troubleshooting-monitoring-issues -- Name: Config map reference for the Cluster Monitoring Operator - File: config-map-reference-for-the-cluster-monitoring-operator ---- -Name: Service Mesh -Dir: service_mesh -Distros: openshift-rosa -Topics: -- Name: Service Mesh 2.x - Dir: v2x - Topics: - - Name: About OpenShift Service Mesh - File: ossm-about - - Name: Service Mesh 2.x release notes - File: servicemesh-release-notes - - Name: Service Mesh architecture - File: ossm-architecture - - Name: Service Mesh deployment models - File: ossm-deployment-models - - Name: Service Mesh and Istio differences - File: ossm-vs-community - - Name: Preparing to install Service Mesh - File: preparing-ossm-installation - - Name: Installing the Operators - File: installing-ossm - - Name: Creating the ServiceMeshControlPlane - File: ossm-create-smcp - - Name: Adding workloads to a service mesh - File: ossm-create-mesh - - Name: Enabling sidecar injection - File: prepare-to-deploy-applications-ossm - - Name: Upgrading Service Mesh - File: upgrading-ossm - - Name: Managing users and profiles - File: ossm-profiles-users - - Name: Security - File: ossm-security - - Name: Traffic management - File: ossm-traffic-manage - - Name: Metrics, logs, and traces - File: ossm-observability - - Name: Performance and scalability - File: ossm-performance-scalability - - Name: Deploying to production - File: ossm-deploy-production - - Name: Federation - File: ossm-federation - - Name: Extensions - File: ossm-extensions - - Name: 3scale WebAssembly for 2.1 - File: ossm-threescale-webassembly-module - - Name: 3scale Istio adapter for 2.0 - File: threescale-adapter - - Name: Troubleshooting Service Mesh - File: ossm-troubleshooting-istio - - Name: Control plane configuration reference - File: ossm-reference-smcp - - Name: Kiali configuration reference - File: ossm-reference-kiali - - Name: Jaeger configuration reference - File: ossm-reference-jaeger - - Name: Uninstalling Service Mesh - File: removing-ossm -- Name: Service Mesh 1.x - Dir: v1x - Topics: - - Name: Service Mesh 1.x release notes - File: servicemesh-release-notes - - Name: Service Mesh architecture - 
File: ossm-architecture - - Name: Service Mesh and Istio differences - File: ossm-vs-community - - Name: Preparing to install Service Mesh - File: preparing-ossm-installation - - Name: Installing Service Mesh - File: installing-ossm - - Name: Security - File: ossm-security - - Name: Traffic management - File: ossm-traffic-manage - - Name: Deploying applications on Service Mesh - File: prepare-to-deploy-applications-ossm - - Name: Data visualization and observability - File: ossm-observability - - Name: Custom resources - File: ossm-custom-resources - - Name: 3scale Istio adapter for 1.x - File: threescale-adapter - - Name: Removing Service Mesh - File: removing-ossm ---- -Name: Serverless -Dir: serverless -Distros: openshift-rosa -Topics: -- Name: About Serverless - Dir: about - Topics: - - Name: Serverless overview - File: about-serverless ---- -Name: Troubleshooting -Dir: sd_support -Distros: openshift-rosa -Topics: -- Name: Remote health monitoring with connected clusters - Dir: remote_health_monitoring - Topics: - - Name: About remote health monitoring - File: about-remote-health-monitoring - - Name: Showing data collected by remote health monitoring - File: showing-data-collected-by-remote-health-monitoring - - Name: Using Insights to identify issues with your cluster - File: using-insights-to-identify-issues-with-your-cluster -- Name: Troubleshooting expired offline access tokens - File: rosa-troubleshooting-expired-tokens -- Name: Troubleshooting installations - File: rosa-troubleshooting-installations -- Name: Troubleshooting IAM roles - File: rosa-troubleshooting-iam-resources -- Name: Troubleshooting cluster deployments - File: rosa-troubleshooting-deployments -- Name: Red Hat OpenShift Service on AWS managed resources - File: rosa-managed-resources diff --git a/_unused_topics/README b/_unused_topics/README deleted file mode 100644 index 5636d8245a15..000000000000 --- a/_unused_topics/README +++ /dev/null @@ -1,2 +0,0 @@ -Placeholder file. Any modules that are not included will be placed here -by the `scripts/find_unused.py` script. diff --git a/_unused_topics/adding-new-devices.adoc b/_unused_topics/adding-new-devices.adoc deleted file mode 100644 index 84ec3221b6e7..000000000000 --- a/_unused_topics/adding-new-devices.adoc +++ /dev/null @@ -1,13 +0,0 @@ -[id="adding-new-devices_{context}"] -= Adding new devices - -Adding a new device is semi-automatic. The provisioner periodically checks for new mounts in configured directories. Administrators must create a new subdirectory, mount a device, and allow Pods to use the device by applying the SELinux label, for example: - ----- -$ chcon -R unconfined_u:object_r:svirt_sandbox_file_t:s0 /mnt/local-storage/ ----- - -[WARNING] -==== -Omitting any of these steps may result in the wrong PV being created. -==== diff --git a/_unused_topics/architecture-new-content.adoc b/_unused_topics/architecture-new-content.adoc deleted file mode 100644 index 83d9ddbc1221..000000000000 --- a/_unused_topics/architecture-new-content.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/architecture.adoc - -[id="architecture-updates_{context}"] -= Additional architecture content - -//Please add additional architecture content for the 4.0 release to this file. -//The docs team will edit the content and modularize it to fit the rest of -//the collection. 
\ No newline at end of file diff --git a/_unused_topics/builds-output-image-digest.adoc b/_unused_topics/builds-output-image-digest.adoc deleted file mode 100644 index c4bb8b1e0aa0..000000000000 --- a/_unused_topics/builds-output-image-digest.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * unused_topics/builds-output-image-digest - -[id="builds-output-image-digest_{context}"] -= Output image digest - -Built images can be uniquely identified by their digest, which can -later be used to pull the image by digest regardless of its current tag. - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -`Docker` and -endif::[] -`Source-to-Image (S2I)` builds store the digest in -`Build.status.output.to.imageDigest` after the image is pushed to a registry. -The digest is computed by the registry. Therefore, it may not always be present, -for example when the registry did not return a digest, or when the builder image -did not understand its format. - -.Built Image Digest After a Successful Push to the Registry -[source,yaml] ----- -status: - output: - to: - imageDigest: sha256:29f5d56d12684887bdfa50dcd29fc31eea4aaf4ad3bec43daf19026a7ce69912 ----- - -[role="_additional-resources"] -.Additional resources -* link:https://docs.docker.com/registry/spec/api/#/content-digests[Docker Registry HTTP API V2: digest] -* link:https://docs.docker.com/engine/reference/commandline/pull/#/pull-an-image-by-digest-immutable-identifier[`docker pull`: pull the image by digest] diff --git a/_unused_topics/cluster-logging-collector-envvar.adoc b/_unused_topics/cluster-logging-collector-envvar.adoc deleted file mode 100644 index d1a96e696399..000000000000 --- a/_unused_topics/cluster-logging-collector-envvar.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-collector.adoc - -[id="cluster-logging-collector-envvar_{context}"] -= Configuring the logging collector using environment variables - -You can use environment variables to modify the configuration of the Fluentd log -collector. - -See the link:https://github.com/openshift/origin-aggregated-logging/blob/master/fluentd/README.md[Fluentd README] in Github for lists of the -available environment variables. - -.Prerequisites - -* Set OpenShift Logging to the unmanaged state. Operators in an unmanaged state are unsupported and the cluster administrator assumes full control of the individual component configurations and upgrades. - -.Procedure - -Set any of the Fluentd environment variables as needed: - ----- -$ oc set env ds/fluentd = ----- - -For example: - ----- -$ oc set env ds/fluentd BUFFER_SIZE_LIMIT=24 ----- diff --git a/_unused_topics/cluster-logging-configuring-node-selector.adoc b/_unused_topics/cluster-logging-configuring-node-selector.adoc deleted file mode 100644 index 05a470114490..000000000000 --- a/_unused_topics/cluster-logging-configuring-node-selector.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -[id="cluster-logging-configuring-node-selector_{context}"] -= Specifying a node for cluster logging components using node selectors - -Each component specification allows the component to target a specific node. 
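As a minimal sketch of the prerequisite step for the node-selector module above (the node name is a placeholder and the `logging=es` label is only an assumption, chosen to match one of the `nodeSelector` values shown in the procedure that follows), the target nodes would first need to carry the label that the selector references:

----
$ oc label node <node-name> logging=es
$ oc get nodes -l logging=es
----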
- -.Procedure - -Edit the Cluster Logging Custom Resource (CR) in the `openshift-logging` project: - -[source,yaml] ----- -$ oc edit ClusterLogging instance - -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "nodeselector" -spec: - managementState: "Managed" - logStore: - type: "elasticsearch" - elasticsearch: - nodeSelector: <1> - logging: es - nodeCount: 3 - resources: - limits: - memory: 16Gi - requests: - cpu: 500m - memory: 16Gi - storage: - size: "20G" - storageClassName: "gp2" - redundancyPolicy: "ZeroRedundancy" - visualization: - type: "kibana" - kibana: - nodeSelector: <2> - logging: kibana - replicas: 1 - curation: - type: "curator" - curator: - nodeSelector: <3> - logging: curator - schedule: "*/10 * * * *" - collection: - logs: - type: "fluentd" - fluentd: - nodeSelector: <4> - logging: fluentd ----- - -<1> Node selector for Elasticsearch. -<2> Node selector for Kibana. -<3> Node selector for Curator. -<4> Node selector for Fluentd. - - diff --git a/_unused_topics/cluster-logging-elasticsearch-admin.adoc b/_unused_topics/cluster-logging-elasticsearch-admin.adoc deleted file mode 100644 index b1b3843deb19..000000000000 --- a/_unused_topics/cluster-logging-elasticsearch-admin.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -[id="cluster-logging-elasticsearch-admin_{context}"] -= Performing administrative Elasticsearch operations - -An administrator certificate, key, and CA that can be used to communicate with and perform administrative operations on Elasticsearch are provided within the *elasticsearch* secret in the `openshift-logging` project. - -[NOTE] -==== -To confirm whether your OpenShift Logging installation provides these, run: ----- -$ oc describe secret elasticsearch -n openshift-logging ----- -==== - -. Connect to an Elasticsearch pod that is in the cluster on which you are attempting to perform maintenance. - -. To find a pod in a cluster use: -+ ----- -$ oc get pods -l component=elasticsearch -o name -n openshift-logging | head -1 ----- - -. Connect to a pod: -+ ----- -$ oc rsh ----- - -. Once connected to an Elasticsearch container, you can use the certificates mounted from the secret to communicate with Elasticsearch per its link:https://www.elastic.co/guide/en/elasticsearch/reference/2.3/indices.html[Indices APIs documentation]. -+ -Fluentd sends its logs to Elasticsearch using the index format *infra-00000x* or *app-00000x*. -+ -For example, to delete all logs for the openshift-logging index, *app-000001*, we can run: -+ ----- -$ curl --key /etc/elasticsearch/secret/admin-key \ ---cert /etc/elasticsearch/secret/admin-cert \ ---cacert /etc/elasticsearch/secret/admin-ca -XDELETE \ -"https://localhost:9200/app-000001" ----- diff --git a/_unused_topics/cluster-logging-exported-fields-aushape.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-aushape.2021-06-04.adoc deleted file mode 100644 index 3223ed28b26e..000000000000 --- a/_unused_topics/cluster-logging-exported-fields-aushape.2021-06-04.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-exported-fields.adoc - -[id="cluster-logging-exported-fields-aushape_{context}"] -= Aushape exported fields - -These are the Aushape fields exported by OpenShift Logging available for searching -from Elasticsearch and Kibana. - -Audit events converted with Aushape. 
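Relating back to the administrative Elasticsearch operations module above: a hedged follow-on example, assuming the same admin certificates mounted from the `elasticsearch` secret and using the standard Elasticsearch `_cat/indices` API, would be to list the indices before deleting one:

----
$ curl --key /etc/elasticsearch/secret/admin-key \
  --cert /etc/elasticsearch/secret/admin-cert \
  --cacert /etc/elasticsearch/secret/admin-ca \
  "https://localhost:9200/_cat/indices?v"
----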
For more information, see -link:https://github.com/Scribery/aushape[Aushape]. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `aushape.serial` -|Audit event serial number. - -| `aushape.node` -|Name of the host where the audit event occurred. - -| `aushape.error` -|The error aushape encountered while converting the event. - -| `aushape.trimmed` -|An array of JSONPath expressions relative to the event object, specifying -objects or arrays with the content removed as the result of event size limiting. -An empty string means the event removed the content, and an empty array means -the trimming occurred by unspecified objects and arrays. - -| `aushape.text` -|An array log record strings representing the original audit event. -|=== - -[discrete] -[id="exported-fields-aushape.data_{context}"] -=== `aushape.data` Fields - -Parsed audit event data related to Aushape. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `aushape.data.avc` -|type: nested - -| `aushape.data.execve` -|type: string - -| `aushape.data.netfilter_cfg` -|type: nested - -| `aushape.data.obj_pid` -|type: nested - -| `aushape.data.path` -|type: nested -|=== diff --git a/_unused_topics/cluster-logging-exported-fields-collectd.adoc b/_unused_topics/cluster-logging-exported-fields-collectd.adoc deleted file mode 100644 index 75dfb4c71428..000000000000 --- a/_unused_topics/cluster-logging-exported-fields-collectd.adoc +++ /dev/null @@ -1,993 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-exported-fields.adoc - -[id="cluster-logging-exported-fields-collectd_{context}"] -= `collectd` exported fields - -These are the `collectd` and `collectd-*` fields exported by the logging system and available for searching -from Elasticsearch and Kibana. - -[discrete] -[id="exported-fields-collectd_{context}"] -=== `collectd` Fields - -The following fields represent namespace metrics metadata. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.interval` -|type: float - -The `collectd` interval. - -| `collectd.plugin` -|type: string - -The `collectd` plug-in. - -| `collectd.plugin_instance` -|type: string - -The `collectd` plugin_instance. - -| `collectd.type_instance` -|type: string - -The `collectd` `type_instance`. - -| `collectd.type` -|type: string - -The `collectd` type. - -| `collectd.dstypes` -|type: string - -The `collectd` dstypes. -|=== - -[discrete] -[id="exported-fields-collectd.processes_{context}"] -=== `collectd.processes` Fields - -The following field corresponds to the `collectd` processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_state` -|type: integer -The `collectd ps_state` type of processes plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.processes.ps_disk_ops_{context}"] -=== `collectd.processes.ps_disk_ops` Fields - -The `collectd` `ps_disk_ops` type of processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_disk_ops.read` -|type: float - -`TODO` - -| `collectd.processes.ps_disk_ops.write` -|type: float - -`TODO` - -| `collectd.processes.ps_vm` -|type: integer - -The `collectd` `ps_vm` type of processes plug-in. - -| `collectd.processes.ps_rss` -|type: integer - -The `collectd` `ps_rss` type of processes plug-in. - -| `collectd.processes.ps_data` -|type: integer - -The `collectd` `ps_data` type of processes plug-in. 
- -| `collectd.processes.ps_code` -|type: integer - -The `collectd` `ps_code` type of processes plug-in. - -| `collectd.processes.ps_stacksize` -| type: integer - -The `collectd` `ps_stacksize` type of processes plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.processes.ps_cputime_{context}"] -=== `collectd.processes.ps_cputime` Fields - -The `collectd` `ps_cputime` type of processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_cputime.user` -|type: float - -`TODO` - -| `collectd.processes.ps_cputime.syst` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.processes.ps_count_{context}"] -=== `collectd.processes.ps_count` Fields - -The `collectd` `ps_count` type of processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_count.processes` -|type: integer - -`TODO` - -| `collectd.processes.ps_count.threads` -|type: integer - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.processes.ps_pagefaults_{context}"] -=== `collectd.processes.ps_pagefaults` Fields - -The `collectd` `ps_pagefaults` type of processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_pagefaults.majflt` -|type: float - -`TODO` - -| `collectd.processes.ps_pagefaults.minflt` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.processes.ps_disk_octets_{context}"] -=== `collectd.processes.ps_disk_octets` Fields - -The `collectd ps_disk_octets` type of processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_disk_octets.read` -|type: float - -`TODO` - -| `collectd.processes.ps_disk_octets.write` -|type: float - -`TODO` - -| `collectd.processes.fork_rate` -|type: float - -The `collectd` `fork_rate` type of processes plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.disk_{context}"] -=== `collectd.disk` Fields - -Corresponds to `collectd` disk plug-in. - -[discrete] -[id="exported-fields-collectd.disk.disk_merged_{context}"] -=== `collectd.disk.disk_merged` Fields - -The `collectd` `disk_merged` type of disk plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.disk.disk_merged.read` -|type: float - -`TODO` - -| `collectd.disk.disk_merged.write` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.disk.disk_octets_{context}"] -=== `collectd.disk.disk_octets` Fields - -The `collectd` `disk_octets` type of disk plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.disk.disk_octets.read` -|type: float - -`TODO` - -| `collectd.disk.disk_octets.write` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.disk.disk_time_{context}"] -=== `collectd.disk.disk_time` Fields - -The `collectd` `disk_time` type of disk plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.disk.disk_time.read` -|type: float - -`TODO` - -| `collectd.disk.disk_time.write` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.disk.disk_ops_{context}"] -=== `collectd.disk.disk_ops` Fields - -The `collectd` `disk_ops` type of disk plug-in. 
- -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.disk.disk_ops.read` -|type: float - -`TODO` - -| `collectd.disk.disk_ops.write` -|type: float - -`TODO` - -| `collectd.disk.pending_operations` -|type: integer - -The `collectd` `pending_operations` type of disk plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.disk.disk_io_time_{context}"] -=== `collectd.disk.disk_io_time` Fields - -The `collectd disk_io_time` type of disk plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.disk.disk_io_time.io_time` -|type: float - -`TODO` - -| `collectd.disk.disk_io_time.weighted_io_time` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.interface_{context}"] -=== `collectd.interface` Fields - -Corresponds to the `collectd` interface plug-in. - -[discrete] -[id="exported-fields-collectd.interface.if_octets_{context}"] -=== `collectd.interface.if_octets` Fields - -The `collectd` `if_octets` type of interface plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.interface.if_octets.rx` -|type: float - -`TODO` - -| `collectd.interface.if_octets.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.interface.if_packets_{context}"] -=== `collectd.interface.if_packets` Fields - -The `collectd` `if_packets` type of interface plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.interface.if_packets.rx` -|type: float - -`TODO` - -| `collectd.interface.if_packets.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.interface.if_errors_{context}"] -=== `collectd.interface.if_errors` Fields - -The `collectd` `if_errors` type of interface plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.interface.if_errors.rx` -|type: float - -`TODO` - -| `collectd.interface.if_errors.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.interface.if_dropped_{context}"] -=== collectd.interface.if_dropped Fields - -The `collectd` `if_dropped` type of interface plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.interface.if_dropped.rx` -|type: float - -`TODO` - -| `collectd.interface.if_dropped.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt_{context}"] -=== `collectd.virt` Fields - -Corresponds to `collectd` virt plug-in. - -[discrete] -[id="exported-fields-collectd.virt.if_octets_{context}"] -=== `collectd.virt.if_octets` Fields - -The `collectd if_octets` type of virt plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.if_octets.rx` -|type: float - -`TODO` - -| `collectd.virt.if_octets.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt.if_packets_{context}"] -=== `collectd.virt.if_packets` Fields - -The `collectd` `if_packets` type of virt plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.if_packets.rx` -|type: float - -`TODO` - -| `collectd.virt.if_packets.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt.if_errors_{context}"] -=== `collectd.virt.if_errors` Fields - -The `collectd` `if_errors` type of virt plug-in. 
- -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.if_errors.rx` -|type: float - -`TODO` - -| `collectd.virt.if_errors.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt.if_dropped_{context}"] -=== `collectd.virt.if_dropped` Fields - -The `collectd` `if_dropped` type of virt plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.if_dropped.rx` -|type: float - -`TODO` - -| `collectd.virt.if_dropped.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt.disk_ops_{context}"] -=== `collectd.virt.disk_ops` Fields - -The `collectd` `disk_ops` type of virt plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| collectd.virt.disk_ops.read -|type: float - -`TODO` - -| `collectd.virt.disk_ops.write` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt.disk_octets_{context}"] -=== `collectd.virt.disk_octets` Fields - -The `collectd` `disk_octets` type of virt plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.disk_octets.read` -|type: float - -`TODO` - -| `collectd.virt.disk_octets.write` -|type: float - -`TODO` - -| `collectd.virt.memory` -|type: float - -The `collectd` memory type of virt plug-in. - -| `collectd.virt.virt_vcpu` -|type: float - -The `collectd` `virt_vcpu` type of virt plug-in. - -| `collectd.virt.virt_cpu_total` -|type: float - -The `collectd` `virt_cpu_total` type of virt plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.CPU_{context}"] -=== `collectd.CPU` Fields - -Corresponds to the `collectd` CPU plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.CPU.percent` -|type: float - -The `collectd` type percent of plug-in CPU. -|=== - -[discrete] -[id="exported-fields-collectd.df_{context}"] -=== collectd.df Fields - -Corresponds to the `collectd` `df` plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.df.df_complex` -|type: float - -The `collectd` type `df_complex` of plug-in `df`. - -| `collectd.df.percent_bytes` -|type: float - -The `collectd` type `percent_bytes` of plug-in `df`. -|=== - -[discrete] -[id="exported-fields-collectd.entropy_{context}"] -=== `collectd.entropy` Fields - -Corresponds to the `collectd` entropy plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.entropy.entropy` -|type: integer - -The `collectd` entropy type of entropy plug-in. -|=== - -//// -[discrete] -[id="exported-fields-collectd.nfs_{context}"] -=== `collectd.nfs` Fields - -Corresponds to the `collectd` NFS plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.nfs.nfs_procedure` -|type: integer - -The `collectd` `nfs_procedure` type of nfs plug-in. -|=== -//// - -[discrete] -[id="exported-fields-collectd.memory_{context}"] -=== `collectd.memory` Fields - -Corresponds to the `collectd` memory plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.memory.memory` -|type: float - -The `collectd` memory type of memory plug-in. - -| `collectd.memory.percent` -|type: float - -The `collectd` percent type of memory plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.swap_{context}"] -=== `collectd.swap` Fields - -Corresponds to the `collectd` swap plug-in. 
- -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.swap.swap` -|type: integer - -The `collectd` swap type of swap plug-in. - -| `collectd.swap.swap_io` -|type: integer - -The `collectd swap_io` type of swap plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.load_{context}"] -=== `collectd.load` Fields - -Corresponds to the `collectd` load plug-in. - -[discrete] -[id="exported-fields-collectd.load.load_{context}"] -=== `collectd.load.load` Fields - -The `collectd` load type of load plug-in - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.load.load.shortterm` -|type: float - -`TODO` - -| `collectd.load.load.midterm` -|type: float - -`TODO` - -| `collectd.load.load.longterm` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.aggregation_{context}"] -=== `collectd.aggregation` Fields - -Corresponds to `collectd` aggregation plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.aggregation.percent` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.statsd_{context}"] -=== `collectd.statsd` Fields - -Corresponds to `collectd` `statsd` plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.statsd.host_cpu` -|type: integer - -The `collectd` CPU type of `statsd` plug-in. - -| `collectd.statsd.host_elapsed_time` -|type: integer - -The `collectd` `elapsed_time` type of `statsd` plug-in. - -| `collectd.statsd.host_memory` -|type: integer - -The `collectd` memory type of `statsd` plug-in. - -| `collectd.statsd.host_nic_speed` -|type: integer - -The `collectd` `nic_speed` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_rx` -|type: integer - -The `collectd` `nic_rx` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_tx` -|type: integer - -The `collectd` `nic_tx` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_rx_dropped` -|type: integer - -The `collectd` `nic_rx_dropped` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_tx_dropped` -|type: integer - -The `collectd` `nic_tx_dropped` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_rx_errors` -|type: integer - -The `collectd` `nic_rx_errors` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_tx_errors` -|type: integer - -The `collectd` `nic_tx_errors` type of `statsd` plug-in. - -| `collectd.statsd.host_storage` -|type: integer - -The `collectd` storage type of `statsd` plug-in. - -| `collectd.statsd.host_swap` -|type: integer - -The `collectd` swap type of `statsd` plug-in. - -| `collectd.statsd.host_vdsm` -|type: integer - -The `collectd` VDSM type of `statsd` plug-in. - -| `collectd.statsd.host_vms` -|type: integer - -The `collectd` VMS type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_tx_dropped` -|type: integer - -The `collectd` `nic_tx_dropped` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_rx_bytes` -|type: integer - -The `collectd` `nic_rx_bytes` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_tx_bytes` -|type: integer - -The `collectd` `nic_tx_bytes` type of `statsd` plug-in. - -| `collectd.statsd.vm_balloon_min` -|type: integer - -The `collectd` `balloon_min` type of `statsd` plug-in. - -| `collectd.statsd.vm_balloon_max` -|type: integer - -The `collectd` `balloon_max` type of `statsd` plug-in. - -| `collectd.statsd.vm_balloon_target` -|type: integer - -The `collectd` `balloon_target` type of `statsd` plug-in. 
- -| `collectd.statsd.vm_balloon_cur` -| type: integer - -The `collectd` `balloon_cur` type of `statsd` plug-in. - -| `collectd.statsd.vm_cpu_sys` -|type: integer - -The `collectd` `cpu_sys` type of `statsd` plug-in. - -| `collectd.statsd.vm_cpu_usage` -|type: integer - -The `collectd` `cpu_usage` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_read_ops` -|type: integer - -The `collectd` `disk_read_ops` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_write_ops` -|type: integer - -The collectd` `disk_write_ops` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_flush_latency` -|type: integer - -The `collectd` `disk_flush_latency` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_apparent_size` -|type: integer - -The `collectd` `disk_apparent_size` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_write_bytes` -|type: integer - -The `collectd` `disk_write_bytes` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_write_rate` -|type: integer - -The `collectd` `disk_write_rate` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_true_size` -|type: integer - -The `collectd` `disk_true_size` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_read_rate` -|type: integer - -The `collectd` `disk_read_rate` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_write_latency` -|type: integer - -The `collectd` `disk_write_latency` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_read_latency` -|type: integer - -The `collectd` `disk_read_latency` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_read_bytes` -|type: integer - -The `collectd` `disk_read_bytes` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_rx_dropped` -|type: integer - -The `collectd` `nic_rx_dropped` type of `statsd` plug-in. - -| `collectd.statsd.vm_cpu_user` -|type: integer - -The `collectd` `cpu_user` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_rx_errors` -|type: integer - -The `collectd` `nic_rx_errors` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_tx_errors` -|type: integer - -The `collectd` `nic_tx_errors` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_speed` -|type: integer - -The `collectd` `nic_speed` type of `statsd` plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.postgresql_{context}"] -=== `collectd.postgresql Fields` - -Corresponds to `collectd` `postgresql` plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.postgresql.pg_n_tup_g` -|type: integer - -The `collectd` type `pg_n_tup_g` of plug-in postgresql. - -| `collectd.postgresql.pg_n_tup_c` -|type: integer - -The `collectd` type `pg_n_tup_c` of plug-in postgresql. - -| `collectd.postgresql.pg_numbackends` -|type: integer - -The `collectd` type `pg_numbackends` of plug-in postgresql. - -| `collectd.postgresql.pg_xact` -|type: integer - -The `collectd` type `pg_xact` of plug-in postgresql. - -| `collectd.postgresql.pg_db_size` -|type: integer - -The `collectd` type `pg_db_size` of plug-in postgresql. - -| `collectd.postgresql.pg_blks` -|type: integer - -The `collectd` type `pg_blks` of plug-in postgresql. 
-|=== diff --git a/_unused_topics/cluster-logging-exported-fields-container.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-container.2021-06-04.adoc deleted file mode 100644 index d893b804f0cc..000000000000 --- a/_unused_topics/cluster-logging-exported-fields-container.2021-06-04.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-exported-fields.adoc - -[id="cluster-logging-exported-fields-container_{context}"] -= Container exported fields - -These are the Docker fields exported by OpenShift Logging available for searching from Elasticsearch and Kibana. -Namespace for docker container-specific metadata. The docker.container_id is the Docker container ID. - - -[discrete] -[id="exported-fields-pipeline_metadata.collector_{context}"] -=== `pipeline_metadata.collector` Fields - -This section contains metadata specific to the collector. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `pipeline_metadata.collector.hostname` -|FQDN of the collector. It might be different from the FQDN of the actual emitter -of the logs. - -| `pipeline_metadata.collector.name` -|Name of the collector. - -| `pipeline_metadata.collector.version` -|Version of the collector. - -| `pipeline_metadata.collector.ipaddr4` -|IP address v4 of the collector server, can be an array. - -| `pipeline_metadata.collector.ipaddr6` -|IP address v6 of the collector server, can be an array. - -| `pipeline_metadata.collector.inputname` -|How the log message was received by the collector whether it was TCP/UDP, or -imjournal/imfile. - -| `pipeline_metadata.collector.received_at` -|Time when the message was received by the collector. - -| `pipeline_metadata.collector.original_raw_message` -|The original non-parsed log message, collected by the collector or as close to the -source as possible. -|=== - -[discrete] -[id="exported-fields-pipeline_metadata.normalizer_{context}"] -=== `pipeline_metadata.normalizer` Fields - -This section contains metadata specific to the normalizer. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `pipeline_metadata.normalizer.hostname` -|FQDN of the normalizer. - -| `pipeline_metadata.normalizer.name` -|Name of the normalizer. - -| `pipeline_metadata.normalizer.version` -|Version of the normalizer. - -| `pipeline_metadata.normalizer.ipaddr4` -|IP address v4 of the normalizer server, can be an array. - -| `pipeline_metadata.normalizer.ipaddr6` -|IP address v6 of the normalizer server, can be an array. - -| `pipeline_metadata.normalizer.inputname` -|how the log message was received by the normalizer whether it was TCP/UDP. - -| `pipeline_metadata.normalizer.received_at` -|Time when the message was received by the normalizer. - -| `pipeline_metadata.normalizer.original_raw_message` -|The original non-parsed log message as it is received by the normalizer. - -| `pipeline_metadata.trace` -|The field records the trace of the message. Each collector and normalizer appends -information about itself and the date and time when the message was processed. 
-|=== diff --git a/_unused_topics/cluster-logging-exported-fields-default.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-default.2021-06-04.adoc deleted file mode 100644 index e26b60808513..000000000000 --- a/_unused_topics/cluster-logging-exported-fields-default.2021-06-04.adoc +++ /dev/null @@ -1,1100 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-exported-fields.adoc - -[id="cluster-logging-exported-fields-default_{context}"] -= Default exported fields - -These are the default fields exported by the logging system and available for searching -from Elasticsearch and Kibana. The default fields are Top Level and `collectd*` - -[discrete] -=== Top Level Fields - -The top level fields are common to every application and can be present in -every record. For the Elasticsearch template, top level fields populate the actual -mappings of `default` in the template's mapping section. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `@timestamp` -| The UTC value marking when the log payload was created, or when the log payload -was first collected if the creation time is not known. This is the log -processing pipeline's best effort determination of when the log payload was -generated. Add the `@` prefix convention to note a field as being reserved for a -particular use. With Elasticsearch, most tools look for `@timestamp` by default. -For example, the format would be 2015-01-24 14:06:05.071000. - -| `geoip` -|This is geo-ip of the machine. - -| `hostname` -|The `hostname` is the fully qualified domain name (FQDN) of the entity -generating the original payload. This field is an attempt to derive this -context. Sometimes the entity generating it knows the context. While other times -that entity has a restricted namespace itself, which is known by the collector -or normalizer. - -| `ipaddr4` -|The IP address V4 of the source server, which can be an array. - -| `ipaddr6` -|The IP address V6 of the source server, if available. - -| `level` -|The logging level as provided by rsyslog (severitytext property), python's -logging module. Possible values are as listed at -link:http://sourceware.org/git/?p=glibc.git;a=blob;f=misc/sys/syslog.h;h=ee01478c4b19a954426a96448577c5a76e6647c0;hb=HEAD#l74[`misc/sys/syslog.h`] -plus `trace` and `unknown`. For example, _alert crit debug emerg err info notice -trace unknown warning_. Note that `trace` is not in the `syslog.h` list but many -applications use it. - -* You should only use `unknown` when the logging system gets a value it does not -understand, and note that it is the highest level. - -* Consider `trace` as higher or more verbose, than `debug`. - -* `error` is deprecated, use `err`. - -* Convert `panic` to `emerg`. - -* Convert `warn` to `warning`. - -Numeric values from `syslog/journal PRIORITY` can usually be mapped using the -priority values as listed at -link:http://sourceware.org/git/?p=glibc.git;a=blob;f=misc/sys/syslog.h;h=ee01478c4b19a954426a96448577c5a76e6647c0;hb=HEAD#l51[misc/sys/syslog.h]. - -Log levels and priorities from other logging systems should be mapped to the -nearest match. See -link:https://docs.python.org/2.7/library/logging.html#logging-levels[python -logging] for an example. - -| `message` -|A typical log entry message, or payload. It can be stripped of metadata pulled -out of it by the collector or normalizer, that is UTF-8 encoded. - -| `pid` -|This is the process ID of the logging entity, if available. 
- -| `service` -|The name of the service associated with the logging entity, if available. For -example, the `syslog APP-NAME` property is mapped to -the service field. - -| `tags` -|Optionally provided operator defined list of tags placed on each log by the -collector or normalizer. The payload can be a string with whitespace-delimited -string tokens, or a JSON list of string tokens. - -| `file` -|Optional path to the file containing the log entry local to the collector `TODO` -analyzer for file paths. - -| `offset` -|The offset value can represent bytes to the start of the log line in the file -(zero or one based), or log line numbers (zero or one based), as long as the -values are strictly monotonically increasing in the context of a single log -file. The values are allowed to wrap, representing a new version of the log file -(rotation). - -| `namespace_name` -|Associate this record with the `namespace` that shares it's name. This value -will not be stored, but it is used to associate the record with the appropriate -`namespace` for access control and visualization. Normally this value will be -given in the tag, but if the protocol does not support sending a tag, this field -can be used. If this field is present, it will override the `namespace` given in -the tag or in `kubernetes.namespace_name`. - -| `namespace_uuid` -|This is the `uuid` associated with the `namespace_name`. This value will not be -stored, but is used to associate the record with the appropriate namespace for -access control and visualization. If this field is present, it will override the -`uuid` given in `kubernetes.namespace_uuid`. This will also cause the Kubernetes -metadata lookup to be skipped for this log record. -|=== - -[discrete] -=== `collectd` Fields - -The following fields represent namespace metrics metadata. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.interval` -|type: float - -The `collectd` interval. - -| `collectd.plugin` -|type: string - -The `collectd` plug-in. - -| `collectd.plugin_instance` -|type: string - -The `collectd` plugin_instance. - -| `collectd.type_instance` -|type: string - -The `collectd` `type_instance`. - -| `collectd.type` -|type: string - -The `collectd` type. - -| `collectd.dstypes` -|type: string - -The `collectd` dstypes. -|=== - -[discrete] -[id="exported-fields-collectd.processes_{context}"] -=== `collectd.processes` Fields - -The following field corresponds to the `collectd` processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_state` -|type: integer -The `collectd ps_state` type of processes plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.processes.ps_disk_ops_{context}"] -=== `collectd.processes.ps_disk_ops` Fields - -The `collectd` `ps_disk_ops` type of processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_disk_ops.read` -|type: float - -`TODO` - -| `collectd.processes.ps_disk_ops.write` -|type: float - -`TODO` - -| `collectd.processes.ps_vm` -|type: integer - -The `collectd` `ps_vm` type of processes plug-in. - -| `collectd.processes.ps_rss` -|type: integer - -The `collectd` `ps_rss` type of processes plug-in. - -| `collectd.processes.ps_data` -|type: integer - -The `collectd` `ps_data` type of processes plug-in. - -| `collectd.processes.ps_code` -|type: integer - -The `collectd` `ps_code` type of processes plug-in. 
- -| `collectd.processes.ps_stacksize` -| type: integer - -The `collectd` `ps_stacksize` type of processes plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.processes.ps_cputime_{context}"] -=== `collectd.processes.ps_cputime` Fields - -The `collectd` `ps_cputime` type of processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_cputime.user` -|type: float - -`TODO` - -| `collectd.processes.ps_cputime.syst` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.processes.ps_count_{context}"] -=== `collectd.processes.ps_count` Fields - -The `collectd` `ps_count` type of processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_count.processes` -|type: integer - -`TODO` - -| `collectd.processes.ps_count.threads` -|type: integer - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.processes.ps_pagefaults_{context}"] -=== `collectd.processes.ps_pagefaults` Fields - -The `collectd` `ps_pagefaults` type of processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_pagefaults.majflt` -|type: float - -`TODO` - -| `collectd.processes.ps_pagefaults.minflt` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.processes.ps_disk_octets_{context}"] -=== `collectd.processes.ps_disk_octets` Fields - -The `collectd ps_disk_octets` type of processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_disk_octets.read` -|type: float - -`TODO` - -| `collectd.processes.ps_disk_octets.write` -|type: float - -`TODO` - -| `collectd.processes.fork_rate` -|type: float - -The `collectd` `fork_rate` type of processes plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.disk_{context}"] -=== `collectd.disk` Fields - -Corresponds to `collectd` disk plug-in. - -[discrete] -[id="exported-fields-collectd.disk.disk_merged_{context}"] -=== `collectd.disk.disk_merged` Fields - -The `collectd` `disk_merged` type of disk plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.disk.disk_merged.read` -|type: float - -`TODO` - -| `collectd.disk.disk_merged.write` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.disk.disk_octets_{context}"] -=== `collectd.disk.disk_octets` Fields - -The `collectd` `disk_octets` type of disk plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.disk.disk_octets.read` -|type: float - -`TODO` - -| `collectd.disk.disk_octets.write` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.disk.disk_time_{context}"] -=== `collectd.disk.disk_time` Fields - -The `collectd` `disk_time` type of disk plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.disk.disk_time.read` -|type: float - -`TODO` - -| `collectd.disk.disk_time.write` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.disk.disk_ops_{context}"] -=== `collectd.disk.disk_ops` Fields - -The `collectd` `disk_ops` type of disk plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.disk.disk_ops.read` -|type: float - -`TODO` - -| `collectd.disk.disk_ops.write` -|type: float - -`TODO` - -| `collectd.disk.pending_operations` -|type: integer - -The `collectd` `pending_operations` type of disk plug-in. 
-|=== - -[discrete] -[id="exported-fields-collectd.disk.disk_io_time_{context}"] -=== `collectd.disk.disk_io_time` Fields - -The `collectd disk_io_time` type of disk plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.disk.disk_io_time.io_time` -|type: float - -`TODO` - -| `collectd.disk.disk_io_time.weighted_io_time` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.interface_{context}"] -=== `collectd.interface` Fields - -Corresponds to the `collectd` interface plug-in. - -[discrete] -[id="exported-fields-collectd.interface.if_octets_{context}"] -=== `collectd.interface.if_octets` Fields - -The `collectd` `if_octets` type of interface plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.interface.if_octets.rx` -|type: float - -`TODO` - -| `collectd.interface.if_octets.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.interface.if_packets_{context}"] -=== `collectd.interface.if_packets` Fields - -The `collectd` `if_packets` type of interface plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.interface.if_packets.rx` -|type: float - -`TODO` - -| `collectd.interface.if_packets.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.interface.if_errors_{context}"] -=== `collectd.interface.if_errors` Fields - -The `collectd` `if_errors` type of interface plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.interface.if_errors.rx` -|type: float - -`TODO` - -| `collectd.interface.if_errors.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.interface.if_dropped_{context}"] -=== collectd.interface.if_dropped Fields - -The `collectd` `if_dropped` type of interface plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.interface.if_dropped.rx` -|type: float - -`TODO` - -| `collectd.interface.if_dropped.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt_{context}"] -=== `collectd.virt` Fields - -Corresponds to `collectd` virt plug-in. - -[discrete] -[id="exported-fields-collectd.virt.if_octets_{context}"] -=== `collectd.virt.if_octets` Fields - -The `collectd if_octets` type of virt plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.if_octets.rx` -|type: float - -`TODO` - -| `collectd.virt.if_octets.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt.if_packets_{context}"] -=== `collectd.virt.if_packets` Fields - -The `collectd` `if_packets` type of virt plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.if_packets.rx` -|type: float - -`TODO` - -| `collectd.virt.if_packets.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt.if_errors_{context}"] -=== `collectd.virt.if_errors` Fields - -The `collectd` `if_errors` type of virt plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.if_errors.rx` -|type: float - -`TODO` - -| `collectd.virt.if_errors.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt.if_dropped_{context}"] -=== `collectd.virt.if_dropped` Fields - -The `collectd` `if_dropped` type of virt plug-in. 
- -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.if_dropped.rx` -|type: float - -`TODO` - -| `collectd.virt.if_dropped.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt.disk_ops_{context}"] -=== `collectd.virt.disk_ops` Fields - -The `collectd` `disk_ops` type of virt plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.disk_ops.read` -|type: float - -`TODO` - -| `collectd.virt.disk_ops.write` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt.disk_octets_{context}"] -=== `collectd.virt.disk_octets` Fields - -The `collectd` `disk_octets` type of virt plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.disk_octets.read` -|type: float - -`TODO` - -| `collectd.virt.disk_octets.write` -|type: float - -`TODO` - -| `collectd.virt.memory` -|type: float - -The `collectd` memory type of virt plug-in. - -| `collectd.virt.virt_vcpu` -|type: float - -The `collectd` `virt_vcpu` type of virt plug-in. - -| `collectd.virt.virt_cpu_total` -|type: float - -The `collectd` `virt_cpu_total` type of virt plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.CPU_{context}"] -=== `collectd.CPU` Fields - -Corresponds to the `collectd` CPU plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.CPU.percent` -|type: float - -The `collectd` type percent of plug-in CPU. -|=== - -[discrete] -[id="exported-fields-collectd.df_{context}"] -=== collectd.df Fields - -Corresponds to the `collectd` `df` plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.df.df_complex` -|type: float - -The `collectd` type `df_complex` of plug-in `df`. - -| `collectd.df.percent_bytes` -|type: float - -The `collectd` type `percent_bytes` of plug-in `df`. -|=== - -[discrete] -[id="exported-fields-collectd.entropy_{context}"] -=== `collectd.entropy` Fields - -Corresponds to the `collectd` entropy plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.entropy.entropy` -|type: integer - -The `collectd` entropy type of entropy plug-in. -|=== - -//// -[discrete] -[id="exported-fields-collectd.nfs_{context}"] -=== `collectd.nfs` Fields - -Corresponds to the `collectd` NFS plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.nfs.nfs_procedure` -|type: integer - -The `collectd` `nfs_procedure` type of nfs plug-in. -|=== -//// - -[discrete] -[id="exported-fields-collectd.memory_{context}"] -=== `collectd.memory` Fields - -Corresponds to the `collectd` memory plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.memory.memory` -|type: float - -The `collectd` memory type of memory plug-in. - -| `collectd.memory.percent` -|type: float - -The `collectd` percent type of memory plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.swap_{context}"] -=== `collectd.swap` Fields - -Corresponds to the `collectd` swap plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.swap.swap` -|type: integer - -The `collectd` swap type of swap plug-in. - -| `collectd.swap.swap_io` -|type: integer - -The `collectd swap_io` type of swap plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.load_{context}"] -=== `collectd.load` Fields - -Corresponds to the `collectd` load plug-in. 
- -[discrete] -[id="exported-fields-collectd.load.load_{context}"] -=== `collectd.load.load` Fields - -The `collectd` load type of load plug-in - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.load.load.shortterm` -|type: float - -`TODO` - -| `collectd.load.load.midterm` -|type: float - -`TODO` - -| `collectd.load.load.longterm` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.aggregation_{context}"] -=== `collectd.aggregation` Fields - -Corresponds to `collectd` aggregation plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.aggregation.percent` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.statsd_{context}"] -=== `collectd.statsd` Fields - -Corresponds to `collectd` `statsd` plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.statsd.host_cpu` -|type: integer - -The `collectd` CPU type of `statsd` plug-in. - -| `collectd.statsd.host_elapsed_time` -|type: integer - -The `collectd` `elapsed_time` type of `statsd` plug-in. - -| `collectd.statsd.host_memory` -|type: integer - -The `collectd` memory type of `statsd` plug-in. - -| `collectd.statsd.host_nic_speed` -|type: integer - -The `collectd` `nic_speed` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_rx` -|type: integer - -The `collectd` `nic_rx` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_tx` -|type: integer - -The `collectd` `nic_tx` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_rx_dropped` -|type: integer - -The `collectd` `nic_rx_dropped` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_tx_dropped` -|type: integer - -The `collectd` `nic_tx_dropped` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_rx_errors` -|type: integer - -The `collectd` `nic_rx_errors` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_tx_errors` -|type: integer - -The `collectd` `nic_tx_errors` type of `statsd` plug-in. - -| `collectd.statsd.host_storage` -|type: integer - -The `collectd` storage type of `statsd` plug-in. - -| `collectd.statsd.host_swap` -|type: integer - -The `collectd` swap type of `statsd` plug-in. - -| `collectd.statsd.host_vdsm` -|type: integer - -The `collectd` VDSM type of `statsd` plug-in. - -| `collectd.statsd.host_vms` -|type: integer - -The `collectd` VMS type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_tx_dropped` -|type: integer - -The `collectd` `nic_tx_dropped` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_rx_bytes` -|type: integer - -The `collectd` `nic_rx_bytes` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_tx_bytes` -|type: integer - -The `collectd` `nic_tx_bytes` type of `statsd` plug-in. - -| `collectd.statsd.vm_balloon_min` -|type: integer - -The `collectd` `balloon_min` type of `statsd` plug-in. - -| `collectd.statsd.vm_balloon_max` -|type: integer - -The `collectd` `balloon_max` type of `statsd` plug-in. - -| `collectd.statsd.vm_balloon_target` -|type: integer - -The `collectd` `balloon_target` type of `statsd` plug-in. - -| `collectd.statsd.vm_balloon_cur` -| type: integer - -The `collectd` `balloon_cur` type of `statsd` plug-in. - -| `collectd.statsd.vm_cpu_sys` -|type: integer - -The `collectd` `cpu_sys` type of `statsd` plug-in. - -| `collectd.statsd.vm_cpu_usage` -|type: integer - -The `collectd` `cpu_usage` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_read_ops` -|type: integer - -The `collectd` `disk_read_ops` type of `statsd` plug-in. 
- -| `collectd.statsd.vm_disk_write_ops` -|type: integer - -The `collectd` `disk_write_ops` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_flush_latency` -|type: integer - -The `collectd` `disk_flush_latency` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_apparent_size` -|type: integer - -The `collectd` `disk_apparent_size` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_write_bytes` -|type: integer - -The `collectd` `disk_write_bytes` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_write_rate` -|type: integer - -The `collectd` `disk_write_rate` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_true_size` -|type: integer - -The `collectd` `disk_true_size` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_read_rate` -|type: integer - -The `collectd` `disk_read_rate` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_write_latency` -|type: integer - -The `collectd` `disk_write_latency` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_read_latency` -|type: integer - -The `collectd` `disk_read_latency` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_read_bytes` -|type: integer - -The `collectd` `disk_read_bytes` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_rx_dropped` -|type: integer - -The `collectd` `nic_rx_dropped` type of `statsd` plug-in. - -| `collectd.statsd.vm_cpu_user` -|type: integer - -The `collectd` `cpu_user` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_rx_errors` -|type: integer - -The `collectd` `nic_rx_errors` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_tx_errors` -|type: integer - -The `collectd` `nic_tx_errors` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_speed` -|type: integer - -The `collectd` `nic_speed` type of `statsd` plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.postgresql_{context}"] -=== `collectd.postgresql Fields` - -Corresponds to `collectd` `postgresql` plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.postgresql.pg_n_tup_g` -|type: integer - -The `collectd` type `pg_n_tup_g` of plug-in postgresql. - -| `collectd.postgresql.pg_n_tup_c` -|type: integer - -The `collectd` type `pg_n_tup_c` of plug-in postgresql. - -| `collectd.postgresql.pg_numbackends` -|type: integer - -The `collectd` type `pg_numbackends` of plug-in postgresql. - -| `collectd.postgresql.pg_xact` -|type: integer - -The `collectd` type `pg_xact` of plug-in postgresql. - -| `collectd.postgresql.pg_db_size` -|type: integer - -The `collectd` type `pg_db_size` of plug-in postgresql. - -| `collectd.postgresql.pg_blks` -|type: integer - -The `collectd` type `pg_blks` of plug-in postgresql. -|=== diff --git a/_unused_topics/cluster-logging-exported-fields-docker.adoc b/_unused_topics/cluster-logging-exported-fields-docker.adoc deleted file mode 100644 index 26d77f062ca0..000000000000 --- a/_unused_topics/cluster-logging-exported-fields-docker.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-exported-fields.adoc - -[id="cluster-logging-exported-fields-container_{context}"] -= Container exported fields - -These are the Docker fields exported by OpenShift Logging available for searching from Elasticsearch and Kibana. -Namespace for docker container-specific metadata. The docker.container_id is the Docker container ID. - - -[discrete] -[id="pipeline_metadata.collector_{context}"] -=== `pipeline_metadata.collector` Fields - -This section contains metadata specific to the collector. 
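As a point of reference, a collector metadata block in an indexed log record has roughly the following shape. This is a hedged sketch: the field names match the parameters in the table below, while the host name, version, addresses, and timestamps are invented for illustration.

[source,yaml]
----
# Illustrative pipeline_metadata.collector block (all values are examples only)
pipeline_metadata:
  collector:
    hostname: fluentd-7x2kq.collector.example.com
    name: fluentd
    version: 1.7.4
    ipaddr4: 10.128.2.12
    inputname: imjournal
    received_at: "2021-06-04T12:00:00.000000+00:00"
    original_raw_message: "Jun 04 12:00:00 node-1 myapp[1234]: request completed"
----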
- -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `pipeline_metadata.collector.hostname` -|FQDN of the collector. It might be different from the FQDN of the actual emitter -of the logs. - -| `pipeline_metadata.collector.name` -|Name of the collector. - -| `pipeline_metadata.collector.version` -|Version of the collector. - -| `pipeline_metadata.collector.ipaddr4` -|IP address v4 of the collector server, can be an array. - -| `pipeline_metadata.collector.ipaddr6` -|IP address v6 of the collector server, can be an array. - -| `pipeline_metadata.collector.inputname` -|How the log message was received by the collector whether it was TCP/UDP, or -imjournal/imfile. - -| `pipeline_metadata.collector.received_at` -|Time when the message was received by the collector. - -| `pipeline_metadata.collector.original_raw_message` -|The original non-parsed log message, collected by the collector or as close to the -source as possible. -|=== - -[discrete] -[id="exported-fields-pipeline_metadata.normalizer_{context}"] -=== `pipeline_metadata.normalizer` Fields - -This section contains metadata specific to the normalizer. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `pipeline_metadata.normalizer.hostname` -|FQDN of the normalizer. - -| `pipeline_metadata.normalizer.name` -|Name of the normalizer. - -| `pipeline_metadata.normalizer.version` -|Version of the normalizer. - -| `pipeline_metadata.normalizer.ipaddr4` -|IP address v4 of the normalizer server, can be an array. - -| `pipeline_metadata.normalizer.ipaddr6` -|IP address v6 of the normalizer server, can be an array. - -| `pipeline_metadata.normalizer.inputname` -|how the log message was received by the normalizer whether it was TCP/UDP. - -| `pipeline_metadata.normalizer.received_at` -|Time when the message was received by the normalizer. - -| `pipeline_metadata.normalizer.original_raw_message` -|The original non-parsed log message as it is received by the normalizer. - -| `pipeline_metadata.trace` -|The field records the trace of the message. Each collector and normalizer appends -information about itself and the date and time when the message was processed. -|=== diff --git a/_unused_topics/cluster-logging-exported-fields-kubernetes.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-kubernetes.2021-06-04.adoc deleted file mode 100644 index d40a3ddd446e..000000000000 --- a/_unused_topics/cluster-logging-exported-fields-kubernetes.2021-06-04.adoc +++ /dev/null @@ -1,83 +0,0 @@ -[id="cluster-logging-exported-fields-kubernetes_{context}"] -= Kubernetes - -The following fields can be present in the namespace for kubernetes-specific metadata. - -== kubernetes.pod_name - -The name of the pod - -[horizontal] -Data type:: keyword - - -== kubernetes.pod_id - -Kubernetes ID of the pod. - -[horizontal] -Data type:: keyword - - -== kubernetes.namespace_name - -The name of the namespace in Kubernetes. - -[horizontal] -Data type:: keyword - - -== kubernetes.namespace_id - -ID of the namespace in Kubernetes. - -[horizontal] -Data type:: keyword - - -== kubernetes.host - -Kubernetes node name - -[horizontal] -Data type:: keyword - - -== kubernetes.master_url - -Kubernetes Master URL - -[horizontal] -Data type:: keyword - - -== kubernetes.container_name - -The name of the container in Kubernetes. 
- -[horizontal] -Data type:: text - - -== kubernetes.annotations - -Annotations associated with the Kubernetes object - -[horizontal] -Data type:: group - - -== kubernetes.labels - -Labels attached to the Kubernetes object Each label name is a subfield of labels field. Each label name is de-dotted: dots in the name are replaced with underscores. - -[horizontal] -Data type:: group - - -== kubernetes.event - -The kubernetes event obtained from kubernetes master API The event is already JSON object and as whole nested under kubernetes field This description should loosely follow 'type Event' in https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#event-v1-core - -[horizontal] -Data type:: group diff --git a/_unused_topics/cluster-logging-exported-fields-rsyslog.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-rsyslog.2021-06-04.adoc deleted file mode 100644 index fec43d97ad1a..000000000000 --- a/_unused_topics/cluster-logging-exported-fields-rsyslog.2021-06-04.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-exported-fields.adoc - -[id="cluster-logging-exported-fields-rsyslog_{context}"] -= `rsyslog` exported fields - -These are the `rsyslog` fields exported by the logging system and available for searching -from Elasticsearch and Kibana. - -The following fields are RFC5424 based metadata. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `rsyslog.facility` -|See `syslog` specification for more information on `rsyslog`. - -| `rsyslog.protocol-version` -|This is the `rsyslog` protocol version. - -| `rsyslog.structured-data` -|See `syslog` specification for more information on `syslog` structured-data. - -| `rsyslog.msgid` -|This is the `syslog` msgid field. - -| `rsyslog.appname` -|If `app-name` is the same as `programname`, then only fill top-level field `service`. -If `app-name` is not equal to `programname`, this field will hold `app-name`. -See syslog specifications for more information. -|=== diff --git a/_unused_topics/cluster-logging-exported-fields-systemd.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-systemd.2021-06-04.adoc deleted file mode 100644 index 19e1d6a4cdca..000000000000 --- a/_unused_topics/cluster-logging-exported-fields-systemd.2021-06-04.adoc +++ /dev/null @@ -1,195 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-exported-fields.adoc - -[id="cluster-logging-exported-fields-systemd_{context}"] -= systemd exported fields - -These are the `systemd` fields exported by OpenShift Logging available for searching -from Elasticsearch and Kibana. - -Contains common fields specific to `systemd` journal. -link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html[Applications] -can write their own fields to the journal. These will be available under the -`systemd.u` namespace. `RESULT` and `UNIT` are two such fields. - -[discrete] -[id="exported-fields-systemd.k_{context}"] -=== `systemd.k` Fields - -The following table contains `systemd` kernel-specific metadata. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `systemd.k.KERNEL_DEVICE` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_KERNEL_DEVICE=[`systemd.k.KERNEL_DEVICE`] -is the kernel device name. 
- -| `systemd.k.KERNEL_SUBSYSTEM` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_KERNEL_SUBSYSTEM=[`systemd.k.KERNEL_SUBSYSTEM`] -is the kernel subsystem name. - -| `systemd.k.UDEV_DEVLINK` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_UDEV_DEVLINK=[`systemd.k.UDEV_DEVLINK`] -includes additional symlink names that point to the node. - -| `systemd.k.UDEV_DEVNODE` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_UDEV_DEVNODE=[`systemd.k.UDEV_DEVNODE`] -is the node path of the device. - -| `systemd.k.UDEV_SYSNAME` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_UDEV_SYSNAME=[ `systemd.k.UDEV_SYSNAME`] -is the kernel device name. - -|=== - -[discrete] -[id="exported-fields-systemd.t_{context}"] -=== `systemd.t` Fields - -`systemd.t Fields` are trusted journal fields, fields that are implicitly added -by the journal, and cannot be altered by client code. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `systemd.t.AUDIT_LOGINUID` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_AUDIT_SESSION=[`systemd.t.AUDIT_LOGINUID`] -is the user ID for the journal entry process. - -| `systemd.t.BOOT_ID` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_BOOT_ID=[`systemd.t.BOOT_ID`] -is the kernel boot ID. - -| `systemd.t.AUDIT_SESSION` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_AUDIT_SESSION=[`systemd.t.AUDIT_SESSION`] -is the session for the journal entry process. - -| `systemd.t.CAP_EFFECTIVE` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_CAP_EFFECTIVE=[`systemd.t.CAP_EFFECTIVE`] -represents the capabilities of the journal entry process. - -| `systemd.t.CMDLINE` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_COMM=[`systemd.t.CMDLINE`] -is the command line of the journal entry process. - -| `systemd.t.COMM` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_COMM=[`systemd.t.COMM`] -is the name of the journal entry process. - -| `systemd.t.EXE` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_COMM=[`systemd.t.EXE`] -is the executable path of the journal entry process. - -| `systemd.t.GID` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_PID=[`systemd.t.GID`] -is the group ID for the journal entry process. - -| `systemd.t.HOSTNAME` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_HOSTNAME=[`systemd.t.HOSTNAME`] -is the name of the host. - -| `systemd.t.MACHINE_ID` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_MACHINE_ID=[`systemd.t.MACHINE_ID`] -is the machine ID of the host. - -| `systemd.t.PID` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_PID=[`systemd.t.PID`] -is the process ID for the journal entry process. - -| `systemd.t.SELINUX_CONTEXT` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SELINUX_CONTEXT=[`systemd.t.SELINUX_CONTEXT`] -is the security context, or label, for the journal entry process. 
- -| `systemd.t.SOURCE_REALTIME_TIMESTAMP` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SOURCE_REALTIME_TIMESTAMP=[`systemd.t.SOURCE_REALTIME_TIMESTAMP`] -is the earliest and most reliable timestamp of the message. This is converted to RFC 3339 NS format. - -| `systemd.t.SYSTEMD_CGROUP` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_CGROUP`] -is the `systemd` control group path. - -| `systemd.t.SYSTEMD_OWNER_UID` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_OWNER_UID`] -is the owner ID of the session. - -| `systemd.t.SYSTEMD_SESSION` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_SESSION`], -if applicable, is the `systemd` session ID. - -| `systemd.t.SYSTEMD_SLICE` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_SLICE`] -is the slice unit of the journal entry process. - -| `systemd.t.SYSTEMD_UNIT` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_UNIT`] -is the unit name for a session. - -| `systemd.t.SYSTEMD_USER_UNIT` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_USER_UNIT`], -if applicable, is the user unit name for a session. - -| `systemd.t.TRANSPORT` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_TRANSPORT=[`systemd.t.TRANSPORT`] -is the method of entry by the journal service. This includes, `audit`, `driver`, -`syslog`, `journal`, `stdout`, and `kernel`. - -| `systemd.t.UID` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_PID=[`systemd.t.UID`] -is the user ID for the journal entry process. - -| `systemd.t.SYSLOG_FACILITY` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#SYSLOG_FACILITY=[`systemd.t.SYSLOG_FACILITY`] -is the field containing the facility, formatted as a decimal string, for `syslog`. - -| `systemd.t.SYSLOG_IDENTIFIER` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#SYSLOG_FACILITY=[`systemd.t.systemd.t.SYSLOG_IDENTIFIER`] -is the identifier for `syslog`. - -| `systemd.t.SYSLOG_PID` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#SYSLOG_FACILITY=[`SYSLOG_PID`] -is the client process ID for `syslog`. -|=== - -[discrete] -[id="exported-fields-systemd.u_{context}"] -=== `systemd.u` Fields - -`systemd.u Fields` are directly passed from clients and stored in the journal. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `systemd.u.CODE_FILE` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#CODE_FILE=[`systemd.u.CODE_FILE`] -is the code location containing the filename of the source. - -| `systemd.u.CODE_FUNCTION` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#CODE_FILE=[`systemd.u.CODE_FUNCTION`] -is the code location containing the function of the source. - -| `systemd.u.CODE_LINE` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#CODE_FILE=[`systemd.u.CODE_LINE`] -is the code location containing the line number of the source. 
- -| `systemd.u.ERRNO` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#ERRNO=[`systemd.u.ERRNO`], -if present, is the low-level error number formatted in numeric value, as a decimal string. - -| `systemd.u.MESSAGE_ID` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#MESSAGE_ID=[`systemd.u.MESSAGE_ID`] -is the message identifier ID for recognizing message types. - -| `systemd.u.RESULT` -|For private use only. - -| `systemd.u.UNIT` -|For private use only. -|=== diff --git a/_unused_topics/cluster-logging-exported-fields-tlog.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-tlog.2021-06-04.adoc deleted file mode 100644 index 82724afc1591..000000000000 --- a/_unused_topics/cluster-logging-exported-fields-tlog.2021-06-04.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-exported-fields.adoc - -[id="cluster-logging-exported-fields-tlog_{context}"] -= Tlog exported fields - -These are the Tlog fields exported by the OpenShift Logging system and available for searching -from Elasticsearch and Kibana. - -Tlog terminal I/O recording messages. For more information see -link:https://github.com/Scribery/tlog[Tlog]. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `tlog.ver` -|Message format version number. - -| `tlog.user` -|Recorded user name. - -| `tlog.term` -|Terminal type name. - -| `tlog.session` -|Audit session ID of the recorded session. - -| `tlog.id` -|ID of the message within the session. - -| `tlog.pos` -|Message position in the session, milliseconds. - -| `tlog.timing` -|Distribution of this message's events in time. - -| `tlog.in_txt` -|Input text with invalid characters scrubbed. - -| `tlog.in_bin` -|Scrubbed invalid input characters as bytes. - -| `tlog.out_txt` -|Output text with invalid characters scrubbed. - -| `tlog.out_bin` -|Scrubbed invalid output characters as bytes. -|=== diff --git a/_unused_topics/cluster-logging-kibana-console-launch.adoc b/_unused_topics/cluster-logging-kibana-console-launch.adoc deleted file mode 100644 index 44b23c483030..000000000000 --- a/_unused_topics/cluster-logging-kibana-console-launch.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-kibana-console.adoc -// * logging/cluster-logging-visualizer.adoc - -[id="cluster-logging-kibana-visualize_{context}"] -= Launching the Kibana interface - -The Kibana interface is a browser-based console -to query, discover, and visualize your Elasticsearch data through histograms, line graphs, -pie charts, heat maps, built-in geospatial support, and other visualizations. - -.Procedure - -To launch the Kibana interface: - -. In the {product-title} console, click *Observe* -> *Logging*. - -. Log in using the same credentials you use to log in to the {product-title} console. -+ -The Kibana interface launches. You can now: -+ -* Search and browse your data using the Discover page. -* Chart and map your data using the Visualize page. -* Create and view custom dashboards using the Dashboard page. -+ -Use and configuration of the Kibana interface is beyond the scope of this documentation. For more information, -on using the interface, see the link:https://www.elastic.co/guide/en/kibana/5.6/connect-to-elasticsearch.html[Kibana documentation]. 
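To connect the exported fields described above with what appears on the Kibana *Discover* page, the following YAML sketch shows the general shape of a single container log document. The field names follow the tables in this section; every value shown is illustrative and is not output from a real cluster.

[source,yaml]
----
# Illustrative container log document as indexed in Elasticsearch
# (all values are examples only)
"@timestamp": "2021-06-04T12:00:00.071000+00:00"
message: "GET /healthz 200"
level: info
hostname: node-1.example.com
kubernetes:
  namespace_name: my-project
  pod_name: frontend-6b7c9d8f4-abcde
  container_name: frontend
  labels:
    app: frontend
pipeline_metadata:
  collector:
    name: fluentd
    received_at: "2021-06-04T12:00:00.123456+00:00"
docker:
  container_id: 4f8a9c0d1e2f
----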
diff --git a/_unused_topics/cluster-logging-log-forwarding-disable.adoc b/_unused_topics/cluster-logging-log-forwarding-disable.adoc deleted file mode 100644 index 680ea9b95686..000000000000 --- a/_unused_topics/cluster-logging-log-forwarding-disable.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-external.adoc - -[id="cluster-logging-log-forwarding-disable_{context}"] -= Disabling the Log Forwarding feature - -To disable the Log Forwarding feature, remove the `clusterlogging.openshift.io/logforwardingtechpreview:enabled` parameter from the Cluster Logging custom resource (CR) and delete the `ClusterLogForwarder` CR. The container and node logs will be forwarded to the internal {product-title} Elasticsearch instance. - -[IMPORTANT] -==== -You cannot disable Log Forwarding by setting the `disableDefaultForwarding` to `false` in the `ClusterLogForwarder` CR. This prevents OpenShift Logging from sending logs to the specified endpoints *and* to default internal {product-title} Elasticsearch instance. -==== - -.Procedure - -To disable the Log Forwarding feature: - -. Edit the OpenShift Logging CR in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc edit ClusterLogging instance ----- - -. Remove the `clusterlogging.openshift.io/logforwardingtechpreview` annotation: -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - annotations: - clusterlogging.openshift.io/logforwardingtechpreview: enabled <1> - name: "instance" - namespace: "openshift-logging" -... ----- -<1> Remove this annotation. - -. Delete the `ClusterLogForwarder` CR: -+ -[source,terminal] ----- -$ oc delete LogForwarding instance -n openshift-logging ----- - diff --git a/_unused_topics/cluster-logging-uninstall-cluster-ops.adoc b/_unused_topics/cluster-logging-uninstall-cluster-ops.adoc deleted file mode 100644 index ec4c0d37eac0..000000000000 --- a/_unused_topics/cluster-logging-uninstall-cluster-ops.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-uninstall.adoc - -[id="cluster-logging-uninstall-ops_{context}"] -= Uninstall the infra cluster - -You can uninstall the infra cluster from OpenShift Logging. -After uninstalling, Fluentd no longer splits logs. - -.Procedure - -To uninstall the infra cluster: - -. - -. - -. diff --git a/_unused_topics/cnv-accessing-vmi-web.adoc b/_unused_topics/cnv-accessing-vmi-web.adoc deleted file mode 100644 index f733d2873fd5..000000000000 --- a/_unused_topics/cnv-accessing-vmi-web.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// - -[id="virt-accessing-vmi-web_{context}"] -= Connecting to a virtual machine with the web console - -You can connect to a virtual machine by using the web console. - -.Procedure - -. Ensure you are in the correct project. If not, click the *Project* -list and select the appropriate project. -. Click *Workloads* -> *Virtual Machines* to display the virtual -machines in the project. -. Select a virtual machine. -. In the *Overview* tab, click the `virt-launcher-` pod. -. Click the *Terminal* tab. If the terminal is blank, click the -terminal and press any key to initiate connection. 
diff --git a/_unused_topics/completing-installation.adoc b/_unused_topics/completing-installation.adoc deleted file mode 100644 index a3d3235f7312..000000000000 --- a/_unused_topics/completing-installation.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * TBD - -[id="completing-installation_{context}"] -= Completing and verifying the {product-title} installation - -When the bootstrap node is done with its work and has handed off control to the new {product-title} cluster, the bootstrap node is destroyed. The installation program waits for the cluster to initialize, creates a route to the {product-title} console, and presents the information and credentials you require to log in to the cluster. Here’s an example: - ----- -INFO Install complete!                                 - -INFO Run 'export KUBECONFIG=/home/joe/ocp/auth/kubeconfig' to manage the cluster with 'oc', the {product-title} CLI. - -INFO The cluster is ready when 'oc login -u kubeadmin -p ' succeeds (wait a few minutes). - -INFO Access the {product-title} web-console here: https://console-openshift-console.apps.mycluster.devel.example.com - -INFO Login to the console with user: kubeadmin, password: "password" ----- - -To access the {product-title} cluster from your web browser, log in as kubeadmin with the password, using the URL shown: - -     https://console-openshift-console.apps.mycluster.devel.example.com - -To access the {product-title} cluster from the command line, identify the location of the credentials file (export the KUBECONFIG variable) and log in as kubeadmin with the provided password: ----- -$ export KUBECONFIG=/home/joe/ocp/auth/kubeconfig - -$ oc login -u kubeadmin -p ----- - -At this point, you can begin using the {product-title} cluster. To understand the management of your {product-title} cluster going forward, you should explore the {product-title} control plane. diff --git a/_unused_topics/con-pod-reset-policy.adoc b/_unused_topics/con-pod-reset-policy.adoc deleted file mode 100644 index b317a1df6495..000000000000 --- a/_unused_topics/con-pod-reset-policy.adoc +++ /dev/null @@ -1,54 +0,0 @@ -[[nodes-configuring-nodes]] -= Understanding Pod restart policy -{product-author} -{product-version} -:data-uri: -:icons: -:experimental: -:toc: macro -:toc-title: - - -//from https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy -A Pod restart policy determines how {product-title} responds when containers in that Pod exit. -The policy applies to all containers in that Pod. - -The possible values are: - -* `Always` - Tries restarting a successfully exited container on the Pod continuously, with an exponential back-off delay (10s, 20s, 40s) until the Pod is restarted. The default is `Always`. -* `OnFailure` - Tries restarting a failed container on the Pod with an exponential back-off delay (10s, 20s, 40s) capped at 5 minutes. -* `Never` - Does not try to restart exited or failed containers on the Pod. Pods immediately fail and exit. - -//https://kubernetes-v1-4.github.io/docs/user-guide/pod-states/ -Once bound to a node, a Pod will never be bound to another node. 
This means that a controller is necessary in order for a Pod to survive node failure: - -[cols="3",options="header"] -|=== - -|Condition -|Controller Type -|Restart Policy - -|Pods that are expected to terminate (such as batch computations) -|xref:../../architecture/core_concepts/deployments.adoc#jobs[Job] -|`OnFailure` or `Never` - -|Pods that are expected to not terminate (such as web servers) -|xref:../../architecture/core_concepts/deployments.adoc#replication-controllers[Replication Controller] -| `Always`. - -|Pods that must run one-per-machine -|xref:../../dev_guide/daemonsets.adoc#dev-guide-daemonsets[Daemonset] -|Any -|=== - -If a container on a Pod fails and the restart policy is set to `OnFailure`, the Pod stays on the node and the container is restarted. If you do not want the container to -restart, use a restart policy of `Never`. - -//https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#handling-pod-and-container-failures -If an entire Pod fails, {product-title} starts a new Pod. Developers must address the possibility that applications might be restarted in a new Pod. In particular, -applications must handle temporary files, locks, incomplete output, and so forth caused by previous runs. - -For details on how {product-title} uses restart policy with failed containers, see -the link:https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#example-states[Example States] in the Kubernetes documentation. - diff --git a/_unused_topics/configuration-resource-configure.adoc b/_unused_topics/configuration-resource-configure.adoc deleted file mode 100644 index a65a1d4bb2fc..000000000000 --- a/_unused_topics/configuration-resource-configure.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * TBD - - -[id="configuration-resource-configure_{context}"] -= Configure the Configuration Resource - -To configure the Configuration Resource, you customize the Custom Resource Definition (CRD) that controls its Operator and deploy it to your cluster. - -.Prerequisites -* Deploy an {product-title} cluster. -* Review the CRD for the resource and provision any resources that your changes require. -* Access to the right user to do this thing. - -.Procedure - -. From some specific computer, modify the CRD for the resource to describe your intended configuration. Save the file in `whatever-the-location-is`. - -. Run the following command to update the CRD in your cluster: -+ ----- -$ oc something or other -- <1> --<2> ----- -<1> The CRD file that contains customizations for your resource. -<2> However you specify the cluster you’re changing. - -. Confirm that the resource reflects your changes. Run the following command and review the output: -+ ----- -$ oc something or other - -Output -Output -Output ----- -+ -If the output includes , the resource redeployed on your cluster. diff --git a/_unused_topics/configuring-local-provisioner.adoc b/_unused_topics/configuring-local-provisioner.adoc deleted file mode 100644 index 1e46999679b0..000000000000 --- a/_unused_topics/configuring-local-provisioner.adoc +++ /dev/null @@ -1,54 +0,0 @@ -[id="configuring-local-provisioner_{context}"] -= Configuring the local provisioner - -{product-title} depends on an external provisioner to create PVs for local devices and to clean up PVs when they are not in use to enable reuse. - -.Prerequisites - -* All local volumes must be manually mounted before they can be consumed by {product-title} as PVs. 
- -[NOTE] -==== -The local volume provisioner is different from most provisioners and does not support dynamic provisioning. -==== - -[NOTE] -==== -The local volume provisioner requires administrators to preconfigure the local volumes on each node and mount them under discovery directories. The provisioner then manages the volumes by creating and cleaning up PVs for each volume. -==== - -.Procedure -. Configure the external provisioner using a ConfigMap to relate directories with storage classes, for example: -+ ----- - kind: ConfigMap -metadata: - name: local-volume-config -data: - storageClassMap: | - local-ssd: - hostDir: /mnt/local-storage/ssd - mountDir: /mnt/local-storage/ssd - local-hdd: - hostDir: /mnt/local-storage/hdd - mountDir: /mnt/local-storage/hdd ----- -<1> Name of the storage class. -<2> Path to the directory on the host. It must be a subdirectory of `*/mnt/local-storage*`. -<3> Path to the directory in the provisioner Pod. We recommend using the same directory structure as used on the host and `mountDir` can be omitted in this case. - -. Create a standalone namespace for the local volume provisioner and its configuration, for example: -+ ----- -$ oc new-project local-storage ----- - -With this configuration, the provisioner creates: - -* One PV with storage class `local-ssd` for every subdirectory mounted in the `*/mnt/local-storage/ssd*` directory -* One PV with storage class `local-hdd` for every subdirectory mounted in the `*/mnt/local-storage/hdd*` directory - -[WARNING] -==== -The syntax of the ConfigMap has changed between {product-title} 3.9 and 3.10. Since this feature is in Technology Preview, the ConfigMap is not automatically converted during the update. -==== diff --git a/_unused_topics/configuring-user-agent.adoc b/_unused_topics/configuring-user-agent.adoc deleted file mode 100644 index dda5f717be47..000000000000 --- a/_unused_topics/configuring-user-agent.adoc +++ /dev/null @@ -1,10 +0,0 @@ -[id="configuring-user-agent"] -= Configuring the user agent -include::_attributes/common-attributes.adoc[] -:context: configuring-user-agent - -toc::[] - -include::modules/user-agent-overview.adoc[leveloffset=+1] - -include::modules/user-agent-configuring.adoc[leveloffset=+1] diff --git a/_unused_topics/customize-certificates-api-add-default.adoc b/_unused_topics/customize-certificates-api-add-default.adoc deleted file mode 100644 index a70aeb11709a..000000000000 --- a/_unused_topics/customize-certificates-api-add-default.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/api-server.adoc - -[id="add-default-api-server_{context}"] -= Add an API server default certificate - -To allow clients outside the cluster to validate the API server's -certificate, you can replace the default certificate -with one that is issued by a public or organizational CA. - -.Prerequisites - -* You must have a valid certificate and key in the PEM format. - -.Procedure - -. Create a secret that contains the certificate and key in the -`openshift-config` namespace. -+ ----- -$ oc create secret tls \//<1> - --cert= \//<2> - --key= \//<3> - -n openshift-config ----- -<1> `` is the name of the secret that will contain -the certificate. -<2> `` is the path to the certificate on your -local file system. -<3> `` is the path to the private key associated -with this certificate. - -. Update the API server to reference the created secret. 
-+ ----- -$ oc patch apiserver cluster \ - --type=merge -p \ - '{"spec": {"servingCerts": {"defaultServingCertificate": - {"name": ""}}}}' <1> ----- -<1> Replace `` with the name used for the secret in -the previous step. - -. Examine the `apiserver/cluster` object and confirm the secret is now -referenced. -+ ----- -$ oc get apiserver cluster -o yaml -... -spec: - servingCerts: - defaultServingCertificate: - name: -... ----- diff --git a/_unused_topics/deploying-local-provisioner.adoc b/_unused_topics/deploying-local-provisioner.adoc deleted file mode 100644 index bfef02c41d1f..000000000000 --- a/_unused_topics/deploying-local-provisioner.adoc +++ /dev/null @@ -1,20 +0,0 @@ -[id="deploying-local-provisioner_{context}"] -= Deploying the local provisioner - -This paragraph is the procedure module introduction: a short description of the procedure. - -.Prerequisites - -* Before starting the provisioner, mount all local devices and create a ConfigMap with storage classes and their directories. - -.Procedure - -. Install the local provisioner from the `*local-storage-provisioner-template.yaml*` file. -. Create a service account that allows running Pods as a root user, using hostPath volumes, and using any SELinux context to monitor, manage, and clean local volumes, for example: -+ ----- -$ oc create serviceaccount local-storage-admin -$ oc adm policy add-scc-to-user privileged -z local-storage-admin ----- -+ -To allow the provisioner Pod to delete content on local volumes created by any Pod, root privileges and any SELinux context are required. hostPath is required to access the `*/mnt/local-storage*` path on the host. diff --git a/_unused_topics/distr-tracing-deploy-otel-collector.adoc b/_unused_topics/distr-tracing-deploy-otel-collector.adoc deleted file mode 100644 index d628b2501f73..000000000000 --- a/_unused_topics/distr-tracing-deploy-otel-collector.adoc +++ /dev/null @@ -1,128 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying.adoc -//// - -:_content-type: PROCEDURE -[id="distr-tracing-deploy-otel-collector_{context}"] -= Deploying distributed tracing data collection - -The custom resource definition (CRD) defines the configuration used when you deploy an instance of {OTELName}. - -.Prerequisites - -* The {OTELName} Operator has been installed. -//* You have reviewed the instructions for how to customize the deployment. -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Log in to the OpenShift web console as a user with the `cluster-admin` role. - -. Create a new project, for example `tracing-system`. -+ -[NOTE] -==== -If you are installing distributed tracing as part of Service Mesh, the {DTShortName} resources must be installed in the same namespace as the `ServiceMeshControlPlane` resource, for example `istio-system`. -==== -+ -.. Navigate to *Home* -> *Projects*. - -.. Click *Create Project*. - -.. Enter `tracing-system` in the *Name* field. - -.. Click *Create*. - -. Navigate to *Operators* -> *Installed Operators*. - -. If necessary, select `tracing-system` from the *Project* menu. You might have to wait a few moments for the Operators to be copied to the new project. - -. Click the *{OTELName} Operator*. On the *Details* tab, under *Provided APIs*, the Operator provides a single link. - -. Under *OpenTelemetryCollector*, click *Create Instance*. - -. 
On the *Create OpenTelemetry Collector* page, to install using the defaults, click *Create* to create the {OTELShortName} instance. - -. On the *OpenTelemetryCollectors* page, click the name of the {OTELShortName} instance, for example, `opentelemetrycollector-sample`. - -. On the *Details* page, click the *Resources* tab. Wait until the pod has a status of "Running" before continuing. - -[id="distr-tracing-deploy-otel-collector-cli_{context}"] -= Deploying {OTELShortName} from the CLI - -Follow this procedure to create an instance of {OTELShortName} from the command line. - -.Prerequisites - -* The {OTELName} Operator has been installed and verified. -+ -//* You have reviewed the instructions for how to customize the deployment. -+ -* You have access to the OpenShift CLI (`oc`) that matches your {product-title} version. -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Log in to the {product-title} CLI as a user with the `cluster-admin` role. -+ -[source,terminal] ----- -$ oc login https://:8443 ----- - -. Create a new project named `tracing-system`. -+ -[source,terminal] ----- -$ oc new-project tracing-system ----- - -. Create a custom resource file named `jopentelemetrycollector-sample.yaml` that contains the following text: -+ -.Example opentelemetrycollector.yaml -[source,yaml] ----- - apiVersion: opentelemetry.io/v1alpha1 - kind: OpenTelemetryCollector - metadata: - name: opentelemetrycollector-sample - namespace: openshift-operators - spec: - image: >- - registry.redhat.io/rhosdt/opentelemetry-collector-rhel8@sha256:61934ea5793c55900d09893e8f8b1f2dbd2e712faba8e97684e744691b29f25e - config: | - receivers: - jaeger: - protocols: - grpc: - exporters: - logging: - service: - pipelines: - traces: - receivers: [jaeger] - exporters: [logging] ----- - -. Run the following command to deploy {JaegerShortName}: -+ -[source,terminal] ----- -$ oc create -n tracing-system -f opentelemetrycollector.yaml ----- - -. Run the following command to watch the progress of the pods during the installation process: -+ -[source,terminal] ----- -$ oc get pods -n tracing-system -w ----- -+ -After the installation process has completed, you should see output similar to the following example: -+ -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -opentelemetrycollector-cdff7897b-qhfdx 2/2 Running 0 24s ----- diff --git a/_unused_topics/exploring-cvo.adoc b/_unused_topics/exploring-cvo.adoc deleted file mode 100644 index 416394623c91..000000000000 --- a/_unused_topics/exploring-cvo.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * - -[id="exploring-cvo_{context}"] -= Exploring the CVO - -To see the current version that your cluster is on, type: - ----- -$ oc get clusterversion - -NAME    VERSION   AVAILABLE PROGRESSING SINCE STATUS -version 4.5.4 True      False       10h   Cluster version is 4.5.4 ----- - -Each release version is represented by a set of images. 
To see basic release information and a list of those images, type: - ----- -$ oc adm release info - -Name:          4.0.0-0.7 -Digest:        sha256:641c0e4f550af59ec20349187a31751ae5108270f13332d1771935520ebf34c1 -Created:   2019-03-05 13:33:12 -0500 EST -OS/Arch:   linux/amd64 -Manifests: 248 -Release Metadata: -  Version:  4.0.0-0.7 -  Upgrades: 4.0.0-0.6 -  Metadata: -        description: Beta 2 -Component Versions: -  Kubernetes 1.13.4 -Images: -  NAME                        DIGEST -  aws-machine-controllers     sha256:630e8118038ee97b8b3bbfed7d9b63e06c1346c606e11908064ea3f57bd9ff8e -  cli                         sha256:93e16a8c56ec4031b5fa68683f75910aad57b54160a1e6054b3d3e96d9a4b376 -  cloud-credential-operator   sha256:bbc8d586b2210ac44de554558fd299555e72fb662b6751589d69b173b03aa821 -…​ ----- - -To see the Operators managed on the control plane by the Cluster Version Operator, type: - ----- -$ oc get clusteroperator -NAME                                 VERSION  AVAILABLE PROGRESSING DEGRADED SINCE -cluster-autoscaler                            True      False       False   10h -cluster-storage-operator                      True      False       False   10h -console                                       True      False       False   10h -dns                                           True      False       False   10h -image-registry                                True      False       False   10h -ingress                                       True      False       False   10h -kube-apiserver                                True      False       False   10h -kube-controller-manager                       True      False       False   10h -kube-scheduler                                True      False       False   10h -machine-api                                   True      False       False   10h -machine-config                                True      False       False   10h -marketplace-operator                          True      False       False   10h -monitoring                                    True      False       False   156m -network                                       True      False       False   139m -node-tuning                                   True      False       False   10h -openshift-apiserver                           True      False       False   19m -openshift-authentication                      True      False       False   10h -openshift-cloud-credential-operator           True      False       False   10h -openshift-controller-manager                  True      False       False   10h -openshift-samples                             True      False       False   10h -operator-lifecycle-manager                    True      False       False   10h ----- - -While most of the Cluster Operators listed provide services to the {product-title} cluster, the machine-config Operator in particular is tasked with managing the {op-system} operating systems in the nodes. 
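For example, to take a closer look at any single entry in that list, such as the machine-config Operator, you can use `oc describe`. The following command is an illustrative sketch; the Operator name is only an example:

----
$ oc describe clusteroperator machine-config
----

The output typically includes the Operator's version, its status conditions, and the objects it manages, which helps explain why an Operator reports `Progressing` or `Degraded`.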
diff --git a/_unused_topics/identity-provider-create-CR.adoc b/_unused_topics/identity-provider-create-CR.adoc deleted file mode 100644 index 8014c4ae6ab6..000000000000 --- a/_unused_topics/identity-provider-create-CR.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-allow-all-identity-provider.adoc -// * authentication/identity_providers/configuring-deny-all-identity-provider.adoc -// * authentication/identity_providers/configuring-htpasswd-identity-provider.adoc -// * authentication/identity_providers/configuring-keystone-identity-provider.adoc -// * authentication/identity_providers/configuring-ldap-identity-provider.adoc -// * authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc -// * authentication/identity_providers/configuring-request-header-identity-provider.adoc -// * authentication/identity_providers/configuring-github-identity-provider.adoc -// * authentication/identity_providers/configuring-gitlab-identity-provider.adoc -// * authentication/identity_providers/configuring-google-identity-provider.adoc -// * authentication/identity_providers/configuring-oidc-identity-provider.adoc - -[id="identity-provider-create-CR_{context}"] -= Creating the CR that describes an identity provider - -Before you can add an identity provider to your cluster, create a Custom -Resource (CR) that describes it. - -.Prerequisites - -* Create an {product-title} cluster. - -.Procedure - -Create a CR file to describe the identity provider. A generic file displaying -the structure is below. - ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: my_identity_provider <1> - mappingMethod: claim <2> - type: <3> - ... ----- -<1> A unique name defining the identity provider. This provider name is -prefixed to provider user names to form an identity name. -<2> Controls how mappings are established between this provider's identities and user objects. -<3> The type of identity provider to be configured. -+ -Provide the parameters that are required for your identity provider type. diff --git a/_unused_topics/identity-provider-provisioning-user-lookup-mapping.adoc b/_unused_topics/identity-provider-provisioning-user-lookup-mapping.adoc deleted file mode 100644 index 6d490d6ace10..000000000000 --- a/_unused_topics/identity-provider-provisioning-user-lookup-mapping.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * orphaned - -[id="identity-provider-provisioning-user-lookup-mapping_{context}"] -= Manually provisioning a user when using the lookup mapping method - -When using the `lookup` mapping method, user provisioning is done by an external system, via the API. -Typically, identities are automatically mapped to users during login. The 'lookup' mapping method automatically -disables this automatic mapping, which requires you to provision users manually. - - -.Procedure - -If you are using the `lookup` mapping method, use the following steps for each user after configuring -the identity provider: - -. Create an {product-title} User, if not created already: -+ ----- -$ oc create user ----- -+ -For example, the following command creates an {product-title} User `bob`: -+ ----- -$ oc create user bob ----- - -. Create an {product-title} Identity, if not created already. 
Use the name of the identity provider and -the name that uniquely represents this identity in the scope of the identity provider: -+ ----- -$ oc create identity : ----- -+ -The `` is the name of the identity provider in the master configuration, -as shown in the appropriate identity provider section below. -+ -For example, the following commands creates an Identity with identity provider `ldap_provider` and the identity provider user name `bob_s`. -+ ----- -$ oc create identity ldap_provider:bob_s ----- - -. Create a user/identity mapping for the created user and identity: -+ ----- -$ oc create useridentitymapping : ----- -+ -For example, the following command maps the identity to the user: -+ ----- -$ oc create useridentitymapping ldap_provider:bob_s bob ----- diff --git a/_unused_topics/images-s2i-java-pulling-images.adoc b/_unused_topics/images-s2i-java-pulling-images.adoc deleted file mode 100644 index dc9604744b07..000000000000 --- a/_unused_topics/images-s2i-java-pulling-images.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-s2i-java-pulling-images_{context}"] -= Pulling images for Java - -The Red Hat Enterprise Linux (RHEL) 8 image is available through the Red Hat Registry. - -.Procedure - -. To pull the RHEL 8 image, enter the following command: -[source,terminal] ----- -$ podman pull registry.redhat.io/redhat-openjdk-18/openjdk18-openshift ----- - -To use this image on {product-title}, you can either access it directly from the Red Hat Registry or push it into your {product-title} container image registry. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. - -//// -Your {product-title} resources can then reference the link:https://github.com/jboss-openshift/application-templates/blob/master/jboss-image-streams.json[image stream definition]. -//// diff --git a/_unused_topics/images-s2i-nodejs-pulling-images.adoc b/_unused_topics/images-s2i-nodejs-pulling-images.adoc deleted file mode 100644 index 32fab99ea8ce..000000000000 --- a/_unused_topics/images-s2i-nodejs-pulling-images.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-s2i-nodejs-pulling-images_{context}"] -= Pulling images for Node.js - -//These images come in two options: - -//* RHEL 8 -//* CentOS 7 - -//*RHEL 8 Based Images* - -The RHEL 8 images are available through the Red Hat Registry. - -.Procedure - -* To pull the RHEL 8 image, enter the following command for the version of Node.js you want: -+ -.Node.js `12` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/nodejs-12-rhel7:latest ----- -+ -.Node.js `10` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/nodejs-10-rhel7:latest ----- - -//// -*CentOS 7 Based Image* - -This image is available on link:quay.io[Quay.io]. 
- -.Procedure - -* To pull the CentOS 7 image, enter the following command: -+ -[source,terminal] ----- -$ podman pull openshift/nodejs-010-centos7 ----- -//// - -To use these images, you can either access them directly from registry.redhat.io, or push them into your {product-registry}. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the -image stream. diff --git a/_unused_topics/images-using-images-s2i-perl-configuration.adoc b/_unused_topics/images-using-images-s2i-perl-configuration.adoc deleted file mode 100644 index 563ba407e4be..000000000000 --- a/_unused_topics/images-using-images-s2i-perl-configuration.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-perl-configuration_{context}"] -= Configuring source-to-image for Perl - -The Perl image supports a number of environment variables which can be set to control the configuration and behavior of the Perl runtime. - -To set these environment variables as part of your image, you can place them into -a `.s2i/environment` file inside your source code repository, or define them in -the environment section of the build configuration's `sourceStrategy` definition. - -You can also set environment variables to be used with an existing image when creating new applications, or by updating environment variables for existing objects such as deployment configurations. - -[NOTE] -==== -Environment variables that control build behavior must be set as part of the source-to-image (S2I) build configuration or in the `.s2i/environment` file to make them available to the build steps. -==== - -.Perl Environment Variables -[cols="4a,6a",options="header"] -|=== - -|Variable name |Description - -|`ENABLE_CPAN_TEST` -|When set to `true`, this variable installs all the cpan modules and runs their tests. By default, the testing of the modules is disabled. - -|`CPAN_MIRROR` -|This variable specifies a mirror URL which cpanminus uses to install dependencies. By default, this URL is not specified. - -|`PERL_APACHE2_RELOAD` -|Set this to `true` to enable automatic reloading of modified Perl modules. By default, automatic reloading is disabled. - -|`HTTPD_START_SERVERS` -|The https://httpd.apache.org/docs/2.4/mod/mpm_common.html#startservers[StartServers] directive sets the number of child server processes created on startup. Default is 8. - -|`HTTPD_MAX_REQUEST_WORKERS` -|Number of simultaneous requests that will be handled by Apache. The default is 256, but it will be automatically lowered if memory is limited. -|=== - -//Verify` oc log` is still valid. diff --git a/_unused_topics/images-using-images-s2i-perl-hot-deploying.adoc b/_unused_topics/images-using-images-s2i-perl-hot-deploying.adoc deleted file mode 100644 index de276ad98264..000000000000 --- a/_unused_topics/images-using-images-s2i-perl-hot-deploying.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. 
- -[id="images-using-images-s2i-perl-hot-deploying_{context}"] -= Hot deploying for Perl - -Hot deployment allows you to quickly make and deploy changes to your application -without having to generate a new S2I build. To enable hot deployment in this -image, you must set the `PERL_APACHE2_RELOAD` environment variable to `true`. You can use the `oc set env` command to update environment variables of existing objects. - -[WARNING] -==== -You should only use this option while developing or debugging. It is not recommended to turn this on in your production environment. -==== - -.Procedure - -. To change your source code in a running pod, use the `oc rsh` command to enter the container: -+ -[source,terminal] ----- -$ oc rsh ----- -+ -After you enter into the running container, your current directory is set to -`/opt/app-root/src`, where the source code is located. diff --git a/_unused_topics/images-using-images-s2i-perl-pulling-images.adoc b/_unused_topics/images-using-images-s2i-perl-pulling-images.adoc deleted file mode 100644 index 996f9b752d74..000000000000 --- a/_unused_topics/images-using-images-s2i-perl-pulling-images.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-perl-pulling-images_{context}"] -= Pulling images for Perl - -//Images comes in two options: - -//* RHEL 8 -//* CentOS 7 - -// *RHEL 8 Based Images* - -The RHEL 8 images are available through the Red Hat Registry. - -.Procedure - -* To pull the RHEL 8 image, enter the following command for the version of Perl you want: -+ -.Perl `5.26` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/perl-526-rhel7:latest ----- -+ -.Perl `5.30` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/perl-530-rhel7:latest ----- - -//// -*CentOS 7 Based Image* - -A CentOS image for Perl 5.16 is available on link:quay.io[Quay.io]. - -.Procedure - -* To pull the CentOS 7 image, enter the following command: -+ -[source,terminal] ----- -$ podman pull openshift/perl-516-centos7 ----- -//// - -To use these images, you can either access them directly from registry.redhat.io, or push them into your {product-registry}. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the -image stream. diff --git a/_unused_topics/images-using-images-s2i-perl.adoc b/_unused_topics/images-using-images-s2i-perl.adoc deleted file mode 100644 index 01277ff90a72..000000000000 --- a/_unused_topics/images-using-images-s2i-perl.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-perl_{context}"] -= Perl overview - -{product-title} provides source-to-image (S2I) enabled Perl images for building and running Perl applications. The Perl S2I builder image assembles your application source with any required dependencies to create a new image containing your Perl application. This resulting image can be run either by {product-title} or by a container runtime. 
- -[id="images-using-images-s2i-perl-accessing-logs_{context}"] -== Accessing logs -Access logs are streamed to standard output and as such they can be viewed using the `oc logs` command. Error logs are stored in the `/tmp/error_log` file, which can be viewed using the `oc rsh` command to access the container. diff --git a/_unused_topics/images-using-images-s2i-php-configuration.adoc b/_unused_topics/images-using-images-s2i-php-configuration.adoc deleted file mode 100644 index 7e2ec6f6d7fd..000000000000 --- a/_unused_topics/images-using-images-s2i-php-configuration.adoc +++ /dev/null @@ -1,116 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-php-configuration_{context}"] -= Configuring source-to-image for PHP - -The PHP image supports a number of environment variables which can be set to control the configuration and behavior of the PHP runtime. - -To set these environment variables as part of your image, you can place them into a `.s2i/environment` file inside your source code repository, or define them in the environment section of the build configuration's `sourceStrategy` definition. - -You can also set environment variables to be used with an existing image when creating new applications, or by updating environment variables for existing objects such as deployment configurations. - -[NOTE] -==== -Environment variables that control build behavior must be set as part of the source-to-image (S2I) build configuration or in the `.s2i/environment` file to make them available to the build steps. -==== - -The following environment variables set their equivalent property value in the -`php.ini` file: - -.PHP Environment Variables -[cols="4a,6a,6a",options="header"] -|=== - -|Variable Name |Description |Default - -|`ERROR_REPORTING` -|Informs PHP of the errors, warnings, and notices for which you would like it to -take action. -|`E_ALL & ~E_NOTICE` - -|`DISPLAY_ERRORS` -|Controls if and where PHP outputs errors, notices, and warnings. -|`ON` - -|`DISPLAY_STARTUP_ERRORS` -|Causes any display errors that occur during PHP's startup sequence to be -handled separately from display errors. -|`OFF` - -|`TRACK_ERRORS` -|Stores the last error/warning message in `$php_errormsg` (boolean). -|`OFF` - -|`HTML_ERRORS` -|Links errors to documentation that is related to the error. -|`ON` - -|`INCLUDE_PATH` -|Path for PHP source files. -|`.:/opt/openshift/src:/opt/rh/php55/root/usr/share/pear` - -|`SESSION_PATH` -|Location for session data files. -|`/tmp/sessions` - -|`DOCUMENTROOT` -|Path that defines the document root for your application (for example, `/public`). -|`/` -|=== - -The following environment variable sets its equivalent property value in the -`opcache.ini` file: - -.Additional PHP settings -[cols="3a,6a,1a",options="header"] -|=== - -|Variable Name |Description |Default - -|`OPCACHE_MEMORY_CONSUMPTION` -|The link:http://php.net/manual/en/book.opcache.php[OPcache] shared memory -storage size. -|`16M` - -|`OPCACHE_REVALIDATE_FREQ` -|How often to check script time stamps for updates, in seconds. `0` results in -link:http://php.net/manual/en/book.opcache.php[OPcache] checking for updates on -every request. 
-|`2` -|=== - -You can also override the entire directory used to load the PHP configuration by setting: - -.Additional PHP settings -[cols="3a,6a",options="header"] -|=== - -| Variable Name | Description - -|`PHPRC` -|Sets the path to the `php.ini` file. - -|`*PHP_INI_SCAN_DIR*` -|Path to scan for additional `.ini` configuration files -|=== - -You can use a custom composer repository mirror URL to download packages instead of the default `packagist.org`: - -.Composer Environment Variables -[cols="4a,6a",options="header"] -|=== - -|Variable Name |Description - -|`COMPOSER_MIRROR` -|Set this variable to use a custom Composer repository mirror URL to download required packages during the build process. -Note: This only affects packages listed in `composer.json`. -|=== - -[id="images-using-images-s2i-php-apache-configuration_{context}"] -== Apache configuration - -If the `DocumentRoot` of the application is nested in the source directory `/opt/openshift/src`, you can provide your own `.htaccess` file to override the default Apache behavior and specify how application requests should be handled. The `.htaccess` file must be located at the root of the application source. diff --git a/_unused_topics/images-using-images-s2i-php-hot-deploying.adoc b/_unused_topics/images-using-images-s2i-php-hot-deploying.adoc deleted file mode 100644 index f8a852dd3447..000000000000 --- a/_unused_topics/images-using-images-s2i-php-hot-deploying.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-hot-deploying_{context}"] -= Hot deploying for PHP - -Hot deployment allows you to quickly make and deploy changes to your application without having to generate a new source-to-image (S2I) build. In order to immediately pick up changes made in your application source code, you must run your built image with the `OPCACHE_REVALIDATE_FREQ=0` environment variable. - -You can use the `oc env` command to update environment variables of existing objects. - -[WARNING] -==== -You should only use this option while developing or debugging. It is not recommended to turn this on in your production environment. -==== - -.Procedure - -. To change your source code in a running pod, use the `oc rsh` command to enter the container: -+ -[source,terminal] ----- -$ oc rsh ----- - -After you enter into the running container, your current directory is set to `/opt/app-root/src`, where the source code is located. diff --git a/_unused_topics/images-using-images-s2i-php-pulling-images.adoc b/_unused_topics/images-using-images-s2i-php-pulling-images.adoc deleted file mode 100644 index 51691eb98a56..000000000000 --- a/_unused_topics/images-using-images-s2i-php-pulling-images.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-php-pulling-images_{context}"] -= Pulling images for PHP - -//These images come in two options: - -//* RHEL 8 -//* CentOS 7 - -The RHEL 8 images are available through the Red Hat Registry. 
- -.Procedure - -* To pull the RHEL 8 image, enter the following command for the version of PHP you want: - -.PHP `8.1` -[source,terminal] ----- -$ podman pull registry.redhat.io/ubi9/php-81:latest ----- -+ -.PHP `7.3` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/php-73-rhel7:latest ----- - -//// -*CentOS 7 Based Images* - -CentOS images for PHP 5.5 and 5.6 are available on link:quay.io[Quay.io]. - -.Procedure - -* To pull the CentOS 7 image, enter the following command for the version of Node.js you want: -+ -.PHP `5.5` -[source,terminal] ----- -$ podman pull openshift/php-55-centos7 ----- -+ -.PHP `5.6` -[source,terminal] ----- -$ podman pull openshift/php-56-centos7 ----- -//// - -To use these images, you can either access them directly from registry.redhat.io, or push them into your {product-registry}. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the -image stream. diff --git a/_unused_topics/images-using-images-s2i-php.adoc b/_unused_topics/images-using-images-s2i-php.adoc deleted file mode 100644 index 116276a93b06..000000000000 --- a/_unused_topics/images-using-images-s2i-php.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-php_{context}"] -= PHP overview - -{product-title} provides source-to-image (S2I) enabled PHP images for building and running PHP applications.The PHP S2I builder image assembles your application source with any required dependencies to create a new image containing your PHP application. This resulting image can be run either by {product-title} or by a container runtime. - -[id="images-using-images-s2i-php-accessing-logs_{context}"] -== Accessing logs - -Access logs are streamed to standard out and as such they can be viewed using the `oc logs` command. Error logs are stored in the `/tmp/error_log` file, which can be viewed using the `oc rsh` command to access the container. diff --git a/_unused_topics/images-using-images-s2i-python-configuration.adoc b/_unused_topics/images-using-images-s2i-python-configuration.adoc deleted file mode 100644 index f2dfd34cbbb9..000000000000 --- a/_unused_topics/images-using-images-s2i-python-configuration.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-python-configuration_{context}"] -= Configuring source-to-image for Python - -The Python image supports a number of environment variables which can be set to control the configuration and behavior of the Python runtime. - -To set these environment variables as part of your image, you can place them into a `.s2i/environment` file inside your source code repository, or define them in the environment section of the build configuration's `sourceStrategy` definition. - -You can also set environment variables to be used with an existing image when creating new applications, or by updating environment variables for existing objects such as deployment configurations. 
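For example, the following is a minimal sketch of setting these variables with `oc set env` on existing objects. The object names are placeholders, and the variables shown are described later in this module:

[source,terminal]
----
$ oc set env dc/<app_name> WEB_CONCURRENCY=4
$ oc set env bc/<app_name> DISABLE_COLLECTSTATIC=1
----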
- -[NOTE] -==== -Environment variables that control build behavior must be set as part of the source-to-image (S2I) build configuration or in the `.s2i/environment` file to make them available to the build steps. -==== - -.Python Environment Variables -[cols="4a,6a",options="header"] -|=== - -|Variable name |Description - -|`APP_FILE` -|This variable specifies the file name passed to the Python interpreter which is responsible for launching the application. This variable is set to `app.py` by default. - -|`APP_MODULE` -|This variable specifies the WSGI callable. It follows the pattern `$(MODULE_NAME):$(VARIABLE_NAME)`, where the module name is a full dotted path and the variable name refers to a function inside the specified module. If you use `setup.py` for installing the application, then the module name can be read from that file and the variable defaults to `application`. - -|`APP_CONFIG` -|This variable indicates the path to a valid Python file with a http://docs.gunicorn.org/en/latest/configure.html[gunicorn configuration]. - -|`DISABLE_COLLECTSTATIC` -|Set it to a nonempty value to inhibit the execution of `manage.py collectstatic` during the build. Only affects Django projects. - -|`DISABLE_MIGRATE` -|Set it to a nonempty value to inhibit the execution of `manage.py migrate` when the produced image is run. Only affects Django projects. - -|`*PIP_INDEX_URL*` -| Set this variable to use a custom index URL or mirror to download required -packages during build process. This only affects packages listed in the -*_requirements.txt_* file. - -| `WEB_CONCURRENCY` -| Set this to change the default setting for the number of http://docs.gunicorn.org/en/stable/settings.html#workers[workers]. By default, this is set to the number of available cores times 4. -|=== diff --git a/_unused_topics/images-using-images-s2i-python-hot-deploying.adoc b/_unused_topics/images-using-images-s2i-python-hot-deploying.adoc deleted file mode 100644 index 03989935aebb..000000000000 --- a/_unused_topics/images-using-images-s2i-python-hot-deploying.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-python-hot-deploying_{context}"] -= Hot deploying - -Hot deployment allows you to quickly make and deploy changes to your application without having to generate a new source-to-image (S2I) build. If you are using Django, hot deployment works out of the box. - -To enable hot deployment while using Gunicorn, ensure you have a Gunicorn -configuration file inside your repository with https://gunicorn-docs.readthedocs.org/en/latest/settings.html#reload[the `reload` option set to `true`. Specify your configuration file using the `APP_CONFIG` environment variable. For example, see the `oc new-app` command. You can use the `oc set env` command to update environment variables of existing objects. - -[WARNING] -==== -You should only use this option while developing or debugging. It is not recommended to turn this on in your production environment. -==== - -. Procedure - -To change your source code in a running pod, use the `oc rsh` command to enter the container: -+ -[source,terminal] ----- -$ oc rsh ----- - -After you enter into the running container, your current directory is set to `/opt/app-root/src`, where the source code is located. 
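For example, the following is a minimal sketch of pointing an existing deployment configuration at a Gunicorn configuration file that enables `reload`. The object and file names are placeholders:

[source,terminal]
----
$ oc set env dc/<app_name> APP_CONFIG=gunicorn_cfg.py
----

After the updated pods start, changes that you make to the source under `/opt/app-root/src` should be picked up without triggering a new S2I build.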
diff --git a/_unused_topics/images-using-images-s2i-python-pulling-images.adoc b/_unused_topics/images-using-images-s2i-python-pulling-images.adoc deleted file mode 100644 index 0d90476cf0b7..000000000000 --- a/_unused_topics/images-using-images-s2i-python-pulling-images.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-python-pulling-images_{context}"] -= Pulling images for Python - -//These images come in two options: - -//* RHEL 8 -//* CentOS 7 - -//*RHEL 8 Based Images* - -The RHEL 8 images are available through the Red Hat Registry. - -.Procedure - -* To pull the RHEL 8 image, enter the following command for the version of Python you want: -+ -.Python `2.7` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/python-27-rhel7:latest ----- -+ -.Python `3.9` -[source,terminal] ----- -$ podman pull registry.redhat.io/ubi9/python-39:latest ----- -+ -.Python `3.8` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/python-38-rhel7:latest ----- - -//// -*CentOS 7 Based Images* - -These images are available on link:quay.io[Quay.io]. - -.Procedure - -* To pull the CentOS 7 image, enter the following command for the version of Python you want: -+ -.Python `2.7` -[source,terminal] ----- -$ podman pull centos/python-27-centos7 ----- -+ -.Python `3.3` -[source,terminal] ----- -$ podman pull openshift/python-33-centos7 ----- -+ -.Python `3.4` -[source,terminal] ----- -$ podman pull centos/python-34-centos7 ----- -+ -.Python `3.5` -[source,terminal] ----- -$ podman pull centos/python-35-centos7 ----- -//// - -To use these images, you can either access them directly from registry.redhat.io, or push them into your {product-registry}. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the -image stream. diff --git a/_unused_topics/images-using-images-s2i-python.adoc b/_unused_topics/images-using-images-s2i-python.adoc deleted file mode 100644 index 92c996b56fa7..000000000000 --- a/_unused_topics/images-using-images-s2i-python.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-python_{context}"] -= Python overview - -{product-title} provides source-to-image (S2I) enabled Python images for building and running Python applications. The Python S2I builder image assembles your application source with any required dependencies to create a new image containing your Python application. This resulting image can be run either by {product-title} or by a container runtime. diff --git a/_unused_topics/images-using-images-s2i-ruby-configuration.adoc b/_unused_topics/images-using-images-s2i-ruby-configuration.adoc deleted file mode 100644 index 07841e122384..000000000000 --- a/_unused_topics/images-using-images-s2i-ruby-configuration.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. 
Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-ruby-configuration_{context}"] -= Configuring source-to-image for Ruby - -The Ruby image supports a number of environment variables which can be set to control the configuration and behavior of the Ruby runtime. - -To set these environment variables as part of your image, you can place them into a `_.s2i/environment` file inside your source code repository, or define them in the environment section of the build configuration's `sourceStrategy` definition. - -You can also set environment variables to be used with an existing image when creating new applications, or by updating environment variables for existing objects such as deployment configurations. - -[NOTE] -==== -Environment variables that control build behavior must be set as part of the source-to-image (S2I) build configuration or in the `.s2i/environment` file to make them available to the build steps. -==== - -.Ruby Environment Variables -[cols="4a,6a",options="header"] -|=== - -|Variable name |Description - -|`RACK_ENV` -|This variable specifies the environment within which the Ruby application is deployed, for example, `production`, `development`, or `test`. Each level has different behavior in terms of logging verbosity, error pages, and `ruby gem` installation. The application assets are only compiled if `RACK_ENV` is set to `production`. The default value is `production`. - -|`RAILS_ENV` -|This variable specifies the environment within which the Ruby on Rails application is deployed, for example, `production`, `development`, or `test`. Each level has different behavior in terms of logging verbosity, error pages, and `ruby gem` installation. The application assets are only compiled if `RAILS_ENV` is set to `production`. This variable is set to `${RACK_ENV}` by default. - -|`DISABLE_ASSET_COMPILATION` -|When set to `true`, this variable disables the process of asset compilation. Asset compilation only happens when the application runs in a production environment. Therefore, you can use this variable when assets have already been compiled. - -|`PUMA_MIN_THREADS`, `PUMA_MAX_THREADS` -|This variable indicates the minimum and maximum number of threads that will be available in Puma's thread pool. - -|`PUMA_WORKERS` -|This variable indicates the number of worker processes to be launched in Puma's clustered mode, when Puma runs more than two processes. If not explicitly set, the default behavior sets `PUMA_WORKERS` to a value that is appropriate for the memory available to the container and the number of cores on the host. - -|`RUBYGEM_MIRROR` -|Set this variable to use a custom RubyGems mirror URL to download required gem packages during the build process. This environment variable is only available for Ruby 2.2+ images. -|=== diff --git a/_unused_topics/images-using-images-s2i-ruby-hot-deploying.adoc b/_unused_topics/images-using-images-s2i-ruby-hot-deploying.adoc deleted file mode 100644 index 6463af2986fb..000000000000 --- a/_unused_topics/images-using-images-s2i-ruby-hot-deploying.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. 
- -[id="images-using-images-s2i-ruby-hot-deploying_{context}"] -== Hot deploying for Ruby - -Hot deployment allows you to quickly make and deploy changes to your application without having to generate a new source-to-image (S2I) build. The method for enabling hot deployment in this image differs based on the application type. - -*Ruby on Rails applications* - -.Procedure - -For Ruby on Rails application, run the built Rails application with the `RAILS_ENV=development` environment variable passed to the running pod. - -* For an existing deployment configuration, you can use the `oc set env` command: -+ -[source,terminal] ----- -$ oc set env dc/rails-app RAILS_ENV=development ----- - -*Other Types of Ruby applications such as Sinatra or Padrino* - -For other types of Ruby applications, your application must be built with a gem that can reload the server every time a change to the source code is made inside the running container. Those gems are: - -* Shotgun -* Rerun -* Rack-livereload - -To be able to run your application in development mode, you must modify the S2I `run` script so that the web server is launched by the chosen gem, which checks for changes in the source code. - -After you build your application image with your version of the S2I `run` script, run the image with the `RACK_ENV=development` environment variable. For example, you can use the `oc new-app` command. You can use the `oc set env` command to update environment variables of existing objects. - -[WARNING] -==== -You should only use this option while developing or debugging. It is not recommended to turn this on in your production environment. -==== - -.Procedure - -. To change your source code in a running pod, use the `oc rsh` command to enter the container: -+ -[source,terminal] ----- -$ oc rsh ----- - -After you enter into the running container, your current directory is set to `/opt/app-root/src`, where the source code is located. diff --git a/_unused_topics/images-using-images-s2i-ruby-pulling-images.adoc b/_unused_topics/images-using-images-s2i-ruby-pulling-images.adoc deleted file mode 100644 index 9829367e28eb..000000000000 --- a/_unused_topics/images-using-images-s2i-ruby-pulling-images.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-ruby-pulling-images_{context}"] -= Pulling images for Ruby - -//These images come in two options: - -//* RHEL 8 -//* CentOS 7 - -//*RHEL 8 Based Images* - -The RHEL 8 images are available through the Red Hat Registry. - -.Procedure - -* To pull the RHEL 8 image, enter the following command for the version of Ruby you want: -+ -.Ruby `2.5` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/ruby-25-rhel7:latest ----- -+ -.Ruby `2.6` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/ruby-26-rhel7:latest ----- -+ -.Ruby `2.7` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/ruby-27-rhel7:latest ----- - -//// -*CentOS 7 Based Images* - -These images are available on link:quay.io[Quay.io]. 
- -.Procedure - -* To pull the CentOS 7 image, enter the following command for the version of Ruby you want: -+ -.Ruby `2.0` -[source,terminal] ----- -$ podman pull openshift/ruby-20-centos7 ----- -+ -.Ruby `2.2` -[source,terminal] ----- -$ podman pull openshift/ruby-22-centos7 ----- -+ -.Ruby `2.3` -[source,terminal] ----- -$ podman pull centos/ruby-23-centos7 ----- -//// - -To use these images, you can either access them directly from registry.redhat.io, or push them into your {product-registry}. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the -image stream. diff --git a/_unused_topics/images-using-images-s2i-ruby.adoc b/_unused_topics/images-using-images-s2i-ruby.adoc deleted file mode 100644 index feed3359d273..000000000000 --- a/_unused_topics/images-using-images-s2i-ruby.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-ruby_{context}"] -= Ruby overview - -{product-title} provides source-to-image (S2I) enabled Ruby images for building and running Ruby applications. The Ruby S2I builder image assembles your application source with any required dependencies to create a new image containing your Ruby application. This resulting image can be run either by {product-title} or by a container runtime. diff --git a/_unused_topics/installation-about-custom.adoc b/_unused_topics/installation-about-custom.adoc deleted file mode 100644 index 8e26117c63b6..000000000000 --- a/_unused_topics/installation-about-custom.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * orphaned - -[id="installation-about-custom_{context}"] -= About the custom installation - -You can use the {product-title} installation program to customize four levels -of the program: - -* {product-title} itself -* The cluster platform -* Kubernetes -* The cluster operating system - -Changes to {product-title} and its platform are managed and supported, but -changes to Kubernetes and the cluster operating system currently are not. If -you customize unsupported levels program levels, future installation and -upgrades might fail. - -When you select values for the prompts that the installation program presents, -you customize {product-title}. You can further modify the cluster platform -by modifying the `install-config.yaml` file that the installation program -uses to deploy your cluster. In this file, you can make changes like setting the -number of machines that the control plane uses, the type of virtual machine -that the cluster deploys, or the CIDR range for the Kubernetes service network. - -It is possible, but not supported, to modify the Kubernetes objects that are injected into the cluster. -A common modification is additional manifests in the initial installation. -No validation is available to confirm the validity of any modifications that -you make to these manifests, so if you modify these objects, you might render -your cluster non-functional. -[IMPORTANT] -==== -Modifying the Kubernetes objects is not supported. -==== - -Similarly it is possible, but not supported, to modify the -Ignition config files for the bootstrap and other machines. 
No validation is -available to confirm the validity of any modifications that -you make to these Ignition config files, so if you modify these objects, you might render -your cluster non-functional. - -[IMPORTANT] -==== -Modifying the Ignition config files is not supported. -==== - -To complete a custom installation, you use the installation program to generate -the installation files and then customize them. -The installation status is stored in a hidden -file in the asset directory and contains all of the installation files. diff --git a/_unused_topics/installation-creating-worker-machineset.adoc b/_unused_topics/installation-creating-worker-machineset.adoc deleted file mode 100644 index fab07717826c..000000000000 --- a/_unused_topics/installation-creating-worker-machineset.adoc +++ /dev/null @@ -1,144 +0,0 @@ -// Module included in the following assemblies: -// -// * none - -[id="installation-creating-worker-machineset_{context}"] -= Creating worker nodes that the cluster manages - -After your cluster initializes, you can create workers that are controlled by -a MachineSet in your Amazon Web Services (AWS) user-provisioned Infrastructure -cluster. - -.Prerequisites - -* Install a cluster on AWS using infrastructer that you provisioned. - -.Procedure - -. Optional: Launch worker nodes that are controlled by the machine API. -. View the list of MachineSets in the `openshift-machine-api` namespace: -+ ----- -$ oc get machinesets --namespace openshift-machine-api -NAME DESIRED CURRENT READY AVAILABLE AGE -test-tkh7l-worker-us-east-2a 1 1 11m -test-tkh7l-worker-us-east-2b 1 1 11m -test-tkh7l-worker-us-east-2c 1 1 11m ----- -+ -Note the `NAME` of each MachineSet. Because you use a different subnet than the -installation program expects, the worker MachineSets do not use the correct -network settings. You must edit each of these MachineSets. - -. 
Edit each worker MachineSet to provide the correct values for your cluster: -+ ----- -$ oc edit machineset --namespace openshift-machine-api test-tkh7l-worker-us-east-2a -o yaml -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - creationTimestamp: 2019-03-14T14:03:03Z - generation: 1 - labels: - machine.openshift.io/cluster-api-cluster: test-tkh7l - machine.openshift.io/cluster-api-machine-role: worker - machine.openshift.io/cluster-api-machine-type: worker - name: test-tkh7l-worker-us-east-2a - namespace: openshift-machine-api - resourceVersion: "2350" - selfLink: /apis/machine.openshift.io/v1beta1/namespaces/openshift-machine-api/machinesets/test-tkh7l-worker-us-east-2a - uid: e2a6c8a6-4661-11e9-a9b0-0296069fd3a2 -spec: - replicas: 1 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: test-tkh7l - machine.openshift.io/cluster-api-machineset: test-tkh7l-worker-us-east-2a - template: - metadata: - creationTimestamp: null - labels: - machine.openshift.io/cluster-api-cluster: test-tkh7l - machine.openshift.io/cluster-api-machine-role: worker - machine.openshift.io/cluster-api-machine-type: worker - machine.openshift.io/cluster-api-machineset: test-tkh7l-worker-us-east-2a - spec: - metadata: - creationTimestamp: null - providerSpec: - value: - ami: - id: ami-07e0e0e0035b5a3fe <1> - apiVersion: awsproviderconfig.openshift.io/v1beta1 - blockDevices: - - ebs: - iops: 0 - volumeSize: 120 - volumeType: gp2 - credentialsSecret: - name: aws-cloud-credentials - deviceIndex: 0 - iamInstanceProfile: - id: test-tkh7l-worker-profile - instanceType: m4.large - kind: AWSMachineProviderConfig - metadata: - creationTimestamp: null - placement: - availabilityZone: us-east-2a - region: us-east-2 - publicIp: null - securityGroups: - - filters: - - name: tag:Name - values: - - test-tkh7l-worker-sg <2> - subnet: - filters: - - name: tag:Name - values: - - test-tkh7l-private-us-east-2a - tags: - - name: kubernetes.io/cluster/test-tkh7l - value: owned - userDataSecret: - name: worker-user-data - versions: - kubelet: "" -status: - fullyLabeledReplicas: 1 - observedGeneration: 1 - replicas: 1 ----- -<1> Specify the {op-system-first} AMI to use for your worker nodes. Use the same -value that you specified in the parameter values for your control plane and -bootstrap templates. -<2> Specify the name of the worker security group that you created in the form -`-worker-sg`. `` is the same -infrastructure name that you extracted from the Ignition config metadata, -which has the format `-`. - -//// -. Optional: Replace the `subnet` stanza with one that specifies the subnet -to deploy the machines on: -+ ----- -subnet: - filters: - - name: tag: <1> - values: - - test-tkh7l-private-us-east-2a <2> ----- -<1> Set the `` of the tag to `Name`, `ID`, or `ARN`. -<2> Specify the `Name`, `ID`, or `ARN` value for the subnet. This value must -match the `tag` type that you specify. -//// - -. 
View the machines in the `openshift-machine-api` namespace and confirm that -they are launching: -+ ----- -$ oc get machines --namespace openshift-machine-api -NAME INSTANCE STATE TYPE REGION ZONE AGE -test-tkh7l-worker-us-east-2a-hxlqn i-0e7f3a52b2919471e pending m4.4xlarge us-east-2 us-east-2a 3s ----- diff --git a/_unused_topics/installation-osp-troubleshooting.adoc b/_unused_topics/installation-osp-troubleshooting.adoc deleted file mode 100644 index 8b5bcff20bd9..000000000000 --- a/_unused_topics/installation-osp-troubleshooting.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * n/a - -[id="installation-osp-customizing_{context}"] - -= Troubleshooting {product-title} on OpenStack installations - -// Structure as needed in the end. This is very much a WIP. -// A few more troubleshooting and/or known issues blurbs incoming - -Unfortunately, there will always be some cases where {product-title} fails to install properly. In these events, it is helpful to understand the likely failure modes as well as how to troubleshoot the failure. - -This document discusses some troubleshooting options for {rh-openstack}-based -deployments. For general tips on troubleshooting the installation program, see the [Installer Troubleshooting](../troubleshooting.md) guide. - -== View instance logs - -{rh-openstack} CLI tools must be installed, then: - ----- -$ openstack console log show ----- - -== Connect to instances via SSH - -Get the IP address of the machine on the private network: -``` -openstack server list | grep master -| 0dcd756b-ad80-42f1-987a-1451b1ae95ba | cluster-wbzrr-master-1 | ACTIVE | cluster-wbzrr-openshift=172.24.0.21 | rhcos | m1.s2.xlarge | -| 3b455e43-729b-4e64-b3bd-1d4da9996f27 | cluster-wbzrr-master-2 | ACTIVE | cluster-wbzrr-openshift=172.24.0.18 | rhcos | m1.s2.xlarge | -| 775898c3-ecc2-41a4-b98b-a4cd5ae56fd0 | cluster-wbzrr-master-0 | ACTIVE | cluster-wbzrr-openshift=172.24.0.12 | rhcos | m1.s2.xlarge | -``` - -And connect to it using the control plane machine currently holding the API as a jumpbox: - -``` -ssh -J core@${floating IP address}<1> core@ -``` -<1> The floating IP address assigned to the control plane machine. diff --git a/_unused_topics/looking-inside-nodes.adoc b/_unused_topics/looking-inside-nodes.adoc deleted file mode 100644 index 26ab2dccd6be..000000000000 --- a/_unused_topics/looking-inside-nodes.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// - -[id="looking-inside-openshift-nodes_{context}"] -= Looking inside {product-title} nodes - -Directly accessing a node is strongly discouraged. Nodes are meant to be managed entirely from the cluster and that are considered tainted if you log in to a node and change anything. That said, there might be times when you want to troubleshoot a problem on a node or simply go onto a node in a test environment to see how things work. - -For debugging purposes, the oc debug command lets you go inside any pod and look around. For nodes, in particular, you open a tools pod on the node, then chroot to the node’s host filesystem. At that point, you are effectively working on the node. 
Here’s how to do that: - ----- -$ oc get nodes - -NAME STATUS ROLES AGE VERSION - -ip-10-0-0-1.us-east-2.compute.internal Ready worker 3h19m v1.25.0 - -ip-10-0-0-39.us-east-2.compute.internal Ready master 3h37m v1.25.0 - -… - -$ oc debug nodes/ip-10-0-138-39.us-east-2.compute.internal - -Starting pod/ip-10-0-138-39us-east-2computeinternal-debug …​ ----- - ----- -$ oc debug nodes/ip-10-0-138-39.us-east-2.compute.internal - -Starting pod/ip-10-0-138-39us-east-2computeinternal-debug …​ - -To use host binaries, run chroot /host - -If you don’t see a command prompt, try pressing enter. - -sh-4.3# ----- - -As noted, you can change to the root of the node’s filesystem by typing chroot /host and running commands from the host on that filesystem as though you were logged in directly from the host. Here are some examples of commands you can run to see what is happening on the node: - -* crictl: This CRI-O client command provides many of the same operations for examining images images and containers that the docker CLI offers the Docker Container Engine. One difference is that crictl can also act on pods. If you are debugging issues with containers run the {product-title} users or the {product-title} control plane, crictl is the best tool to use. -* podman: Provides many of the same features as crictl and docker CLI tools, but requires no container engine. On a node, podman can be useful for debugging container issues if the CRI-O runtime isn’t working. -* skopeo: Copy, delete, and inspect container images with skopeo. -* rpm-ostree: Use e.g. rpm-ostree status to look at the operating system state. -* journalctl: The standard journalctl command can be very useful for querying the system journal for messages that provide information about applications running on the system. - -Because the nodes are {op-system} Linux-based systems, you can use standard Linux commands to explore the nodes as well. These include ps, netstat, ip, route, rpm, and many others. You can change to the /etc directory on the host and look into configuration files for services running directly on the host. For example, look at /etc/crio/crio.conf for CRI-O settings, /etc/resolv.conf for DNS server settings, and /etc/ssh for SSH service configuration and keys. - -If you are unable to reach the nodes with oc debug, because something is wrong with the {product-title} cluster, you might be able to debug the nodes by setting up a bastion host on the cluster. For information on setting up a bastion host for {product-title}, see https://github.com/eparis/ssh-bastion[ssh-bastion]. diff --git a/_unused_topics/machine-configs-and-pools.adoc b/_unused_topics/machine-configs-and-pools.adoc deleted file mode 100644 index d627624ec571..000000000000 --- a/_unused_topics/machine-configs-and-pools.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * TBD - -[id="machine-configs-and-pools_{context}"] -= Machine Configs and Machine Config Pools -Machine Config Pools manage a cluster of nodes and their corresponding -Machine Configs. Machine Configs contain configuration information for a -cluster. 
- -To list all Machine Config Pools that are known: - ----- -$ oc get machineconfigpools -NAME CONFIG UPDATED UPDATING DEGRADED -master master-1638c1aea398413bb918e76632f20799 False False False -worker worker-2feef4f8288936489a5a832ca8efe953 False False False ----- - -To list all Machine Configs: ----- -$ oc get machineconfig -NAME GENERATEDBYCONTROLLER IGNITIONVERSION CREATED OSIMAGEURL -00-master 4.0.0-0.150.0.0-dirty 2.2.0 16m -00-master-ssh 4.0.0-0.150.0.0-dirty 16m -00-worker 4.0.0-0.150.0.0-dirty 2.2.0 16m -00-worker-ssh 4.0.0-0.150.0.0-dirty 16m -01-master-kubelet 4.0.0-0.150.0.0-dirty 2.2.0 16m -01-worker-kubelet 4.0.0-0.150.0.0-dirty 2.2.0 16m -master-1638c1aea398413bb918e76632f20799 4.0.0-0.150.0.0-dirty 2.2.0 16m -worker-2feef4f8288936489a5a832ca8efe953 4.0.0-0.150.0.0-dirty 2.2.0 16m ----- - -To list all KubeletConfigs: - ----- -$ oc get kubeletconfigs ----- - -To get more detailed information about a KubeletConfig, including the reason for -the current condition: - ----- -$ oc describe kubeletconfig ----- - -For example: - ----- -# oc describe kubeletconfig set-max-pods - -Name: set-max-pods <1> -Namespace: -Labels: -Annotations: -API Version: machineconfiguration.openshift.io/v1 -Kind: KubeletConfig -Metadata: - Creation Timestamp: 2019-02-05T16:27:20Z - Generation: 1 - Resource Version: 19694 - Self Link: /apis/machineconfiguration.openshift.io/v1/kubeletconfigs/set-max-pods - UID: e8ee6410-2962-11e9-9bcc-664f163f5f0f -Spec: - Kubelet Config: <2> - Max Pods: 100 - Machine Config Pool Selector: <3> - Match Labels: - Custom - Kubelet: small-pods -Events: ----- - -<1> The name of the KubeletConfig. -<2> The user defined configuration. -<3> The Machine Config Pool selector to apply the KubeletConfig to. \ No newline at end of file diff --git a/_unused_topics/managing-dedicated-readers-group.adoc b/_unused_topics/managing-dedicated-readers-group.adoc deleted file mode 100644 index 511dc8313ab6..000000000000 --- a/_unused_topics/managing-dedicated-readers-group.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// administering_a_cluster/dedicated-admin-role.adoc - -[id="dedicated-managing-dedicated-readers-group_{context}"] -= Managing the dedicated-readers group - -Users with a `dedicated-reader` role are granted edit and view access to the -`dedicated-reader` project and view-only access to the other projects. - -To view a list of current dedicated readers by user name, you can use the -following command: - ----- -$ oc describe group dedicated-readers ----- - -To add a new member to the `dedicated-readers` group, if you have -`dedicated-admin` access: - ----- -$ oc adm groups add-users dedicated-readers ----- - -To remove an existing user from the `dedicated-readers` group, if you have -`dedicated-admin` access: - ----- -$ oc adm groups remove-users dedicated-readers ----- diff --git a/_unused_topics/metering-resources.adoc b/_unused_topics/metering-resources.adoc deleted file mode 100644 index 7b0f67114a9a..000000000000 --- a/_unused_topics/metering-resources.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/metering-install-metering.adoc - -[id="metering-resources_{context}"] -= Metering resources - -Metering has many resources, which can be used to manage the deployment and installation of Metering, as well as the reporting functionality Metering provides. 
- -Metering is managed using the following CustomResourceDefinitions (CRDs): - -[cols="1,7"] -|=== - -|*MeteringConfig* |Configures the Metering stack for deployment. Contains customizations and configuration options to control each component that makes up the Metering stack. - -|*Reports* |Controls what query to use, when, and how often the query should be run, and where to store the results. - -|*ReportQueries* |Contains the SQL queries used to perform analysis on the data contained within ReportDataSources. - -|*ReportDataSources* |Controls the data available to ReportQueries and Reports. Allows configuring access to different databases for use within Metering. - -|=== diff --git a/_unused_topics/microshift-adding-containers-to-blueprint.adoc b/_unused_topics/microshift-adding-containers-to-blueprint.adoc deleted file mode 100644 index 644138019618..000000000000 --- a/_unused_topics/microshift-adding-containers-to-blueprint.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// microshift/microshift-embed-into-rpm-ostree.adoc - -:_content-type: PROCEDURE -[id="adding-microshift-container-images_{context}"] -= Adding the {product-title} container images - -You can embed {product-title}'s container images into the {op-system-ostree} images so that they are immediately available to the CRI-O container engine after booting. Embedded container images are not pulled over the network from a container registry. In Image Builder, a container image is embedded by adding a reference to it to the Image Builder blueprint. - -The following syntax must be used to add a configuration section to the blueprint file. You can then add the container image that you want to embed in the {op-system-ostree} image. - -.Example syntax for adding a container image to a blueprint - -[source,toml] ----- -[[containers]] -source = "" ----- - -.Prerequisites - -* You have installed jq. - -.Procedure - -You must have the exact list of container image references used by the {product-title} version to embed {product-title}'s container images. Use the `microshift-release-info` RPM package matching the version of the `microshift` RPM in your blueprint. Use the following procedure. - -. Download the `microshift-release-info` RPM package matching your {product-title} version by running the following commands: -+ -[source,terminal] ----- -$ VERSION=$(sudo yum list | awk "/^microshift\./ {print \$2;}") ----- -+ -[source,terminal] ----- -$ yum download microshift-release-info-${VERSION} ----- - -. Extract the release info by running the following command: -+ -[source,terminal] ----- -$ rpm2cpio microshift-release-info-${VERSION}.noarch.rpm | cpio -idmv -./usr/share/microshift/release/release-aarch64.json -./usr/share/microshift/release/release-x86_64.json ----- - -. Generate the lines to append to your blueprint using the release info for your CPU architecture by running the following command: -+ -[source,terminal] ----- -$ jq -r '.images | .[] | ("[[containers]]\nsource = \"" + . 
+ "\"\n")' ./usr/share/microshift/release/release-$(uname -m).json ----- -+ -.Brief output sample -+ -[source, toml] ----- -[[containers]] -source = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9945c3f5475a37e145160d2fe6bb21948f1024a856827bc9e7d5bc882f44a750" - -[[containers]] -source = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:82cfef91557f9a70cff5a90accba45841a37524e9b93f98a97b20f6b2b69e5db" ----- diff --git a/_unused_topics/microshift-man-config-ovs-bridge.adoc b/_unused_topics/microshift-man-config-ovs-bridge.adoc deleted file mode 100644 index bf6226f997ae..000000000000 --- a/_unused_topics/microshift-man-config-ovs-bridge.adoc +++ /dev/null @@ -1,39 +0,0 @@ -//FIXME: need updated config procedure for customers that will persist across reboots -//this module content is unused as of 4.13 - -//=== Manually configuring OVS bridge br-ex -//.Procedure -//Manually configure the OVS bridge br-ex by running the following commands. - -//* Initiate OVS: -//+ -//[source, terminal] -//---- -//$ sudo systemctl enable openvswitch --now -//---- -//* Add the network bridge: -//+ -//[source, terminal] -//---- -//$ sudo ovs-vsctl add-br br-ex -//---- -//* Add the interface to the network bridge: -//+ -//[source, terminal] -//---- -//$ sudo ovs-vsctl add-port br-ex -//---- -//The `` is the network interface name where the node IP address is assigned. -//* Get the bridge up and running: -//+ -//[source, terminal] -//---- -//$ sudo ip link set br-ex up -//---- -//* After `br-ex up` is running, assign the node IP address to `br-ex` bridge: -//[source, terminal] -//---- -//$ sudo ... -//---- -//[NOTE] -//Adding a physical interface to `br-ex` bridge will disconnect the ssh connection on the node IP address. \ No newline at end of file diff --git a/_unused_topics/microshift-nodeport-unreachable-workaround.adoc b/_unused_topics/microshift-nodeport-unreachable-workaround.adoc deleted file mode 100644 index 4bef2a62fce3..000000000000 --- a/_unused_topics/microshift-nodeport-unreachable-workaround.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * module may be unused in 4.13 - -:_content-type: PROCEDURE -[id="microshift-nodeport-unreachable-workaround_{context}"] -= Manually restarting the `ovnkube-master` pod to resume node port traffic - -After you install {product-title}, NodePort service traffic might stop. To troubleshoot this issue, manually restart the `ovnkube-master` pod in the `openshift-ovn-kubernetes` namespace. - -.Prerequisites - -* The OpenShift CLI (`oc`) is installed. -* A cluster installed on infrastructure configured with the Open Virtual Network (OVN)-Kubernetes network plugin. -* Access to the `kubeconfig` file. -* The KUBECONFIG environment variable is set. - -.Procedure - -Run the commands listed in each step that follows to restore the `NodePort` service traffic after you install {product-title}: - -. Find the name of the ovnkube-master pod that you want to restart by running the following command: -+ -[source, terminal] ----- -$ pod=$(oc get pods -n openshift-ovn-kubernetes | grep ovnkube-master | awk -F " " '{print $1}') ----- - -. Force a restart of the ovnkube-master pod by running the following command: -+ -[source, terminal] ----- -$ oc -n openshift-ovn-kubernetes delete pod $pod ----- - -. 
Optional: To confirm that the ovnkube-master pod restarted, run the following command: -+ -[source, terminal] ----- -$ oc get pods -n openshift-ovn-kubernetes ----- -If the pod restarted, the listing of the running pods shows a different ovnkube-master pod name and age consistent with the procedure you just completed. - -. Verify that the `NodePort` service can now be reached. - diff --git a/_unused_topics/monitoring-configuring-etcd-monitoring.adoc b/_unused_topics/monitoring-configuring-etcd-monitoring.adoc deleted file mode 100644 index 66e1144babb9..000000000000 --- a/_unused_topics/monitoring-configuring-etcd-monitoring.adoc +++ /dev/null @@ -1,190 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/monitoring.adoc - -[id="configuring-etcd-monitoring_{context}"] -= Configuring etcd monitoring - -If the `etcd` service does not run correctly, successful operation of the whole {product-title} cluster is in danger. Therefore, it is reasonable to configure monitoring of `etcd`. - -.Procedure - -. Verify that the monitoring stack is running: -+ -[subs="quotes"] ----- -$ oc -n openshift-monitoring get pods -NAME READY STATUS RESTARTS AGE -alertmanager-main-0 3/3 Running 0 34m -alertmanager-main-1 3/3 Running 0 33m -alertmanager-main-2 3/3 Running 0 33m -cluster-monitoring-operator-67b8797d79-sphxj 1/1 Running 0 36m -grafana-c66997f-pxrf7 2/2 Running 0 37s -kube-state-metrics-7449d589bc-rt4mq 3/3 Running 0 33m -node-exporter-5tt4f 2/2 Running 0 33m -node-exporter-b2mrp 2/2 Running 0 33m -node-exporter-fd52p 2/2 Running 0 33m -node-exporter-hfqgv 2/2 Running 0 33m -prometheus-k8s-0 4/4 Running 1 35m -prometheus-k8s-1 0/4 ContainerCreating 0 21s -prometheus-operator-6c9fddd47f-9jfgk 1/1 Running 0 36m ----- - -. Open the configuration file for the cluster monitoring stack: -+ -[subs="quotes"] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- - -. Under `config.yaml: |+`, add the `etcd` section. -+ -.. If you run `etcd` in static pods on your control plane nodes (also known as master nodes), you can specify the `etcd` nodes using the selector: -+ -[subs="quotes"] ----- -... -data: - config.yaml: |+ - ... - *etcd: - targets: - selector: - openshift.io/component: etcd - openshift.io/control-plane: "true"* ----- -+ -.. If you run `etcd` on separate hosts, you must specify the nodes using IP addresses: -+ -[subs="quotes"] ----- -... -data: - config.yaml: |+ - ... - *etcd: - targets: - ips: - - "127.0.0.1" - - "127.0.0.2" - - "127.0.0.3"* ----- -+ -If `etcd` nodes IP addresses change, you must update this list. - -. Verify that the `etcd` service monitor is now running: -+ -[subs="quotes"] ----- -$ oc -n openshift-monitoring get servicemonitor -NAME AGE -alertmanager 35m -*etcd 1m* -kube-apiserver 36m -kube-controllers 36m -kube-state-metrics 34m -kubelet 36m -node-exporter 34m -prometheus 36m -prometheus-operator 37m ----- -+ -It might take up to a minute for the `etcd` service monitor to start. - -. Now you can navigate to the Web interface to see more information about status of `etcd` monitoring: -+ -.. To get the URL, run: -+ -[subs="quotes"] ----- -$ oc -n openshift-monitoring get routes -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -... -prometheus-k8s prometheus-k8s-openshift-monitoring.apps.msvistun.origin-gce.dev.openshift.com prometheus-k8s web reencrypt None ----- -+ -.. Using `https`, navigate to the URL listed for `prometheus-k8s`. Log in. - -. Ensure the user belongs to the `cluster-monitoring-view` role. 
This role provides access to viewing cluster monitoring UIs. For example, to add user `developer` to `cluster-monitoring-view`, run: - - $ oc adm policy add-cluster-role-to-user cluster-monitoring-view developer -+ - -. In the Web interface, log in as the user belonging to the `cluster-monitoring-view` role. - -. Click *Status*, then *Targets*. If you see an `etcd` entry, `etcd` is being monitored. -+ -image::etcd-no-certificate.png[] - -While `etcd` is being monitored, Prometheus is not yet able to authenticate against `etcd`, and so cannot gather metrics. To configure Prometheus authentication against `etcd`: - -. Copy the `/etc/etcd/ca/ca.crt` and `/etc/etcd/ca/ca.key` credentials files from the control plane node to the local machine: -+ -[subs="quotes"] ----- -$ ssh -i gcp-dev/ssh-privatekey cloud-user@35.237.54.213 -... ----- - -. Create the `openssl.cnf` file with these contents: -+ ----- -[ req ] -req_extensions = v3_req -distinguished_name = req_distinguished_name -[ req_distinguished_name ] -[ v3_req ] -basicConstraints = CA:FALSE -keyUsage = nonRepudiation, keyEncipherment, digitalSignature -extendedKeyUsage=serverAuth, clientAuth ----- - -. Generate the `etcd.key` private key file: -+ -[subs="quotes"] ----- -$ openssl genrsa -out etcd.key 2048 ----- - -. Generate the `etcd.csr` certificate signing request file: -+ -[subs="quotes"] ----- -$ openssl req -new -key etcd.key -out etcd.csr -subj "/CN=etcd" -config openssl.cnf ----- - -. Generate the `etcd.crt` certificate file: -+ -[subs="quotes"] ----- -$ openssl x509 -req -in etcd.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out etcd.crt -days 365 -extensions v3_req -extfile openssl.cnf ----- - -. Put the credentials into format used by {product-title}: -+ ----- -cat <<-EOF > etcd-cert-secret.yaml -apiVersion: v1 -data: - etcd-client-ca.crt: "$(cat ca.crt | base64 --wrap=0)" - etcd-client.crt: "$(cat etcd.crt | base64 --wrap=0)" - etcd-client.key: "$(cat etcd.key | base64 --wrap=0)" -kind: Secret -metadata: - name: kube-etcd-client-certs - namespace: openshift-monitoring -type: Opaque -EOF ----- -+ -This creates the *_etcd-cert-secret.yaml_* file - -. Apply the credentials file to the cluster: - ----- -$ oc apply -f etcd-cert-secret.yaml ----- - -. Visit the "Targets" page of the Web interface again. Verify that `etcd` is now being correctly monitored. It might take several minutes for changes to take effect. -+ -image::etcd-monitoring-working.png[] diff --git a/_unused_topics/monitoring-dead-mans-switch-pagerduty.adoc b/_unused_topics/monitoring-dead-mans-switch-pagerduty.adoc deleted file mode 100644 index 830af9f53b55..000000000000 --- a/_unused_topics/monitoring-dead-mans-switch-pagerduty.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="dead-mans-switch-pagerduty_{context}"] -== Dead man's switch PagerDuty - -https://www.pagerduty.com/[PagerDuty] supports "Dead man's switch" through an integration called https://deadmanssnitch.com/[Dead Man's Snitch]. You can enable it. - -.Procedure - -* Add a `PagerDuty` configuration to the default `deadmansswitch` receiver. -+ -For example, you can configure Dead Man's Snitch to page the operator if the "Dead man's switch" alert is silent for 15 minutes. With the default Alertmanager configuration, the Dead man's switch alert is repeated every five minutes. If Dead Man's Snitch triggers after 15 minutes, it indicates that the notification has been unsuccessful at least twice. 
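The exact steps depend on how your Alertmanager configuration is managed, but the following hypothetical fragment sketches what the `deadmansswitch` receiver might look like once it is wired to Dead Man's Snitch through the PagerDuty integration. The file name and the `service_key` value are placeholders; the `DeadMansSwitch` alert name and the five-minute repeat interval follow the default configuration described above.

[source,terminal]
----
$ cat <<'EOF' > deadmansswitch-receiver-fragment.yaml
# Hypothetical fragment only: merge into your existing Alertmanager configuration.
route:
  routes:
  - match:
      alertname: DeadMansSwitch
    repeat_interval: 5m
    receiver: deadmansswitch
receivers:
- name: deadmansswitch
  pagerduty_configs:
  - service_key: "<dead-mans-snitch-integration-key>"
EOF
----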
- -[role="_additional-resources"] -.Additional resources - -// FIXME describe the procedure instead of linking * To learn how to add a `PagerDuty` configuration to the default `deadmansswitch` receiver, see LINK. -* To learn how to configure Dead Man's Snitch for PagerDuty, see https://www.pagerduty.com/docs/guides/dead-mans-snitch-integration-guide/[Dead Man’s Snitch Integration Guide]. diff --git a/_unused_topics/monitoring-dead-mans-switch.adoc b/_unused_topics/monitoring-dead-mans-switch.adoc deleted file mode 100644 index db473bd2202e..000000000000 --- a/_unused_topics/monitoring-dead-mans-switch.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="dead-mans-switch_{context}"] -== Dead man's switch - -{product-title} Monitoring ships with a "Dead man's switch" to ensure the availability of the monitoring infrastructure. - -The "Dead man's switch" is a simple Prometheus alerting rule that always triggers. The Alertmanager continuously sends notifications for the dead man's switch to the notification provider that supports this functionality. This also ensures that communication between the Alertmanager and the notification provider is working. - -This mechanism is supported by PagerDuty to issue alerts when the monitoring system itself is down. - diff --git a/_unused_topics/monitoring-enabling-dynamically-provisioned-storage.adoc b/_unused_topics/monitoring-enabling-dynamically-provisioned-storage.adoc deleted file mode 100644 index ea4c252779cf..000000000000 --- a/_unused_topics/monitoring-enabling-dynamically-provisioned-storage.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="enabling-dynamically-provisioned-storage_{context}"] -= Enabling dynamically-provisioned storage - -Instead of statically-provisioned storage, you can use dynamically-provisioned storage. - -.Procedure - -. To enable dynamic storage for Prometheus and Alertmanager, set the following parameters to `true` in the Ansible inventory file: -+ -* `openshift_cluster_monitoring_operator_prometheus_storage_enabled` (Default: false) -* `openshift_cluster_monitoring_operator_alertmanager_storage_enabled` (Default: false) -+ -. Optional: After you enable dynamic storage, you can also set the `storageclass` for the persistent volume claim for each component in the following parameters in the Ansible inventory file: -+ -* `openshift_cluster_monitoring_operator_prometheus_storage_class_name` (default: "") -* `openshift_cluster_monitoring_operator_alertmanager_storage_class_name` (default: "") -+ -Each of these variables applies only if its corresponding `storage_enabled` variable is set to `true`. - -[role="_additional-resources"] -.Additional resources - -* See https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/[Dynamic Volume Provisioning] for details. 
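To tie the preceding storage options together, here is a rough, hypothetical sketch of how the settings might appear in an Ansible inventory. The inventory path, the `[OSEv3:vars]` group, and the `gp2` storage class name are placeholder assumptions, not values taken from this module.

[source,terminal]
----
$ cat <<'EOF' >> /path/to/inventory
# Hypothetical example values; adjust for your environment.
[OSEv3:vars]
openshift_cluster_monitoring_operator_prometheus_storage_enabled=true
openshift_cluster_monitoring_operator_alertmanager_storage_enabled=true
openshift_cluster_monitoring_operator_prometheus_storage_class_name=gp2
openshift_cluster_monitoring_operator_alertmanager_storage_class_name=gp2
EOF
----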
diff --git a/_unused_topics/monitoring-enabling-persistent-storage.adoc b/_unused_topics/monitoring-enabling-persistent-storage.adoc deleted file mode 100644 index b9bd16207584..000000000000 --- a/_unused_topics/monitoring-enabling-persistent-storage.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="enabling-persistent-storage_{context}"] -= Enabling persistent storage - -By default, persistent storage is disabled for both Prometheus time-series data and for Alertmanager notifications and silences. You can configure the cluster to persistently store any one of them or both. - -.Procedure - -* To enable persistent storage of Prometheus time-series data, set this variable to `true` in the Ansible inventory file: -+ -`openshift_cluster_monitoring_operator_prometheus_storage_enabled` -+ -To enable persistent storage of Alertmanager notifications and silences, set this variable to `true` in the Ansible inventory file: -+ -`openshift_cluster_monitoring_operator_alertmanager_storage_enabled` - diff --git a/_unused_topics/monitoring-full-list-of-configuration-variables.adoc b/_unused_topics/monitoring-full-list-of-configuration-variables.adoc deleted file mode 100644 index 65fa04e17809..000000000000 --- a/_unused_topics/monitoring-full-list-of-configuration-variables.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="full-list-of-configuration-variables_{context}"] -= Full list of configuration variables - -This table contains the full list of the inventory file variables for configuring the Cluster Monitoring Operator: - -.Cluster Monitoring Operator Ansible variables -[options="header"] -|=== - -|Variable |Description - -|`openshift_cluster_monitoring_operator_install` -| Deploy the Cluster Monitoring Operator if `true`. Otherwise, undeploy. This variable is set to `true` by default. - -|`openshift_cluster_monitoring_operator_prometheus_storage_capacity` -| The persistent volume claim size for each of the Prometheus instances. This variable applies only if `openshift_cluster_monitoring_operator_prometheus_storage_enabled` is set to `true`. Defaults to `50Gi`. - -|`openshift_cluster_monitoring_operator_alertmanager_storage_capacity` -| The persistent volume claim size for each of the Alertmanager instances. This variable applies only if `openshift_cluster_monitoring_operator_alertmanager_storage_enabled` is set to `true`. Defaults to `2Gi`. - -|`openshift_cluster_monitoring_operator_node_selector` -| Set to the desired, existing [node selector] to ensure that pods are placed onto nodes with specific labels. Defaults to `node-role.kubernetes.io/infra=true`. - -|`openshift_cluster_monitoring_operator_alertmanager_config` -| Configures Alertmanager. - -|`openshift_cluster_monitoring_operator_prometheus_storage_enabled` -| Enable persistent storage of Prometheus' time-series data. This variable is set to `false` by default. - -|`openshift_cluster_monitoring_operator_alertmanager_storage_enabled` -| Enable persistent storage of Alertmanager notifications and silences. This variable is set to `false` by default. - -|`openshift_cluster_monitoring_operator_prometheus_storage_class_name` -| If you enabled the `openshift_cluster_monitoring_operator_prometheus_storage_enabled` option, set a specific StorageClass to ensure that pods are configured to use the `PVC` with that `storageclass`. 
Defaults to `none`, which applies the default storage class name. - -|`openshift_cluster_monitoring_operator_alertmanager_storage_class_name` -| If you enabled the `openshift_cluster_monitoring_operator_alertmanager_storage_enabled` option, set a specific StorageClass to ensure that pods are configured to use the `PVC` with that `storageclass`. Defaults to `none`, which applies the default storage class name. -|=== - -[role="_additional-resources"] -.Additional resources -// FIXME add link once doc is available -// Used to point to ../admin_guide/scheduling/node_selector.adoc[Advanced Scheduling and Node Selectors] -// * See LINK for more information on node selectors. diff --git a/_unused_topics/monitoring-grouping-alerts.adoc b/_unused_topics/monitoring-grouping-alerts.adoc deleted file mode 100644 index ad71f4988995..000000000000 --- a/_unused_topics/monitoring-grouping-alerts.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="grouping-alerts_{context}"] -== Grouping alerts - -Once alerts are firing against the Alertmanager, it must be configured to know how to logically group them. This procedure shows how to configure alert grouping: - -.Procedure - -_FIXME get missing info and complete the procedure_ - -For this example, a new route will be added to reflect alert routing of the "frontend" team. - -. Add new routes. Multiple routes may be added beneath the original route, typically to define the receiver for the notification. This example uses a matcher to ensure that only alerts coming from the service `example-app` are used: -+ - global: - resolve_timeout: 5m - route: - group_wait: 30s - group_interval: 5m - repeat_interval: 12h - receiver: default - routes: - - match: - alertname: DeadMansSwitch - repeat_interval: 5m - receiver: deadmansswitch - - match: - service: example-app - routes: - - match: - severity: critical - receiver: team-frontend-page - receivers: - - name: default - - name: deadmansswitch -+ -The sub-route matches only on alerts that have a severity of `critical`, and sends them via the receiver called `team-frontend-page`. As the name indicates, someone should be paged for alerts that are critical. - - diff --git a/_unused_topics/monitoring-monitoring-overview.adoc b/_unused_topics/monitoring-monitoring-overview.adoc deleted file mode 100644 index 8b1096b717b0..000000000000 --- a/_unused_topics/monitoring-monitoring-overview.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/monitoring.adoc - -[id="monitoring-overview_{context}"] -= Monitoring overview - -{product-title} ships with a pre-configured, pre-installed, and self-updating monitoring stack that is based on the link:https://prometheus.io/[Prometheus] open source project and its wider eco-system. It provides monitoring of cluster components and ships with a set of alerts to immediately notify the cluster administrator about any occurring problems and a set of link:https://grafana.com/[Grafana] dashboards. - -The monitoring stack includes these components: - -* Cluster Monitoring Operator -* Prometheus Operator -* Prometheus -* Prometheus Adapter -* Alertmanager -* kube-state-metrics -* node-exporter -* Grafana - -The {product-title} Cluster Monitoring Operator (CMO) is the central component of the stack. It watches over the deployed monitoring components and resources and ensures that they are always up to date. 
- -The Prometheus Operator (PO) creates, configures, and manages Prometheus and Alertmanager instances. It also automatically generates monitoring target configurations based on familiar Kubernetes label queries. - -The Prometheus Adapter exposes cluster resource metrics (CPU and memory utilization) API for horizontal pod autoscaling. - -Node-exporter is an agent deployed on every node to collect metrics about it. - -The kube-state-metrics exporter agent converts Kubernetes objects to metrics consumable by Prometheus. - -All the components of the monitoring stack are monitored by the stack. Additionally, the stack monitors: - -* cluster-version-operator -* image-registry -* kube-apiserver -* kube-apiserver-operator -* kube-controller-manager -* kube-controller-manager-operator -* kube-scheduler -* kubelet -* monitor-sdn -* openshift-apiserver -* openshift-apiserver-operator -* openshift-controller-manager -* openshift-controller-manager-operator -* openshift-svcat-controller-manager-operator -* telemeter-client - -All these components are automatically updated. - -Other {product-title} framework components might be exposing metrics as well. See their respective documentation. - -[NOTE] -==== -To be able to deliver updates with guaranteed compatibility, configurability of the {product-title} Monitoring stack is limited to the explicitly available options. -==== - diff --git a/_unused_topics/monitoring-setting-persistent-storage-size.adoc b/_unused_topics/monitoring-setting-persistent-storage-size.adoc deleted file mode 100644 index 619f6133c9bc..000000000000 --- a/_unused_topics/monitoring-setting-persistent-storage-size.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="setting-persistent-storage-size_{context}"] -= Setting persistent storage size - -You can specify the size of the persistent volume claim for Prometheus and Alertmanager. - -.Procedure - -* Change these Ansible variables: -+ --- -* `openshift_cluster_monitoring_operator_prometheus_storage_capacity` (default: 50Gi) -* `openshift_cluster_monitoring_operator_alertmanager_storage_capacity` (default: 2Gi) --- -+ -Each of these variables applies only if its corresponding `storage_enabled` variable is set to `true`. - diff --git a/_unused_topics/monitoring-update-and-compatibility-guarantees.adoc b/_unused_topics/monitoring-update-and-compatibility-guarantees.adoc deleted file mode 100644 index 4f42970573fc..000000000000 --- a/_unused_topics/monitoring-update-and-compatibility-guarantees.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/monitoring.adoc - -[id="update-and-compatibility-guarantees_{context}"] -= Update and compatibility guarantees - -To be able to deliver updates with guaranteed compatibility, configurability of the {product-title} Monitoring stack is limited to the explicitly available options. This document describes known pitfalls of which types of configuration and customization are unsupported, as well as misuse of resources provided by {product-title} Monitoring. All configuration options described in this topic are explicitly supported. - -*Modification of {product-title} monitoring resources* - -The {product-title} Monitoring stack ensures its resources are _always_ in the state it expects them to be. If they are modified, {product-title} Monitoring will ensure that this will be reset. 
Nonetheless it is possible to pause this behavior, by setting the `paused` field in the `AppVersion` called `openshift-monitoring`. Setting the {product-title} Monitoring stack to be paused, stops all future updates and will cause modification of the otherwise managed resources. If resources are modified in an uncontrolled manner, this will cause undefined behavior during updates. - -To ensure compatible and functioning updates, the `paused` field must be set to `false` on upgrades. - -*Usage of resources created by {product-title} monitoring* - -{product-title} Monitoring creates a number of resources. These resources are not meant to be used by any other resources, as there are no guarantees about their backward compatibility. For example, a `ClusterRole` called `prometheus-k8s` is created, and has very specific roles that exist solely for the cluster monitoring Prometheus pods to be able to access the resources it requires access to. All of these resources have no compatibility guarantees going forward. While some of these resources may incidentally have the necessary information for RBAC purposes for example, they can be subject to change in any upcoming release, with no backward compatibility. - -If the `Role` or `ClusterRole` objects that are similar are needed, we recommend creating a new object that has exactly the permissions required for the case at hand, rather than using the resources created and maintained by {product-title} Monitoring. diff --git a/_unused_topics/mounting-local-volumes.adoc b/_unused_topics/mounting-local-volumes.adoc deleted file mode 100644 index c0278465a113..000000000000 --- a/_unused_topics/mounting-local-volumes.adoc +++ /dev/null @@ -1,29 +0,0 @@ -[id="mounting-local-volumes_{context}"] -= Mounting local volumes - -This paragraph is the procedure module introduction: a short description of the procedure. - -.Prerequisites - -* All local volumes must be manually mounted before they can be consumed by {product-title} as PVs. - -.Procedure - -. Mount all volumes into the `*/mnt/local-storage//*` path: -+ ----- -# device name # mount point # FS # options # extra -/dev/sdb1 /mnt/local-storage/ssd/disk1 ext4 defaults 1 2 -/dev/sdb2 /mnt/local-storage/ssd/disk2 ext4 defaults 1 2 -/dev/sdb3 /mnt/local-storage/ssd/disk3 ext4 defaults 1 2 -/dev/sdc1 /mnt/local-storage/hdd/disk1 ext4 defaults 1 2 -/dev/sdc2 /mnt/local-storage/hdd/disk2 ext4 defaults 1 2 ----- -+ -Administrators must create local devices as needed using any method such as disk partition or LVM, create suitable file systems on these devices, and mount these devices using a script or /etc/fstab entries - -. 
Make all volumes accessible to the processes running within the Docker containers: -+ ----- -$ chcon -R unconfined_u:object_r:svirt_sandbox_file_t:s0 /mnt/local-storage/ ----- diff --git a/_unused_topics/nodes-cluster-disabling-features-list.adoc b/_unused_topics/nodes-cluster-disabling-features-list.adoc deleted file mode 100644 index ea94c7d6509d..000000000000 --- a/_unused_topics/nodes-cluster-disabling-features-list.adoc +++ /dev/null @@ -1,265 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-disabling-features.adoc - -[id="nodes-cluster-disabling-features-list_{context}"] -= List of feature gates - -Use the following list to determine the name of the feature you want to disable: - -[options="header"] -|=== -| Feature gate| Description | Default - -| *AdvancedAuditing* -| Enables a more general API auditing pipeline, which includes support for pluggable output backends and an audit policy specifying how different requests should be audited. -| True - -| *APIListChunking* -| Enables the API clients to retrieve LIST or GET resources from API server in chunks. -| True - -| *APIResponseCompression* -| Enables the compression of API responses for LIST or GET requests. -| False - -| *AppArmor* -| Enables AppArmor-based mandatory access control on Linux nodes when using Docker. For more information, see the link:https://kubernetes.io/docs/tutorials/clusters/apparmor/[Kubernetes AppArmor documentation]. -| True - -| *AttachVolumeLimit* -| Adds support for volume plugins to report node specific volume limits. -| True - -| *BalanceAttachedNodeVolumes* -| Includes volume count on node to be considered for balanced resource allocation while scheduling. A node which has closer CPU, memory utilization, and volume count is favored by scheduler while making decisions. -| False - -| *BlockVolume* -| Enables the definition and consumption of raw block devices in pods. For more information, see -the link:https://kubernetes.io/docs/concepts/storage/persistent-volumes/#raw-block-volume-support[Kubernetes Raw Block Volume Support]. -| False - -| *CPUManager* -| Enables Container-level CPU affinity support. -For more information, see Using CPU Manager. -| True - -| *CPUCFSQuotaPeriod* -| Enables nodes to change CPUCFSQuotaPeriod. -| False - -| *CRIcontainerLogRotation* -| Enables Container log rotation for the CRI Container runtime. -| True - -| *CSIBlockVolume* -| Enables CSI to use raw block storage volumes. -| False - -| *CSIDriverRegistry* -| Enables all logic related to the CSIDriver API object in csi.storage.k8s.io. -| False - -| *CSINodeInfo* -| Enables all logic related to the CSINodeInfo API object in csi.storage.k8s.io. -| False - -| *CSIPersistentVolume* -| Enables discovering and mounting volumes provisioned through a CSI (Container Storage Interface) compatible volume plugin. For more information, -see the link:https://github.com/kubernetes/community/blob/master/contributors/design-proposals/storage/container-storage-interface.md[CSI Volume Plugins in Kubernetes Design Documentation]. -| True - -| *CustomPodDNS* -| Enables customizing the DNS settings for a pod using the *dnsConfig* property. -| True - -| *Debugcontainers* -| Enables running a debugging Container in a pod namespace to troubleshoot a running Pod. -| False - -| *DevicePlugins* -| Enables device plug-in-based resource provisioning on nodes. -| True - -| *DryRun* -| Allows requests to be processed but not stored, so that validation, merging, mutation can be tested without committing. 
-| False - -| *DynamicKubeletConfig* -| Enables the dynamic configuration in a cluster. -| True - -| *EnableEquivalenceClassCache* -| Enables the scheduler to cache equivalence of nodes when scheduling Pods. -| False - -| *ExpandPersistentVolumes* -| Enables the ability to Expand persistent volumes. -| True - -| *ExpandInUsePersistentVolumes* -| Enables the ability to expand persistent volumes' file system without unmounting volumes. -| False - -| *ExperimentalHostUserNamespaceDefaultingGate* -| Enables the disabling of user namespaces. This is for Containers that are using other host projects, host mounts, or Containers that are privileged or using specific non-project capabilities, such as MKNODE, SYS_MODULE, and so forth. This should only be enabled if user project remapping is enabled in the Docker daemon. -| False - -| *GCERegionalPersistentDisk* -| Enables the GCE Persistent Disk feature. -| True - -| *HugePages* -| Enables the allocation and consumption of pre-allocated huge pages. -| True - -| *HyperVcontainer* -| Enables Hyper-V isolation for Windows Containers. -| False - -| *Intializers* -| Enables the dynamic admission control as an extension to the built-in admission controllers. -| False - -| *KubeletPluginsWatcher* -| Enables probe based plugin watcher utility for discovering Kubelet plugins. -| True - -| *LocalStorageCapacityIsolation* -| Enables the consumption of local ephemeral storage and the `sizeLimit` property of an *emptyDir* volume. -| False - -| *Mountcontainers* -| Enables using utility Containers on the host as the volume mount. -| False - -| *MountPropagation* -| Enables sharing a volume mounted by one Container to other Containers or pods. -| True - -| *NodeLease* -| Kubelet uses the new Lease API to report node heartbeats, (Kube) Node Lifecycle Controller uses these heartbeats as a node health signal. -| False - -| *PersistentLocalVolumes* -| Enables the usage of local volume pods. Pod affinity has to be specified if requesting a local volume. -| True - -| *PodPriority* -| Enables the descheduling and preemption of pods based on their priorities. -| True - -| *PodReadinessGates* -| Supports Pod Readiness. -| True - -| *PodShareProcessNamespace* -| Allows all containers in a pod to share a process namespace. -| True - -| *ProcMountType* -| Enables control over ProcMountType for containers. -| False - -| *QOSReserved* -| Allows resource reservations at the QoS level preventing pods at lower QoS levels from bursting into resources requested at higher QoS levels (memory only for now). -| False - -| *ResourceLimitsPriorityFunction* -| Enables a scheduler priority function that assigns a lowest possible score of `1` to a node that satisfies at least one of the input pod CPU and memory limits. The intent is to break ties between nodes with same scores. -| False - -| *ResourceQuotaScopeSelectors* -| Enables resource quota scope selectors. -| True - -| *RotateKubeletClientCertificate* -| Enables the rotation of the client TLS certificate on the cluster. -| True - -| *RotateKubeletServerCertificate* -| Enables the rotation of the server TLS certificate on the cluster. -| True - -| *RunAsGroup* -| Enables control over the primary group ID set on the init processes of Containers. -| False - -| *RuntimeClass* -| Enables RuntimeClass, for selecting between multiple runtimes to run a pod. -| False - -| *ScheduleDaemonSetPods* -| Enables DaemonSet pods to be scheduled by the default scheduler instead of the DaemonSet controller. 
-| True - -| *SCTPSupport* -| Enables SCTP as new protocol for Service ports, NetworkPolicy, and ContainerPort in Pod/Containers definition. -| False - -| *ServiceNodeExclusion* -| Enables the exclusion of nodes from load balancers created by a cloud provider. -| False - -| *StorageObjectInUseProtection* -| Enables postponing the deletion of persistent volume or persistent volume claim objects if they are still being used. -| True - -| *StreamingProxyRedirects* -| Instructs the API server to intercept and follow redirects from the backend kubelet for streaming requests. -| True - -| *SupportIPVSProxyMode* -| Enables providing in-cluster service load balancing using IP virtual servers. -| True - -| *SupportPodPidsLimit* -| Enables support for limiting the number of processes (PIDs) running in a pod. -| True - -| *Sysctls* -| Enables pods to set sysctls on a pod. -| True - -| *TaintBasedEvictions* -| Enables evicting pods from nodes based on taints on nodes and tolerations on pods. -| False - -| *TaintNodesByCondition* -| Enables automatic tainting nodes based on node conditions. -| True - -| *TokenRequest* -| Enables the TokenRequest endpoint on service account resources. -| True - -| *TokenRequestProjection* -| Enables ServiceAccountTokenVolumeProjection support in ProjectedVolumes. -| True - -| *TTLAfterFinished* -| Allows TTL controller to clean up Pods and Jobs after they finish. -| False - -| *ValidateProxyRedirects* -| Controls whether the apiserver should validate that redirects are only followed to the same host. Only used if StreamingProxyRedirects is enabled. -| False - -| *VolumeScheduling* -| Enables volume-topology-aware scheduling and make the persistent volume claim (PVC) binding aware of scheduling decisions. It also enables the usage of local volumes types when used together with the *PersistentLocalVolumes* feature gate. -| True - -| *VolumeSnapshotDataSource* -| Enables volume snapshot data source support. -| False - -| *VolumeSubpath* -| Allows mounting a subpath of a volume in a container. Do not remove this feature gate even though it's GA. -| True - -| *VolumeSubpathEnvExpansion* -| Allows subpath environment variable substitution. Only applicable if the VolumeSubpath feature is also enabled. -| False - -|=== diff --git a/_unused_topics/nodes-cluster-overcommit-node-memory.adoc b/_unused_topics/nodes-cluster-overcommit-node-memory.adoc deleted file mode 100644 index e1da11dcbb73..000000000000 --- a/_unused_topics/nodes-cluster-overcommit-node-memory.adoc +++ /dev/null @@ -1,102 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-overcommit.adoc - -[id="nodes-cluster-overcommit-node-memory_{context}"] - -= Reserving memory across quality of service tiers - -You can use the `qos-reserved` parameter to specify a percentage of memory to be reserved -by a pod in a particular QoS level. This feature attempts to reserve requested resources to exclude pods -from lower OoS classes from using resources requested by pods in higher QoS classes. - -By reserving resources for higher QOS levels, pods that do not have resource limits are prevented from encroaching on the resources -requested by pods at higher QoS levels. - -.Prerequisites - -. Obtain the label associated with the static Machine Config Pool CRD for the type of node you want to configure. -Perform one of the following steps: - -.. 
View the Machine Config Pool: -+ ----- -$ oc describe machineconfigpool ----- -+ -For example: -+ -[source,yaml] ----- -$ oc describe machineconfigpool worker - -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - creationTimestamp: 2019-02-08T14:52:39Z - generation: 1 - labels: - custom-kubelet: small-pods <1> ----- -<1> If a label has been added it appears under `labels`. - -.. If the label is not present, add a key/value pair: -+ ----- -$ oc label machineconfigpool worker custom-kubelet=small-pods ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the label: - -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - labels: - custom-kubelet: small-pods - name: worker ----- -==== - -.Procedure - -. Create a Custom Resource (CR) for your configuration change. -+ -.Sample configuration for a disabling CPU limits -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: disable-cpu-units <1> -spec: - machineConfigPoolSelector: - matchLabels: - custom-kubelet: small-pods <2> - kubeletConfig: - cgroups-per-qos: - - true - cgroup-driver: - - 'systemd' - cgroup-root: - - '/' - qos-reserved: <3> - - 'memory=50%' ----- -<1> Assign a name to CR. -<2> Specify the label to apply the configuration change. -<3> Specifies how pod resource requests are reserved at the QoS level. -{product-title} uses the `qos-reserved` parameter as follows: -- A value of `qos-reserved=memory=100%` will prevent the `Burstable` and `BestEffort` QOS classes from consuming memory -that was requested by a higher QoS class. This increases the risk of inducing OOM -on `BestEffort` and `Burstable` workloads in favor of increasing memory resource guarantees -for `Guaranteed` and `Burstable` workloads. -- A value of `qos-reserved=memory=50%` will allow the `Burstable` and `BestEffort` QOS classes -to consume half of the memory requested by a higher QoS class. -- A value of `qos-reserved=memory=0%` -will allow a `Burstable` and `BestEffort` QoS classes to consume up to the full node -allocatable amount if available, but increases the risk that a `Guaranteed` workload -will not have access to requested memory. This condition effectively disables this feature. diff --git a/_unused_topics/nodes-containers-using-about.adoc b/_unused_topics/nodes-containers-using-about.adoc deleted file mode 100644 index 0213c6acecc1..000000000000 --- a/_unused_topics/nodes-containers-using-about.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-using.adoc - -[id="nodes-containers-using-about_{context}"] -= Understanding Containers - -The basic units of {product-title} applications are called _containers_. -link:https://access.redhat.com/articles/1353593[Linux container technologies] -are lightweight mechanisms for isolating running processes so that they are -limited to interacting with only their designated resources. - -Many application instances can be running in containers on a single host without -visibility into each others' processes, files, network, and so on. Typically, -each container provides a single service (often called a "micro-service"), such -as a web server or a database, though containers can be used for arbitrary -workloads. - -The Linux kernel has been incorporating capabilities for container technologies -for years. 
{product-title} and -Kubernetes add the ability to orchestrate containers across -multi-host installations. diff --git a/_unused_topics/nodes-containers-using-ssh.adoc b/_unused_topics/nodes-containers-using-ssh.adoc deleted file mode 100644 index 868386626226..000000000000 --- a/_unused_topics/nodes-containers-using-ssh.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-using.adoc - -[id="nodes-containers-using-ssh_{context}"] -= Opening a Remote Shell to Containers - -The `oc rsh` command allows you to locally access and manage tools that are on -the system. The secure shell (SSH) is the underlying technology and industry -standard that provides a secure connection to the application. Access to -applications with the shell environment is protected and restricted with -Security-Enhanced Linux (SELinux) policies. - -While in the remote shell, you can issue commands as if you are inside the -container and perform local operations like monitoring, debugging, and using CLI -commands specific to what is running in the container. - -For example, in a MySQL container, you can count the number of records in the -database by invoking the `mysql` command, then using the prompt to type in the `SELECT` command. You can -also use commands like `ps(1)` and `ls(1)` for validation. - -`BuildConfigs` and `DeployConfigs` map out how you want things to look and -pods (with containers inside) are created and dismantled as needed. Your changes -are not persistent. If you make changes directly within the container and that -container is destroyed and rebuilt, your changes will no longer exist. - -[NOTE] -==== -You can use the `oc exec` c to execute a command remotely. However, the `oc rsh` command provides an easier way -to keep a remote shell open persistently. -==== - -.Procedure - -. Open a console on a system networked to connect to the node where your pod is located. - -. Open a remote shell session to a container: -+ ----- -$ oc rsh ----- - -[NOTE] -==== -For help with usage, options, and to see examples: ----- -$ oc rsh -h ----- -==== diff --git a/_unused_topics/nodes-nodes-audit-log-advanced.adoc b/_unused_topics/nodes-nodes-audit-log-advanced.adoc deleted file mode 100644 index e790f3ec446c..000000000000 --- a/_unused_topics/nodes-nodes-audit-log-advanced.adoc +++ /dev/null @@ -1,139 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-audit-log.adoc - -[id="nodes-nodes-audit-log-advanced_{context}"] -= Advanced Audit - -*DEPRECATED for the moment* - -The advanced audit feature provides several improvements over the -basic audit functionality, including fine-grained events filtering and multiple output back ends. - -To enable the advanced audit feature, provide the following values in the -`openshift_master_audit_config` parameter: - ----- -openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/lib/origin/oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5, "policyFile": "/etc/origin/master/adv-audit.yaml", "logFormat":"json"} ----- - -[IMPORTANT] -==== -The policy file *_/etc/origin/master/adv-audit.yaml_* must be available on each control plane node. -==== - - -The following table contains additional options you can use. - -.Advanced Audit Configuration Parameters - -[cols="3a,6a",options="header"] -|=== -| Parameter Name | Description - -|`policyFile` -|Path to the file that defines the audit policy configuration. 
- -|`policyConfiguration` -|An embedded audit policy configuration. - -|`logFormat` -|Specifies the format of the saved audit logs. Allowed values are `legacy` (the -format used in basic audit), and `json`. - -|`webHookKubeConfig` -|Path to a `.kubeconfig`-formatted file that defines the audit webhook -configuration, where the events are sent to. - -|`webHookMode` -|Specifies the strategy for sending audit events. Allowed values are `block` -(blocks processing another event until the previous has fully processed) and -`batch` (buffers events and delivers in batches). -|=== - -[IMPORTANT] -==== -To enable the advanced audit feature, you must provide either `policyFile` *or* -`policyConfiguration` describing the audit policy rules: -==== - -.Sample Audit Policy Configuration -[source,yaml] ----- -apiVersion: audit.k8s.io/v1beta1 -kind: Policy -rules: - - # Do not log watch requests by the "system:kube-proxy" on endpoints or services - - level: None <1> - users: ["system:kube-proxy"] <2> - verbs: ["watch"] <3> - resources: <4> - - group: "" - resources: ["endpoints", "services"] - - # Do not log authenticated requests to certain non-resource URL paths. - - level: None - userGroups: ["system:authenticated"] <5> - nonResourceURLs: <6> - - "/api*" # Wildcard matching. - - "/version" - - # Log the request body of configmap changes in kube-system. - - level: Request - resources: - - group: "" # core API group - resources: ["configmaps"] - # This rule only applies to resources in the "kube-system" namespace. - # The empty string "" can be used to select non-namespaced resources. - namespaces: ["kube-system"] <7> - - # Log configmap and secret changes in all other namespaces at the metadata level. - - level: Metadata - resources: - - group: "" # core API group - resources: ["secrets", "configmaps"] - - # Log all other resources in core and extensions at the request level. - - level: Request - resources: - - group: "" # core API group - - group: "extensions" # Version of group should NOT be included. - - # A catch-all rule to log all other requests at the Metadata level. - - level: Metadata <1> - - # Log login failures from the web console or CLI. Review the logs and refine your policies. - - level: Metadata - nonResourceURLs: - - /login* <8> - - /oauth* <9> ----- -<1> There are four possible levels every event can be logged at: -+ -* `None` - Do not log events that match this rule. -+ -* `Metadata` - Log request metadata (requesting user, time stamp, resource, verb, -etc.), but not request or response body. This is the same level as the one used -in basic audit. -+ -* `Request` - Log event metadata and request body, but not response body. -+ -* `RequestResponse` - Log event metadata, request, and response bodies. -<2> A list of users the rule applies to. An empty list implies every user. -<3> A list of verbs this rule applies to. An empty list implies every verb. This is - Kubernetes verb associated with API requests (including `get`, `list`, `watch`, - `create`, `update`, `patch`, `delete`, `deletecollection`, and `proxy`). -<4> A list of resources the rule applies to. An empty list implies every resource. -Each resource is specified as a group it is assigned to (for example, an empty for -Kubernetes core API, batch, build.openshift.io, etc.), and a resource list from -that group. -<5> A list of groups the rule applies to. An empty list implies every group. -<6> A list of non-resources URLs the rule applies to. -<7> A list of namespaces the rule applies to. An empty list implies every namespace. 
-<8> Endpoint used by the web console. -<9> Endpoint used by the CLI. - -For more information on advanced audit, see the -link:https://kubernetes.io/docs/tasks/debug-application-cluster/audit[Kubernetes -documentation] diff --git a/_unused_topics/nodes-nodes-resources-configuring-viewing.adoc b/_unused_topics/nodes-nodes-resources-configuring-viewing.adoc deleted file mode 100644 index 8c8b62e45eeb..000000000000 --- a/_unused_topics/nodes-nodes-resources-configuring-viewing.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-resources-configuring.adoc - -[id="nodes-nodes-resources-configuring-setting_{context}"] -= Viewing Node Allocatable Resources and Capacity - -As an administrator, you can view the current capacity and allocatable resources of a specific node. - -.Procedure - -To see a node's current capacity and allocatable resources: - -. Run the following command: - ----- -$ oc get node/ -o yaml ----- - -. Locate the following section in the output: -+ -[source,yaml] ----- -... -status: -... - allocatable: - cpu: "4" - memory: 8010948Ki - pods: "110" - capacity: - cpu: "4" - memory: 8010948Ki - pods: "110" -... ----- diff --git a/_unused_topics/nodes-nodes-working-adding.adoc b/_unused_topics/nodes-nodes-working-adding.adoc deleted file mode 100644 index 5efb65bc72ba..000000000000 --- a/_unused_topics/nodes-nodes-working-adding.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-working.adoc - -[id="nodes-nodes-working-adding_{context}"] -= Adding new nodes to your cluster - -//// -this entire section is obsolete for 4.0. nodes are added to the cluster using MachineSets in 4.0. -https://github.com/openshift/openshift-docs/pull/12964#discussion_r242781872 -//// diff --git a/_unused_topics/nodes-pods-autoscaling-custom-metrics.adoc b/_unused_topics/nodes-pods-autoscaling-custom-metrics.adoc deleted file mode 100644 index e2271d47ee3d..000000000000 --- a/_unused_topics/nodes-pods-autoscaling-custom-metrics.adoc +++ /dev/null @@ -1,83 +0,0 @@ -== Supported metrics - -KEDA emits the following Kubernetes events: - -.Metrics -[cols="3a,5a,5a",options="header"] -|=== - -|Metric |Description |API version - -|ScaledObjectReady -|Normal -|On the first time a ScaledObject is ready, or if the previous ready condition status of the object was Unknown or False - -|ScaledJobReady -|Normal -|On the first time a ScaledJob is ready, or if the previous ready condition status of the object was Unknown or False - -|ScaledObjectCheckFailed -|Warning -|If the check validation for a ScaledObject fails - -|ScaledJobCheckFailed -|Warning -|If the check validation for a ScaledJob fails - -|ScaledObjectDeleted -|Normal -|When a ScaledObject is deleted and removed from KEDA watch - -|ScaledJobDeleted -|Normal -|When a ScaledJob is deleted and removed from KEDA watch - -|KEDAScalersStarted -|Normal -|When Scalers watch loop have started for a ScaledObject or ScaledJob - -|KEDAScalersStopped -|Normal -|When Scalers watch loop have stopped for a ScaledObject or a ScaledJob - -|KEDAScalerFailed -|Warning -|When a Scaler fails to create or check its event source - -|KEDAScaleTargetActivated -|Normal -|When the scale target (Deployment, StatefulSet, etc) of a ScaledObject is scaled to 1 - -|KEDAScaleTargetDeactivated -|Normal -|When the scale target (Deployment, StatefulSet, etc) of a ScaledObject is scaled to 0 - -|KEDAScaleTargetActivationFailed -|Warning -|When KEDA fails to scale the scale 
target of a ScaledObject to 1 - -|KEDAScaleTargetDeactivationFailed -|Warning -|When KEDA fails to scale the scale target of a ScaledObject to 0 - -|KEDAJobsCreated -|Normal -|When KEDA creates jobs for a ScaledJob - -|TriggerAuthenticationAdded -|Normal -|When a new TriggerAuthentication is added - -|TriggerAuthenticationDeleted -|Normal -|When a TriggerAuthentication is deleted - -|ClusterTriggerAuthenticationAdded -|Normal -|When a new ClusterTriggerAuthentication is added - -|ClusterTriggerAuthenticationDeleted -|Normal -|When a ClusterTriggerAuthentication is deleted - -|=== diff --git a/_unused_topics/nodes-pods-daemonsets-pods.adoc b/_unused_topics/nodes-pods-daemonsets-pods.adoc deleted file mode 100644 index 74520493154b..000000000000 --- a/_unused_topics/nodes-pods-daemonsets-pods.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-daemonsets.adoc - -[id="nodes-pods-daemonsets-pods_{context}"] -= About Scheduling DaemonSets with the default scheduler - -In {product-title}, the scheduler selects the Node that a Pod runs on. However, in previous versions of {product-title}, DaemonSet pods were created and scheduled by the DaemonSet controller. - -The `ScheduleDaemonSetPods` feature, enabled by default, forces {product-title} to schedule DaemonSets using the default scheduler, instead of the DaemonSet controller. -The DaemonSet controller adds the `NodeAffinity` parameter to the DaemonSet pods, instead of the `.spec.nodeName` parameter. The default scheduler then binds the pod to the target host. If the DaemonSet pod is already configured for node affinity, the affinity is replaced. The DaemonSet controller only performs these operations when creating or modifying DaemonSet pods, and no changes are made to the `spec.template` parameter of the DaemonSet. - ----- -nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchFields: - - key: metadata.name - operator: In - values: - - target-host-name ----- - -In addition, the DaemonSet controller adds the `node.kubernetes.io/unschedulable:NoSchedule` toleration to DaemonSet Pods. The default scheduler ignores unschedulable Nodes when scheduling DaemonSet Pods. diff --git a/_unused_topics/nodes-pods-priority-examples.adoc b/_unused_topics/nodes-pods-priority-examples.adoc deleted file mode 100644 index 92d898eb2d3f..000000000000 --- a/_unused_topics/nodes-pods-priority-examples.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-priority.adoc - -[id="nodes-pods-priority-examples_{context}"] -= Pod priority example scenarios - -Pod priority and preemption assigns a priority to pods for scheduling. The scheduler will preempt (evict) lower-priority pods to schedule higher-priority pods. - -Typical preemption scenario:: -*Pod P* is a pending pod. - -. The scheduler locates *Node N*, where the removal of one or more pods enables *Pod P* to be scheduled on that node. - -. The scheduler deletes the lower-priority pods from the *Node N* and schedules *Pod P* on the node. - -. The `nominatedNodeName` field of *Pod P* is set to the name of *Node N*. - -[NOTE] -==== -*Pod P* is not necessarily scheduled to the nominated node. -==== - -Preemption and termination periods:: -The preempted pod has a long termination period. - -. The scheduler preempts a lower-priority pod on *Node N*. - -. The scheduler waits for the pod to gracefully terminate. - -. 
For other scheduling reasons, *Node M* becomes available. - -. The scheduler can then schedule *Pod P* on *Node M*. - -//// -Under consideration for future release -Pod priority and cross-node preemption:: -*Pod P* is being considered for *Node N*. - -. *Pod Q* is running on another node in the same zone as *Node N*. - -. *Pod P* has zone-wide anti-affinity with *Pod Q*, meaning *Pod P* cannot be scheduled in the same zone as *Pod Q*. -+ -There are no other cases of anti-affinity between *Pod P* and other pods in the zone. - -. To schedule *Pod P* on *Node N*, the scheduler must preempt *Pod Q* to remove the pod anti-affinity violation, allowing the scheduler to schedule *Pod P* on *Node N*. - -The scheduler can preempt *Pod Q*, but scheduler does not perform cross-node preemption. So, Pod P will be deemed unschedulable on Node N. -//// - diff --git a/_unused_topics/nodes-scheduler-node-antiaffinity-configuring.adoc b/_unused_topics/nodes-scheduler-node-antiaffinity-configuring.adoc deleted file mode 100644 index 083d94dfc6eb..000000000000 --- a/_unused_topics/nodes-scheduler-node-antiaffinity-configuring.adoc +++ /dev/null @@ -1,102 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-node-affinity.adoc - -[id="nodes-scheduler-node-affinity-configuring_{context}"] -= Configuring node affinity rules - -You can configure two types of node affinity rules: required and preferred. - -== Configuring a required node affinity rule - -Required rules *must* be met before a pod can be scheduled on a node. - -.Procedure - -The following steps demonstrate a simple configuration that creates a node and a pod that the scheduler is required to place on the node. - -. Add a label to a node using the `oc label node` command: -+ ----- -$ oc label node node1 e2e-az-name=e2e-az1 ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the label: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: - labels: - e2e-az-name: e2e-az1 ----- -==== - -. In the pod specification, use the `nodeAffinity` stanza to configure the `requiredDuringSchedulingIgnoredDuringExecution` parameter: -+ -.. Specify the key and values that must be met. If you want the new pod to be scheduled on the node you edited, use the same `key` and `value` parameters as the label in the node. -+ -.. Specify an `operator`. The operator can be `In`, `NotIn`, `Exists`, `DoesNotExist`, `Lt`, or `Gt`. For example, use the operator `In` to require the label to be in the node: -+ ----- -spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: e2e-az-name - operator: In - values: - - e2e-az1 - - e2e-az2 ----- - -. Create the pod: -+ ----- -$ oc create -f e2e-az2.yaml ----- - -== Configuring a Preferred Node Affinity Rule - -Preferred rules specify that, if the rule is met, the scheduler tries to enforce the rules, but does not guarantee enforcement. - -.Procedure - -The following steps demonstrate a simple configuration that creates a node and a pod that the scheduler tries to place on the node. - -. Add a label to a node using the `oc label node` command: -+ ----- -$ oc label node node1 e2e-az-name=e2e-az3 ----- - -. In the pod specification, use the `nodeAffinity` stanza to configure the `preferredDuringSchedulingIgnoredDuringExecution` parameter: -+ -.. Specify a weight for the node, as a number 1-100. The node with highest weight is preferred. -+ -.. Specify the key and values that must be met. 
If you want the new pod to be scheduled on the node you edited, use the same `key` and `value` parameters as the label in the node: -+ ----- - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - preference: - matchExpressions: - - key: e2e-az-name - operator: In - values: - - e2e-az3 ----- - -. Specify an `operator`. The operator can be `In`, `NotIn`, `Exists`, `DoesNotExist`, `Lt`, or `Gt`. For example, use the operator `In` to require the label to be in the node. - -. Create the pod. -+ ----- -$ oc create -f e2e-az3.yaml ----- diff --git a/_unused_topics/nodes-scheduler-taints-tolerations-examples.adoc b/_unused_topics/nodes-scheduler-taints-tolerations-examples.adoc deleted file mode 100644 index 43a1eb49af61..000000000000 --- a/_unused_topics/nodes-scheduler-taints-tolerations-examples.adoc +++ /dev/null @@ -1,141 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-taints-tolerations.adoc - -[id="nodes-scheduler-taints-tolerations-examples_{context}"] -= Example taint and toleration scenarios - -Taints and tolerations are a flexible way to steer pods away from nodes or evict pods that should not be running on a node. A few of typical scenarios are: - -* Dedicating a node for a user -* Binding a user to a node -* Dedicating nodes with special hardware - -[id="nodes-scheduler-taints-tolerations-examples-user_{context}"] -== Dedicating a Node for a User - -You can specify a set of nodes for exclusive use by a particular set of users. - -.Procedure - -To specify dedicated nodes: - -. Add a taint to those nodes: -+ -For example: -+ ----- -$ oc adm taint nodes node1 dedicated=groupName:NoSchedule ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the taint: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: - labels: - ... -spec: - taints: - - key: dedicated - value: groupName - effect: NoSchedule - ... ----- -==== - -. Add a corresponding toleration to the pods by writing a custom admission controller. -+ -Only the pods with the tolerations are allowed to use the dedicated nodes. - -[id="nodes-scheduler-taints-tolerations-examples-binding_{context}"] -== Binding a User to a Node - -You can configure a node so that particular users can use only the dedicated nodes. - -.Procedure - -To configure a node so that users can use only that node: - -. Add a taint to those nodes: -+ -For example: -+ ----- -$ oc adm taint nodes node1 dedicated=groupName:NoSchedule ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the taint: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: - labels: - ... -spec: - taints: - - key: dedicated - value: groupName - effect: NoSchedule - ... ----- -==== - -. Add a corresponding toleration to the pods by writing a custom admission controller. -+ -The admission controller should add a node affinity to require that the pods can only schedule onto nodes labeled with the `key:value` label (`dedicated=groupName`). - -. Add a label similar to the taint (such as the `key:value` label) to the dedicated nodes. - -[id="nodes-scheduler-taints-tolerations-examples-special_{context}"] -== Nodes with Special Hardware - -In a cluster where a small subset of nodes have specialized hardware (for example GPUs), you can use taints and tolerations to keep pods that do not need the specialized hardware off of those nodes, leaving the nodes for pods that do need the specialized hardware. 
You can also require pods that need specialized hardware to use specific nodes. - -.Procedure - -To ensure pods are blocked from the specialized hardware: - -. Taint the nodes that have the specialized hardware using one of the following commands: -+ ----- -$ oc adm taint nodes disktype=ssd:NoSchedule -$ oc adm taint nodes disktype=ssd:PreferNoSchedule ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the taint: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: - labels: - ... -spec: - taints: - - key: disktype - value: ssd - effect: PreferNoSchedule - ... ----- -==== - -. Adding a corresponding toleration to pods that use the special hardware using an admission controller. - -For example, the admission controller could use some characteristic(s) of the pod to determine that the pod should be allowed to use the special nodes by adding a toleration. - -To ensure pods can only use the specialized hardware, you need some additional mechanism. For example, you could label the nodes that have the special hardware and use node affinity on the pods that need the hardware. diff --git a/_unused_topics/nodes-scheduler-taints-tolerations-seconds.adoc b/_unused_topics/nodes-scheduler-taints-tolerations-seconds.adoc deleted file mode 100644 index a8a788cb0aa8..000000000000 --- a/_unused_topics/nodes-scheduler-taints-tolerations-seconds.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-taints-tolerations.adoc - -[id="nodes-scheduler-taints-tolerations-seconds_{context}"] -= Setting a default value for toleration seconds - -When using taints and tolerations, if taints are added to an existing node, non-matching pods on that node will be evicted. You can modify the time allowed before pods are evicted using the toleration seconds plug-in, which sets the eviction period at five minutes, by default. - -.Procedure - -To enable Default Toleration Seconds: - -Create an *AdmissionConfiguration* object: -+ ----- -kind: AdmissionConfiguration -apiVersion: apiserver.k8s.io/v1alpha1 -plugins: -- name: DefaultTolerationSeconds -...---- diff --git a/_unused_topics/osdk-updating-projects.adoc b/_unused_topics/osdk-updating-projects.adoc deleted file mode 100644 index ee2cf2600ae0..000000000000 --- a/_unused_topics/osdk-updating-projects.adoc +++ /dev/null @@ -1,353 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-updating-projects.adoc -// * operators/operator_sdk/ansible/osdk-ansible-updating-projects.adoc -// * operators/operator_sdk/helm/osdk-helm-updating-projects.adoc -// * operators/operator_sdk/helm/osdk-hybrid-helm-updating-projects.adoc - -ifeval::["{context}" == "osdk-golang-updating-projects"] -:golang: -:type: Go -endif::[] -ifeval::["{context}" == "osdk-ansible-updating-projects"] -:ansible: -:type: Ansible -endif::[] -ifeval::["{context}" == "osdk-helm-updating-projects"] -:helm: -:type: Helm -endif::[] -ifeval::["{context}" == "osdk-hybrid-helm-updating-projects"] -:hybrid: -:type: Hybrid Helm -endif::[] - -:osdk_ver: v1.25.0 -:osdk_ver_n1: v1.22.0 - -:_content-type: PROCEDURE -[id="osdk-upgrading-projects_{context}"] -= Updating {type}-based Operator projects for Operator SDK {osdk_ver} - -The following procedure updates an existing {type}-based Operator project for compatibility with {osdk_ver}. - -.Prerequisites - -* Operator SDK {osdk_ver} installed. -* An Operator project created or maintained with Operator SDK {osdk_ver_n1}. 
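Before changing any files, it can help to confirm which Operator SDK release is actually on your `PATH`. The check below is a minimal sketch rather than part of the documented steps; `operator-sdk version` prints the installed CLI version, which should match {osdk_ver} before you continue:

[source,terminal]
----
$ operator-sdk version
----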
- -.Procedure - -. Make the following changes to the `config/default/manager_auth_proxy_patch.yaml` file: -+ -[source,yaml] ----- -... -spec: - template: - spec: - containers: - - name: kube-rbac-proxy - image: registry.redhat.io/openshift4/ose-kube-rbac-proxy:v4.12 <1> - args: - - "--secure-listen-address=0.0.0.0:8443" - - "--upstream=http://127.0.0.1:8080/" - - "--logtostderr=true" - - "--v=0" <2> -... -resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 64Mi <3> ----- -<1> Update the tag version from `v4.11` to `v4.12`. -<2> Reduce the debugging log level from `--v=10` to `--v=0`. -<3> Add resource requests and limits. - -. Make the following changes to your `Makefile`: - -.. Enable support for image digests by adding the following environment variables to your `Makefile`: -+ -.Old `Makefile` -[source,terminal] ----- -BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION) -... ----- -+ -.New `Makefile` -[source,terminal] ----- -BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION) - -# BUNDLE_GEN_FLAGS are the flags passed to the operator-sdk generate bundle command -BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) - -# USE_IMAGE_DIGESTS defines if images are resolved via tags or digests -# You can enable this value if you would like to use SHA Based Digests -# To enable set flag to true -USE_IMAGE_DIGESTS ?= false -ifeq ($(USE_IMAGE_DIGESTS), true) - BUNDLE_GEN_FLAGS += --use-image-digests -endif ----- - -.. Edit your `Makefile` to replace the bundle target with the `BUNDLE_GEN_FLAGS` environment variable: -+ -.Old `Makefile` -[source,terminal] ----- -$(KUSTOMIZE) build config/manifests | operator-sdk generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) ----- -+ -.New `Makefile` -[source,terminal] ----- -$(KUSTOMIZE) build config/manifests | operator-sdk generate bundle $(BUNDLE_GEN_FLAGS) ----- - -.. Edit your `Makefile` to update `opm` to version 1.23.0: -+ -[source,terminal] ----- -.PHONY: opm -OPM = ./bin/opm -opm: ## Download opm locally if necessary. -ifeq (,$(wildcard $(OPM))) -ifeq (,$(shell which opm 2>/dev/null)) - @{ \ - set -e ;\ - mkdir -p $(dir $(OPM)) ;\ - OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \ - curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.23.0/$${OS}-$${ARCH}-opm ;\ <1> - chmod +x $(OPM) ;\ - } -else -OPM = $(shell which opm) -endif -endif ----- -<1> Replace `v1.19.1` with `v1.23.0`. - -ifdef::golang[] -.. Edit your `Makefile` to replace the `go get` targets with `go install` targets: -+ -.Old `Makefile` -[source,terminal] ----- -CONTROLLER_GEN = $(shell pwd)/bin/controller-gen -.PHONY: controller-gen -controller-gen: ## Download controller-gen locally if necessary. - $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.8.0) - -KUSTOMIZE = $(shell pwd)/bin/kustomize -.PHONY: kustomize -kustomize: ## Download kustomize locally if necessary. - $(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7) - -ENVTEST = $(shell pwd)/bin/setup-envtest -.PHONY: envtest -envtest: ## Download envtest-setup locally if necessary. - $(call go-get-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@latest) - -# go-get-tool will 'go get' any package $2 and install it to $1. 
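# Annotation added for clarity (not part of the original scaffold): go-get-tool
# builds each tool inside a throwaway temporary module so that `go get` does not
# modify this project's go.mod. Recent Go releases no longer install executables
# through `go get`, which is why the replacement Makefile below pins tool versions
# and installs them with `go install <package>@<version>` instead.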
-PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) -define go-get-tool -@[ -f $(1) ] || { \ -set -e ;\ -TMP_DIR=$$(mktemp -d) ;\ -cd $$TMP_DIR ;\ -go mod init tmp ;\ -echo "Downloading $(2)" ;\ -GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\ -rm -rf $$TMP_DIR ;\ -} -endef ----- -+ -.New `Makefile` -[source,terminal] ----- -##@ Build Dependencies - -## Location to install dependencies to -LOCALBIN ?= $(shell pwd)/bin -$(LOCALBIN): - mkdir -p $(LOCALBIN) - -## Tool Binaries -KUSTOMIZE ?= $(LOCALBIN)/kustomize -CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen -ENVTEST ?= $(LOCALBIN)/setup-envtest - -## Tool Versions -KUSTOMIZE_VERSION ?= v3.8.7 -CONTROLLER_TOOLS_VERSION ?= v0.8.0 - -KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" -.PHONY: kustomize -kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. -$(KUSTOMIZE): $(LOCALBIN) - curl -s $(KUSTOMIZE_INSTALL_SCRIPT) | bash -s -- $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN) - -.PHONY: controller-gen -controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. -$(CONTROLLER_GEN): $(LOCALBIN) - GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION) - -.PHONY: envtest -envtest: $(ENVTEST) ## Download envtest-setup locally if necessary. -$(ENVTEST): $(LOCALBIN) - GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest ----- -endif::[] - -ifdef::golang,hybrid[] -.. Update `ENVTEST_K8S_VERSION` and `controller-gen` fields in your `Makefile` to support Kubernetes 1.24: -+ -[source,terminal] ----- -... -ENVTEST_K8S_VERSION = 1.24 <1> -... -sigs.k8s.io/controller-tools/cmd/controller-gen@v0.9.0 <2> ----- -<1> Update version `1.22` to `1.24`. -<2> Update version `0.7.0` to `0.9.0`. -endif::[] - -.. Apply the changes to your `Makefile` and rebuild your Operator by entering the following command: -+ -[source,terminal] ----- -$ make ----- - -ifdef::golang,hybrid[] -. Make the following changes to the `go.mod` file to update Go and its dependencies: -+ -[source,golang] ----- -go 1.18 <1> - -require ( - github.com/onsi/ginkgo v1.16.5 <2> - github.com/onsi/gomega v1.18.1 <3> - k8s.io/api v0.24.0 <4> - k8s.io/apimachinery v0.24.0 <4> - k8s.io/client-go v0.24.0 <4> - sigs.k8s.io/controller-runtime v0.12.1 <5> -) ----- -<1> Update version `1.16` to `1.18`. -<2> Update version `v1.16.4` to `v1.16.5`. -<3> Update version `v1.15.0` to `v1.18.1`. -<4> Update version `v0.22.1` to `v0.24.0`. -<5> Update version `v0.10.0` to `v0.12.1`. -endif::golang,hybrid[] - -ifdef::hybrid[] -. Edit your `go.mod` file to update the Helm Operator plugins: -+ -[source,golang] ----- -github.com/operator-framework/helm-operator-plugins v0.0.11 <1> ----- -<1> Update version `v0.0.8` to `v0.0.11`. -endif::[] - -ifdef::golang,hybrid[] -. Download and clean up the dependencies by entering the following command: -+ -[source,terminal] ----- -$ go mod tidy ----- -endif::[] - -ifdef::golang[] -. If you use the `api/webhook_suitetest.go` and `controllers/suite_test.go` suite test files, make the following changes: -+ -.Old suite test file -[source,golang] ----- -cfg, err := testEnv.Start() ----- -+ -.New suite test file -[source,golang] ----- -var err error -// cfg is defined in this file globally. -cfg, err = testEnv.Start() ----- - -. If you use the Kubernetes declarative plugin, update your Dockerfile with the following changes: - -.. 
Add the following changes below the line that begins `COPY controllers/ controllers/`: -+ -[source,terminal] ----- -# https://github.com/kubernetes-sigs/kubebuilder-declarative-pattern/blob/master/docs/addon/walkthrough/README.md#adding-a-manifest -# Stage channels and make readable -COPY channels/ /channels/ -RUN chmod -R a+rx /channels/ ----- - -.. Add the following changes below the line that begins `COPY --from=builder /workspace/manager .`: -+ -[source,terminal] ----- -# copy channels -COPY --from=builder /channels /channels ----- -endif::[] - -ifdef::ansible[] -. Update your `requirements.yml` file as shown in the following example: -+ -[source,yaml] ----- -collections: - - name: community.kubernetes - version: "2.0.1" <1> - - name: operator_sdk.util - version: "0.4.0" <2> - - name: kubernetes.core - version: "2.3.1" <3> - - name: cloud.common <4> - version: "2.1.1" ----- -<1> Update version `1.2.1` to `2.0.1`. -<2> Update version `0.3.1` to `0.4.0`. -<3> Update version `2.2.0` to `2.3.1`. -<4> Add support for the Operator Ansible SDK by adding the `cloud.common` collection. -+ -[IMPORTANT] -==== -As of version 2.0.0, the `community.kubernetes` collection was renamed to `kubernetes.core`. The `community.kubernetes` collection has been replaced by deprecated redirects to `kubernetes.core`. If you use fully qualified collection names (FQCNs) that begin with `community.kubernetes`, you must update the FQCNs to use `kubernetes.core`. -==== -endif::[] - -:!osdk_ver: -:!osdk_ver_n1: - -ifeval::["{context}" == "osdk-golang-updating-projects"] -:!golang: -:!type: -endif::[] -ifeval::["{context}" == "osdk-ansible-updating-projects"] -:!ansible: -:!type: -endif::[] -ifeval::["{context}" == "osdk-helm-updating-projects"] -:!type: -:!helm: -endif::[] -ifeval::["{context}" == "osdk-hybrid-helm-updating-projects"] -:!hybrid: -:!type: -endif::[] diff --git a/_unused_topics/osdk-updating-v1101-to-v1160.adoc b/_unused_topics/osdk-updating-v1101-to-v1160.adoc deleted file mode 100644 index 11450144ef89..000000000000 --- a/_unused_topics/osdk-updating-v1101-to-v1160.adoc +++ /dev/null @@ -1,195 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-upgrading-projects.adoc - -:osdk_ver: v1.16.0 -:osdk_ver_n1: v1.10.1 - -:_content-type: PROCEDURE -[id="osdk-upgrading-v1101-to-v1160_{context}"] -= Updating projects for Operator SDK {osdk_ver} - -The following procedure updates an existing Operator project for compatibility with {osdk_ver}. - -[IMPORTANT] -==== -* Operator SDK v1.16.0 supports Kubernetes 1.22. - -* Many deprecated `v1beta1` APIs were removed in Kubernetes 1.22, including `sigs.k8s.io/controller-runtime v0.10.0` and `controller-gen v0.7`. - -* Updating projects to Kubernetes 1.22 is a breaking change if you need to scaffold `v1beta1` APIs for custom resource definitions (CRDs) or webhooks to publish your project into older cluster versions. - -See link:https://docs.openshift.com/container-platform/4.9/release_notes/ocp-4-9-release-notes.html#ocp-4-9-osdk-k8s-api-bundle-validate[Validating bundle manifests for APIs removed from Kubernetes 1.22] and link:https://docs.openshift.com/container-platform/4.9/release_notes/ocp-4-9-release-notes.html#ocp-4-9-removed-kube-1-22-apis[Beta APIs removed from Kubernetes 1.22] for more information about changes introduced in Kubernetes 1.22. -==== - -.Prerequisites - -* Operator SDK {osdk_ver} installed. -* An Operator project created or maintained with Operator SDK {osdk_ver_n1}. - -.Procedure - -. 
Add the `protocol` field in the `config/default/manager_auth_proxy_patch.yaml` and `config/rbac/auth_proxy_service.yaml` files: -+ -[source,diff] ----- -... - ports: - - containerPort: 8443 -+ protocol: TCP - name: https ----- - -. Make the following changes to the `config/manager/manager.yaml` file: - -.. Increase the CPU and memory resource limits: -+ -[source,diff] ----- -resources: - limits: -- cpu: 100m -- memory: 30Mi -+ cpu: 200m -+ memory: 100Mi ----- - -.. Add an annotation to specify the default container manager: -+ -[source,yaml] ----- -... -template: - metadata: - annotations: - kubectl.kubernetes.io/default-container: manager -... ----- - -. Add `PHONY` targets to all of the targets in your `Makefile` file. - -. For Go-based Operator projects, make the following changes: - -.. Install the `setup-envtest` binary. - -.. Change your `go.mod` file to update the dependencies: -+ -[source,golang] ----- -k8s.io/api v0.22.1 -k8s.io/apimachinery v0.22.1 -k8s.io/client-go v0.22.1 -sigs.k8s.io/controller-runtime v0.10.0 ----- - -.. Run the `go mod tidy` command to download the dependencies: -+ -[source,terminal] ----- -$ go mod tidy ----- - -.. Make the following changes to your `Makefile` file: -+ -[source,diff] ----- -... - -+ ENVTEST_K8S_VERSION = 1.22 - - test: manifests generate fmt vet envtest ## Run tests. -- go test ./... -coverprofile cover.out -+ KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out -... - -- $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases -+ $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases -... - -# Produce CRDs that work back to Kubernetes 1.11 (no version conversion) -- CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false" -... -- admissionReviewVersions={v1,v1beta1} -+ admissionReviewVersions=v1 -... - -+ ifndef ignore-not-found -+ ignore-not-found = false -+ endif - -##@ Deployment -... -- sh kubectl delete -f - -+ sh kubectl delete --ignore-not-found=$(ignore-not-found) -f - ----- - -.. Run the `make manifest` command to generate your manifests with the updated version of Kubernetes: -+ -[source,terminal] ----- -$ make manifest ----- - -. For Ansible-based Operator projects, make the following changes: -+ -.. Change your `requirements.yml` file to include the following: - -... Replace the `community.kubernetes` collection with the `kubernetes.core` collection: -+ -[source,yaml] ----- -... -- name: kubernetes.core - version: "2.2.0" -... ----- - -... Update the `operator_sdk.util` utility from version `0.2.0` to `0.3.1`: -+ -[source,yaml] ----- -... -- name: operator_sdk.util - version: "0.3.1" ----- - -.. Verify the default resource limits in the `config/manager/manager.yaml` file: -+ -[source,yaml] ----- -... - # TODO(user): Configure the resources accordingly based on the project requirements. - # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - -resources: - limits: - cpu: 500m - memory: 768Mi - requests: - cpu: 10m - memory: 256Mi ----- -+ -[IMPORTANT] -==== -Operator SDK scaffolds these values as a reasonable default setting. Operator authors should set and optimize resource limits based on the requirements of their project. -==== - -.. Optional: Make the following changes if you want to run your Ansible-based Operator locally by using the `make run` command: - -... 
Change the run target in the `Makefile` file: -+ -[source,terminal] ----- -ANSIBLE_ROLES_PATH="$(ANSIBLE_ROLES_PATH):$(shell pwd)/roles" $(ANSIBLE_OPERATOR) run ----- - -... Update the local version of `ansible-runner` to 2.0.2 or later. -+ -[IMPORTANT] -==== -As of version 2.0, the `ansible-runner` tool includes changes in the command signature that are not compatible with earlier versions. -==== - -:!osdk_ver: -:!osdk_ver_n1: diff --git a/_unused_topics/osdk-upgrading-v180-to-v1101.adoc b/_unused_topics/osdk-upgrading-v180-to-v1101.adoc deleted file mode 100644 index 91a7d73f2957..000000000000 --- a/_unused_topics/osdk-upgrading-v180-to-v1101.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-upgrading-projects.adoc - -:osdk_ver: v1.10.1 -:osdk_ver_n1: v1.8.0 - -:_content-type: PROCEDURE -[id="osdk-upgrading-v180-to-v1101_{context}"] -= Upgrading projects for Operator SDK {osdk_ver} - -The following upgrade steps must be performed to upgrade an existing Operator project for compatibility with {osdk_ver}. - -.Prerequisites - -- Operator SDK {osdk_ver} installed -- Operator project that was previously created or maintained with Operator SDK {osdk_ver_n1} - -.Procedure - -* For Ansible-based Operator projects, update the command in the `Set pull policy` section of the `molecule/default/prepare.yml` file: -+ -.`molecule/default/prepare.yml` file diff -[%collapsible] -==== -[source,diff] ----- - - name: Set pull policy -- command: '{{ "{{ kustomize }}" }} edit add patch pull_policy/{{ "{{ operator_pull_policy }}" }}.yaml' -+ command: '{{ "{{ kustomize }}" }} edit add patch --path pull_policy/{{ "{{ operator_pull_policy }}" }}.yaml' ----- -==== -+ -Ansible projects are now scaffolded with Kustomize version 3.8.7. This version of Kustomize requires that the path to patch files be provided with the `--path` flag in the `add patch` command. - -Your Operator project is now compatible with Operator SDK {osdk_ver}. - -:!osdk_ver: -:!osdk_ver_n1: diff --git a/_unused_topics/pod-using-a-different-service-account.adoc b/_unused_topics/pod-using-a-different-service-account.adoc deleted file mode 100644 index 29794c9f858c..000000000000 --- a/_unused_topics/pod-using-a-different-service-account.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * orphaned - -[id="pod-using-a-different-service-account_{context}"] -= Running a pod with a different service account - -You can run a pod with a service account other than the default: - -.Prerequisites - -* Install the `oc` command line interface. -* Configure a service account. -* Create a DeploymentConfig. - -.Procedure - -. Edit the DeploymentConfig: -+ ----- -$ oc edit dc/ ----- - -. 
Add the `serviceAccount` and `serviceAccountName` parameters to the `spec` -field, and specify the service account that you want to use: -+ ----- -spec: - securityContext: {} - serviceAccount: - serviceAccountName: ----- diff --git a/_unused_topics/rbac-updating-policy-definitions.adoc b/_unused_topics/rbac-updating-policy-definitions.adoc deleted file mode 100644 index 1a2e45a62e90..000000000000 --- a/_unused_topics/rbac-updating-policy-definitions.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * orphaned - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -[id="updating-policy-definitions_{context}"] -= Updating policy definitions - -During a cluster upgrade, and on every restart of any master, the -default cluster roles are automatically reconciled to restore any missing permissions. - -If you customized default cluster roles and want to ensure a role reconciliation -does not modify them, you must take the following actions. - -.Procedure - -. Protect each role from reconciliation: -+ ----- -$ oc annotate clusterrole.rbac --overwrite rbac.authorization.kubernetes.io/autoupdate=false ----- -+ -[WARNING] -==== -You must manually update the roles that contain this setting to include any new -or required permissions after upgrading. -==== - -. Generate a default bootstrap policy template file: -+ ----- -$ oc adm create-bootstrap-policy-file --filename=policy.json ----- -+ -[NOTE] -==== -The contents of the file vary based on the {product-title} version, but the file -contains only the default policies. -==== - -. Update the *_policy.json_* file to include any cluster role customizations. - -. Use the policy file to automatically reconcile roles and role bindings that -are not reconcile protected: -+ ----- -$ oc auth reconcile -f policy.json ----- - -. Reconcile Security Context Constraints: -+ ----- -# oc adm policy reconcile-sccs \ - --additive-only=true \ - --confirm ----- -endif::[] diff --git a/_unused_topics/running-modified-installation.adoc b/_unused_topics/running-modified-installation.adoc deleted file mode 100644 index f2c75c4d0d68..000000000000 --- a/_unused_topics/running-modified-installation.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * TBD - -[id="running-modified-installation_{context}"] -= Running a modified {product-title} installation - -Running a default {product-title} {product-version} cluster is the best way to ensure that the {product-title} cluster you get will be easy to install, maintain, and upgrade going forward. However, because you may want to add to or change your {product-title} cluster, openshift-install offers several ways to modify the default installation or add to it later. These include: - -* Creating an install-config file: Changing the contents of the install-config file, to identify things like the cluster name and credentials, is fully supported. -* Creating ignition-config files: Viewing ignition-config files, which define how individual nodes are configured when they are first deployed, is fully supported. However, changing those files is not supported. -* Creating Kubernetes (manifests) and {product-title} (openshift) manifest files: You can view manifest files in the manifests and openshift directories to see how Kubernetes and {product-title} features are configured, respectively. Changing those files is not supported. 
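Each of the assets in the preceding list is produced by its own `openshift-install` target. As a minimal sketch, assuming `<installation_directory>` is a placeholder working directory rather than a path taken from this guide:

[source,terminal]
----
$ openshift-install create install-config --dir <installation_directory>
$ openshift-install create manifests --dir <installation_directory>
$ openshift-install create ignition-configs --dir <installation_directory>
----

Each stage consumes the output of the one before it; as noted above, only edits to the install-config file are supported.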
- -Whether you want to change your {product-title} installation or simply gain a deeper understanding of the details of the installation process, the goal of this section is to step you through an {product-title} installation. Along the way, it covers: - -* The underlying activities that go on under the covers to bring up an {product-title} cluster -* Major components that are leveraged ({op-system}, Ignition, Terraform, and so on) -* Opportunities to customize the install process (install configs, Ignition configs, manifests, and so on) diff --git a/_unused_topics/security-context-constraints-restore-defaults.adoc b/_unused_topics/security-context-constraints-restore-defaults.adoc deleted file mode 100644 index 089b41a32468..000000000000 --- a/_unused_topics/security-context-constraints-restore-defaults.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * orphaned - -[id="security-context-constraints-restore-defaults_{context}"] -= Restoring the default Security Context Constraints - -If the default Security Context Constraints (SCCs) are not present when the -master restarts, they are created again. To reset SCCs to the default values or -update existing SCCs to new default definitions after an upgrade you can either: - -. Delete any SCC you want to reset and restart the master. -. Use the `oc adm policy reconcile-sccs` command. - -The `oc adm policy reconcile-sccs` command sets all SCC policies to the default -values but retains any additional users, groups, labels, annotations, and -priorities you set. - -To view which SCCs will be changed, you can run the command with no options or -by specifying your preferred output with the `-o ` option. - -After reviewing it is recommended that you back up your existing SCCs and then -use the `--confirm` option to persist the data. - -[NOTE] -==== -If you want to reset priorities and grants, use the `--additive-only=false` option. -==== - -[NOTE] -==== -If you customized settings other than priority, users, groups, labels, or annotations in an -SCC, you lose those settings when you reconcile. -==== diff --git a/_unused_topics/security-overview.adoc b/_unused_topics/security-overview.adoc deleted file mode 100644 index 4e9ea7cfc726..000000000000 --- a/_unused_topics/security-overview.adoc +++ /dev/null @@ -1,125 +0,0 @@ -// Module included in the following assemblies: -// -// * orphaned - -[id="security-overview_{context}"] -= Security in {product-title} - -The {product-title} and Kubernetes APIs authenticate users who present -credentials and then authorizes them based on their role. Both developers and -administrators can be authenticated through a number of means, primarily -OAuth tokens and X.509 client certificates. OAuth tokens are signed with JSON -Web Algorithm _RS256_, which is RSA signature algorithm PKCS#1 v1.5 with SHA-256. - -Developers, the clients of the system, typically make REST API calls from a -client program like `oc` or to the {product-title} web console through their browser. -Both methods use OAuth bearer tokens for most communication. Infrastructure components. -like nodes, use client certificates that are generated by the system that contain their -identities. Infrastructure components that run in containers use a token that is -associated with their service account to connect to the API. - -Authorization is handled in the {product-title} policy engine, which defines -actions like `create pod` or `list services`, and groups them into roles in a -policy document. 
Roles are bound to users or groups by the user or group -identifier. When a user or service account attempts an action, the policy engine -checks for one or more of the roles assigned to the user, such as a cluster -administrator or administrator of the current project, before allowing it to -continue. - -ifdef::openshift-origin,openshift-online,openshift-enterprise,openshift-webscale[] -Since every container that runs on the cluster is associated with a service -account, it is also possible to associate secrets to those service accounts and have them -automatically delivered into the container. This secret delivery enables the infrastructure to -manage secrets for pulling and pushing images, builds, and the deployment -components and also allows application code to use those secrets. -endif::[] - -[id="architecture-overview-tls-support_{context}"] -== TLS Support - -All communication channels with the REST API, as well as between master -components such as etcd and the API server, are secured with TLS. TLS provides -strong encryption, data integrity, and authentication of servers with X.509 -server certificates and public key infrastructure. -ifdef::openshift-origin,openshift-enterprise[] -By default, a new internal PKI is created for each deployment of -{product-title}. The internal PKI uses 2048 bit RSA keys and SHA-256 signatures. -endif::[] -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -Custom certificates for public hosts are supported as well. -endif::[] - -{product-title} uses Golang’s standard library implementation of -link:https://golang.org/pkg/crypto/tls/[*crypto/tls*] and does not depend on any -external crypto and TLS libraries. Additionally, the client depends on external -libraries for GSSAPI authentication and OpenPGP signatures. GSSAPI is typically -provided by either MIT Kerberos or Heimdal Kerberos, which both use OpenSSL's -libcrypto. OpenPGP signature verification is handled by libgpgme and GnuPG. - -The insecure versions SSL 2.0 and SSL 3.0 are unsupported and not available. The -{product-title} server and `oc` client only provide TLS 1.2 by default. TLS 1.0 -and TLS 1.1 can be enabled in the server configuration. Both server and client -prefer modern cipher suites with authenticated encryption algorithms and perfect -forward secrecy. Cipher suites with deprecated and insecure algorithms such as -RC4, 3DES, and MD5 are disabled. Some internal clients, like LDAP -authentication, have less restrict settings with TLS 1.0 to 1.2 and more cipher -suites enabled. - -.Supported TLS Versions -[cols="4*", options="header"] -|=== -|TLS Version -|{product-title} Server -|`oc` Client -|Other Clients - -|SSL 2.0 -|Unsupported -|Unsupported -|Unsupported - -|SSL 3.0 -|Unsupported -|Unsupported -|Unsupported - -|TLS 1.0 -|No footnoteref:[tlsconfig,Disabled by default, but can be enabled in the server configuration.] -|No footnoteref:[tlsconfig] -|Maybe footnoteref:[otherclient,Some internal clients, such as the LDAP client.] - -|TLS 1.1 -|No footnoteref:[tlsconfig] -|No footnoteref:[tlsconfig] -|Maybe footnoteref:[otherclient] - -|TLS 1.2 -|*Yes* -|*Yes* -|*Yes* - -|TLS 1.3 -|N/A footnoteref:[tls13,TLS 1.3 is still under development.] 
-|N/A footnoteref:[tls13] -|N/A footnoteref:[tls13] -|=== - -The following list of enabled cipher suites of {product-title}'s server and `oc` -client are sorted in preferred order: - -- `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` -- `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` -- `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` -- `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256` -- `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` -- `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384` -- `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256` -- `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256` -- `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA` -- `TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA` -- `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA` -- `TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA` -- `TLS_RSA_WITH_AES_128_GCM_SHA256` -- `TLS_RSA_WITH_AES_256_GCM_SHA384` -- `TLS_RSA_WITH_AES_128_CBC_SHA` -- `TLS_RSA_WITH_AES_256_CBC_SHA` diff --git a/_unused_topics/serverless-creating-kubeconfig-file.adoc b/_unused_topics/serverless-creating-kubeconfig-file.adoc deleted file mode 100644 index cecdb2fbba4f..000000000000 --- a/_unused_topics/serverless-creating-kubeconfig-file.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module is included in the following assemblies: -// -// serverless/knative-client.adoc - -[id="create-kubeconfig-file_{contect}"] -= Creating a `kubeconfig` file - -Use `kubeconfig` files to organize information about clusters, users, namespaces, and authentication mechanisms. The CLI tool uses `kubeconfig` files to communicate with the API server of a cluster. - -.Procedure -* Create a basic `kubeconfig` file from client certificates. Use the following command: - ----- -$ oc adm create-kubeconfig \ - --client-certificate=/path/to/client.crt \ - --client-key=/path/to/client.key \ - --certificate-authority=/path/to/ca.crt ----- \ No newline at end of file diff --git a/_unused_topics/serverless-rn-template-module.adoc b/_unused_topics/serverless-rn-template-module.adoc deleted file mode 100644 index 2b373d05d109..000000000000 --- a/_unused_topics/serverless-rn-template-module.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies -// -// * /serverless/serverless-release-notes.adoc - -:_content-type: REFERENCE -[id="serverless-rn-_{context}"] -= Release notes for Red Hat {ServerlessProductName} -// add a version, e.g. 1.20.0 -//update the to match the filename and IDs, then remove these comments - -{ServerlessProductName} is now available. New features, changes, and known issues that pertain to {ServerlessProductName} on {product-title} are included in this topic. - -[id="new-features-_{context}"] -== New features -// add a version, e.g. 1-20-0 - -* {ServerlessProductName} now uses Knative Serving 0.x. -* {ServerlessProductName} now uses Knative Eventing 0.x. -* {ServerlessProductName} now uses Kourier 0.x. -* {ServerlessProductName} now uses Knative (`kn`) CLI 0.x. -* {ServerlessProductName} now uses Knative Kafka 0.x. -* The `kn func` CLI plug-in now uses `func` 0.x. - -[id="fixed-issues-_{context}"] -== Fixed issues -// add a version, e.g. 1-20-0 - -[id="known-issues-_{context}"] -== Known issues -// add a version, e.g. 
1-20-0 diff --git a/_unused_topics/service-accounts-adding-secrets.adoc b/_unused_topics/service-accounts-adding-secrets.adoc deleted file mode 100644 index 11d925ea62c7..000000000000 --- a/_unused_topics/service-accounts-adding-secrets.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/using-service-accounts.adoc - -[id="service-accounts-managing-secrets_{context}"] -== Managing secrets on a service account's pod - -In addition to providing API credentials, a pod's service account determines -which secrets the pod is allowed to use. - -Pods use secrets in two ways: - -* image pull secrets, providing credentials used to pull images for the pod's containers -* mountable secrets, injecting the contents of secrets into containers as files - -To allow a secret to be used as an image pull secret by a service account's -pods, run: - ----- -$ oc secrets link --for=pull ----- - -To allow a secret to be mounted by a service account's pods, run: - ----- -$ oc secrets link --for=mount ----- - -[NOTE] -==== -Limiting secrets to only the service accounts that reference them is disabled by -default. This means that if `serviceAccountConfig.limitSecretReferences` is set -to `false` (the default setting) in the master configuration file, mounting -secrets to a service account's pods with the `--for=mount` option is not -required. However, using the `--for=pull` option to enable using an image pull -secret is required, regardless of the -`serviceAccountConfig.limitSecretReferences` value. -==== - -This example creates and adds secrets to a service account: - ----- -$ oc create secret generic secret-plans \ - --from-file=plan1.txt \ - --from-file=plan2.txt -secret/secret-plans - -$ oc create secret docker-registry my-pull-secret \ - --docker-username=mastermind \ - --docker-password=12345 \ - --docker-email=mastermind@example.com -secret/my-pull-secret - -$ oc secrets link robot secret-plans --for=mount - -$ oc secrets link robot my-pull-secret --for=pull - -$ oc describe serviceaccount robot -Name: robot -Labels: -Image pull secrets: robot-dockercfg-624cx - my-pull-secret - -Mountable secrets: robot-token-uzkbh - robot-dockercfg-624cx - secret-plans - -Tokens: robot-token-8bhpp - robot-token-uzkbh ----- diff --git a/_unused_topics/service-accounts-managing-secrets.adoc b/_unused_topics/service-accounts-managing-secrets.adoc deleted file mode 100644 index cae0fb9bf790..000000000000 --- a/_unused_topics/service-accounts-managing-secrets.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/using-service-accounts.adoc - -[id="service-accounts-managing-secrets_{context}"] -= Managing allowed secrets - -You can use the service account's secrets in your application's pods for: - -* Image pull secrets, providing credentials used to pull images for the pod's containers -* Mountable secrets, injecting the contents of secrets into containers as files - -.Procedure - -. Create a secret: -+ ----- -$ oc create secret generic \ - --from-file=.txt - -secret/ ----- - -. To allow a secret to be used as an image pull secret by a service account's -pods, run: -+ ----- -$ oc secrets link --for=pull ----- - -. To allow a secret to be mounted by a service account's pods, run: -+ ----- -$ oc secrets link --for=mount ----- - -. 
Confirm that the secret was added to the service account: -+ ----- -$ oc describe serviceaccount -Name: -Labels: -Image pull secrets: robot-dockercfg-624cx - my-pull-secret - -Mountable secrets: robot-token-uzkbh - robot-dockercfg-624cx - secret-plans - -Tokens: robot-token-8bhpp - robot-token-uzkbh ----- - -//// -[NOTE] -==== -Limiting secrets to only the service accounts that reference them is disabled by -default. This means that if `serviceAccountConfig.limitSecretReferences` is set -to `false` (the default setting) in the master configuration file, mounting -secrets to a service account's pods with the `--for=mount` option is not -required. However, using the `--for=pull` option to enable using an image pull -secret is required, regardless of the -`serviceAccountConfig.limitSecretReferences` value. -==== -//// diff --git a/_unused_topics/understanding-installation.adoc b/_unused_topics/understanding-installation.adoc deleted file mode 100644 index dbd19c82853d..000000000000 --- a/_unused_topics/understanding-installation.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// * TBD - -[id="understanding-installation_{context}"] -= Understanding {product-title} installation - -{product-title} installation is designed to quickly spin up an {product-title} cluster, with the user starting the cluster required to provide as little information as possible. diff --git a/_unused_topics/understanding-workers-masters.adoc b/_unused_topics/understanding-workers-masters.adoc deleted file mode 100644 index b0028c61b6b4..000000000000 --- a/_unused_topics/understanding-workers-masters.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * - -[id="understanding-workers-masters_{context}"] -= Understanding {product-title} workers and masters - -With installation complete, the cluster is now fully in charge of managing itself. Management of worker (compute) and master (control plane) nodes is done from within the cluster. So, before moving on to what the {product-title} cluster does to help you develop and deploy applications, you should explore how an {product-title} cluster manages itself. For that, we focus on three things; workers, masters (the control plane) and Operators. - -To see which workers and masters are running on your cluster, type: - ----- -$ oc get nodes - -NAME STATUS ROLES AGE VERSION -ip-10-0-0-1.us-east-2.compute.internal Ready worker 4h20m v1.25.0 -ip-10-0-0-2.us-east-2.compute.internal Ready master 4h39m v1.25.0 -ip-10-0-0.3.us-east-2.compute.internal Ready worker 4h20m v1.25.0 -ip-10-0-0-4.us-east-2.compute.internal Ready master 4h39m v1.25.0 -ip-10-0-0-5.us-east-2.compute.internal Ready master 4h39m v1.25.0 -ip-10-0-0-6.us-east-2.compute.internal Ready worker 4h20m v1.25.0 ----- - -To see more information about internal and external IP addresses, the type of operating system ({op-system}), kernel version, and container runtime (CRI-O), add the `-o wide` option. - ----- -$ oc get nodes -o wide - -NAME                                       STATUS ROLES  AGE  VERSION  INTERNAL-IP   EXTERNAL-IP  OS-IMAGE             KERNEL-VERSION             CONTAINER-RUNTIME -ip-10-0-134-252.us-east-2.compute.internal Ready worker 17h v1.25.0 10.0.134.252 Red Hat CoreOS 4.0 3.10.0-957.5.1.el7.x86_64 cri-o://1.25.0-1.rhaos4.0.git2f0cb0d.el7 - -.... 
----- diff --git a/_unused_topics/upgrade-cluster-version-definition.adoc b/_unused_topics/upgrade-cluster-version-definition.adoc deleted file mode 100644 index 15f39773dd38..000000000000 --- a/_unused_topics/upgrade-cluster-version-definition.adoc +++ /dev/null @@ -1,78 +0,0 @@ -// Module included in the following assemblies: -// -// * none - -[id="upgrade-cluster-version-definition_{context}"] -= ClusterVersion definition - -You can review the `ClusterVersion` definition to see the update history -for your cluster. You can also apply overrides to this definition if your -cluster is not for production or during debugging. - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: ClusterVersion -metadata: - creationTimestamp: 2019-03-22T14:26:41Z - generation: 1 - name: version - resourceVersion: "16740" - selfLink: /apis/config.openshift.io/v1/clusterversions/version - uid: 82f9f2c4-4cae-11e9-90b7-06dc0f62ad38 -spec: - channel: stable-4.3 <1> - overrides: "" <2> - clusterID: 0b1cf91f-c3fb-4f9e-aa02-e0d70c71f6e6 - status: <3> - availableUpdates: null <4> - conditions: <5> - - lastTransitionTime: 2019-05-22T07:13:26Z - status: "True" - type: RetrievedUpdates - - lastTransitionTime: 2019-05-22T07:13:26Z - message: Done applying 4.0.0-0.alpha-2019-03-22-124110 - status: "True" - type: Available - - lastTransitionTime: 2019-05-22T07:12:26Z - status: "False" - type: Failing - - lastTransitionTime: 2019-05-22T07:13:26Z - message: Cluster version is 4.0.0-0.alpha-2019-03-22-124110 - status: "False" - type: Progressing ----- -<1> Specify the channel to use to apply non-standard updates to the -cluster. If you do not change the value, the CVO uses the default channel. -+ -[IMPORTANT] -==== -The default channel contains stable updates. Do not modify the -`ClusterVersionSpec.channel` value on production clusters. If you update your -cluster from a different channel without explicit direction from Red Hat -support, your cluster is no longer supported. -==== -<2> A list of overrides for components that the CVO manages. Mark -components as `unmanaged` to prevent the CVO from creating or updating the object. -+ -[IMPORTANT] -==== -Set the `ClusterVersionSpec.overrides` parameter value only during cluster -debugging. Setting this value can prevent successful upgrades and is not -supported for production clusters. -==== -<3> The status of available updates and any in-progress updates. These values display -the version that the cluster is reconciling to, and the conditions -array reports whether the update succeeded, is in progress, or is failing. -All of the `ClusterVersionStatus` values are set by the cluster itself, and you -cannot modify them. -<4> The list of appropriate updates for the cluster. This list is empty if no -updates are recommended, the update service is unavailable, or you specified -an invalid channel. -<5> The condition of the CVO. This section contains both the reason that the -cluster entered its current condition and a message that provides more -information about the condition. - -* `Available` means that the upgrade to the `desiredUpdate` value completed. -* `Progressing` means that an upgrade is in progress. -* `Failing` means that an update is blocked by a temporary or permanent error. diff --git a/_unused_topics/using-images-source-to-image-java.adoc b/_unused_topics/using-images-source-to-image-java.adoc deleted file mode 100644 index 06933b38c65f..000000000000 --- a/_unused_topics/using-images-source-to-image-java.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// * Unused. 
Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="using-images-source-to-image"] -= Java -include::_attributes/common-attributes.adoc[] -:context: using-images-source-to-image -toc::[] - -This topic includes information on the source-to-image (S2I) supported Java images available for {product-title} users. - -//Add link to Build -> S21 following updates - -include::modules/images-using-images-s2i-java.adoc[leveloffset=+1] -include::modules/images-s2i-java-pulling-images.adoc[leveloffset=+1] -include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1] -include::modules/images-s2i-java-configuration.adoc[leveloffset=+1] -include::modules/images-s2i-java-build-deploy-applications.adoc[leveloffset=+1] diff --git a/_unused_topics/using-images-source-to-image-nodejs.adoc b/_unused_topics/using-images-source-to-image-nodejs.adoc deleted file mode 100644 index 5b176a926821..000000000000 --- a/_unused_topics/using-images-source-to-image-nodejs.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="using-images-source-to-image-nodejs"] -= Node.js -include::_attributes/common-attributes.adoc[] -:context: using-images-source-to-image-nodejs -toc::[] - -This topic includes information on the source-to-image (S2I) supported Node.js images available for {product-title} users. - -//Add link to Build -> S21 following updates - -include::modules/images-using-images-s2i-nodejs.adoc[leveloffset=+1] -include::modules/images-s2i-nodejs-pulling-images.adoc[leveloffset=+1] -include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-nodejs-configuration.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-nodejs-hot-deploying.adoc[leveloffset=+1] diff --git a/_unused_topics/using-images-source-to-image-perl.adoc b/_unused_topics/using-images-source-to-image-perl.adoc deleted file mode 100644 index f49d044ab927..000000000000 --- a/_unused_topics/using-images-source-to-image-perl.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="using-images-source-to-image-perl"] -= Perl -include::_attributes/common-attributes.adoc[] -:context: using-images-source-to-image-perl -toc::[] - -This topic includes information on the source-to-image (S2I) supported Perl images available for {product-title} users. - -//Add link to Build -> S21 following updates - -include::modules/images-using-images-s2i-perl.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-perl-pulling-images.adoc[leveloffset=+1] -include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-perl-configuration.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-perl-hot-deploying.adoc[leveloffset=+1] diff --git a/_unused_topics/using-images-source-to-image-php.adoc b/_unused_topics/using-images-source-to-image-php.adoc deleted file mode 100644 index 275223464506..000000000000 --- a/_unused_topics/using-images-source-to-image-php.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. 
- -[id="using-images-source-to-image-php"] -= PHP -include::_attributes/common-attributes.adoc[] -:context: using-images-source-to-image-php -toc::[] - -This topic includes information on the source-to-image (S2I) supported PHP images available for {product-title} users. - -//Add link to Build -> S21 following updates - -include::modules/images-using-images-s2i-php.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-php-pulling-images.adoc[leveloffset=+1] -include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-php-configuration.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-php-hot-deploying.adoc[leveloffset=+1] diff --git a/_unused_topics/using-images-source-to-image-python.adoc b/_unused_topics/using-images-source-to-image-python.adoc deleted file mode 100644 index f72452a4b3e7..000000000000 --- a/_unused_topics/using-images-source-to-image-python.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="using-images-source-to-image-python"] -= Python -include::_attributes/common-attributes.adoc[] -:context: using-images-source-to-image-python -toc::[] - -This topic includes information on the source-to-image (S2I) supported Python images available for {product-title} users. - -//Add link to Build -> S21 following updates - -include::modules/images-using-images-s2i-python.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-python-pulling-images.adoc[leveloffset=+1] -include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-python-configuration.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-python-hot-deploying.adoc[leveloffset=+1] diff --git a/_unused_topics/using-images-source-to-image-ruby.adoc b/_unused_topics/using-images-source-to-image-ruby.adoc deleted file mode 100644 index b96681837cc7..000000000000 --- a/_unused_topics/using-images-source-to-image-ruby.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="using-images-source-to-image-ruby"] -= Ruby -include::_attributes/common-attributes.adoc[] -:context: using-images-source-to-image-ruby -toc::[] - -This topic includes information on the source-to-image (S2I) supported Ruby images available for {product-title} users. - -//Add link to Build -> S21 following updates - -include::modules/images-using-images-s2i-ruby.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-ruby-pulling-images.adoc[leveloffset=+1] -include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-ruby-configuration.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-ruby-hot-deploying.adoc[leveloffset=+1] diff --git a/_unused_topics/windows-machine-config-operator.adoc b/_unused_topics/windows-machine-config-operator.adoc deleted file mode 100644 index f315ccefb886..000000000000 --- a/_unused_topics/windows-machine-config-operator.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="windows-machine-config-operator_{context}"] -= Windows Machine Config Operator - -[discrete] -== Purpose - -The Windows Machine Config Operator (WMCO) orchestrates the process of deploying and managing Windows workloads on a cluster. 
The WMCO configures Windows machines into compute nodes, enabling Windows container workloads to run in {product-title} clusters. This is done by creating a compute machine set that uses a Windows image with the Docker-formatted container runtime installed. The WMCO completes all necessary steps to configure the underlying Windows VM so that it can join the cluster as a compute node. - -[discrete] -== Project - -link:https://github.com/openshift/windows-machine-config-operator[windows-machine-config-operator] diff --git a/adding_service_cluster/adding-service.adoc b/adding_service_cluster/adding-service.adoc deleted file mode 100644 index bceea84dd308..000000000000 --- a/adding_service_cluster/adding-service.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="adding-service"] -= Adding services to a cluster using {cluster-manager-first} console -:context: adding-service - -toc::[] - -You can add, access, and remove add-on services for your {product-title} -ifdef::openshift-rosa[] -(ROSA) -endif::openshift-rosa[] -cluster by using {cluster-manager-first}. - -ifdef::openshift-rosa[] -== Prerequisites -* For the Amazon CloudWatch service, you must first install the `cluster-logging-operator` using the ROSA CLI (`rosa`). -endif::[] - -include::modules/adding-service-existing.adoc[leveloffset=+1] -include::modules/access-service.adoc[leveloffset=+1] -include::modules/deleting-service.adoc[leveloffset=+1] -//include::modules/deleting-service-cli.adoc[leveloffset=+1] - -ifdef::openshift-rosa[] -[role="_additional-resources"] -== Additional resources -* For information about the `cluster-logging-operator` and the AWS CloudWatch log forwarding service, see xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-cloudwatch_cluster-logging-external[Forwarding logs to Amazon CloudWatch] -endif::[] diff --git a/adding_service_cluster/available-services.adoc b/adding_service_cluster/available-services.adoc deleted file mode 100644 index e1404beb62ea..000000000000 --- a/adding_service_cluster/available-services.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="available-services"] -= Add-on services available for {product-title} -:context: available-services - -toc::[] - -You can add services to your existing {product-title} cluster using the xref:../adding_service_cluster/adding-service.adoc#adding-service[{cluster-manager-first} console]. 
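For the Amazon CloudWatch prerequisite mentioned above, the following is a rough, non-authoritative sketch of the ROSA CLI step; the `rosa install addon` subcommand, the `--interactive` flag, and the `<cluster_name>` placeholder are assumptions to verify against the help output of your installed `rosa` version.

[source,terminal]
----
$ rosa install addon cluster-logging-operator --cluster=<cluster_name> --interactive
----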
- -include::modules/osd-rhoam.adoc[leveloffset=+1] diff --git a/adding_service_cluster/images b/adding_service_cluster/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/adding_service_cluster/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/adding_service_cluster/modules b/adding_service_cluster/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/adding_service_cluster/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/adding_service_cluster/rosa-available-services.adoc b/adding_service_cluster/rosa-available-services.adoc deleted file mode 100644 index bc7325278d6f..000000000000 --- a/adding_service_cluster/rosa-available-services.adoc +++ /dev/null @@ -1,42 +0,0 @@ -:_content-type: ASSEMBLY -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="rosa-available-services"] -= Add-on services available for {product-title} -:context: rosa-available-services - - -You can add services to your existing {product-title} (ROSA) cluster using the xref:../adding_service_cluster/adding-service.adoc#adding-service[{cluster-manager-first} console]. - -These services can also be installed xref:../rosa_cli/rosa-manage-objects-cli.adoc#rosa-managing-objects-cli[using the ROSA CLI (`rosa`)]. - - -include::modules/aws-cloudwatch.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://aws.amazon.com/cloudwatch/[Amazon CloudWatch product information] -* xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-cloudwatch_cluster-logging-external[Forwarding logs to Amazon CloudWatch] - -include::modules/osd-rhoam.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* link:https://access.redhat.com/documentation/en-us/red_hat_openshift_api_management[Red Hat OpenShift API Management] documentation - -//// -include::modules/rosa-rhoda.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://www.redhat.com/en/technologies/cloud-computing/openshift/openshift-database-access[Red Hat OpenShift Database Access] product page -//// -// This module and additional resource are no longer included in the document due to OSDOCS-5817. - -include::modules/rosa-rhods.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_science/1[Red Hat OpenShift Data Science] documentation -* link:https://www.redhat.com/en/technologies/cloud-computing/openshift/openshift-data-science[Red Hat OpenShift Data Science] product page diff --git a/applications/PLACEHOLDER b/applications/PLACEHOLDER deleted file mode 100644 index 985a0e1895b7..000000000000 --- a/applications/PLACEHOLDER +++ /dev/null @@ -1,2 +0,0 @@ -Please leave this file until after Node PRs merge, as it is needed for the topic_yaml.
Subtopics are not allowed, apparently, without at least one topic in the TOC - diff --git a/applications/_attributes b/applications/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/applications/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/applications/application-health.adoc b/applications/application-health.adoc deleted file mode 100644 index de694b2f8e2c..000000000000 --- a/applications/application-health.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: ASSEMBLY -:context: application-health -[id="application-health"] -= Monitoring application health by using health checks -include::_attributes/common-attributes.adoc[] - -toc::[] - - -In software systems, components can become unhealthy due to transient issues such as temporary connectivity loss, configuration errors, or problems with external dependencies. {product-title} applications have a number of options to detect and handle unhealthy containers. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - - -include::modules/application-health-about.adoc[leveloffset=+1] - -include::modules/application-health-configuring.adoc[leveloffset=+1] - -include::modules/odc-monitoring-application-health-using-developer-perspective.adoc[leveloffset=+1] - -include::modules/odc-adding-health-checks.adoc[leveloffset=+1] - -include::modules/odc-editing-health-checks.adoc[leveloffset=+1] - -include::modules/odc-monitoring-health-checks.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* For details on switching to the *Developer* perspective in the web console, see xref:../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[About the *Developer* perspective]. -* For details on adding health checks while creating and deploying an application, see *Advanced Options* in the xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating applications using the Developer perspective] section. diff --git a/applications/config-maps.adoc b/applications/config-maps.adoc deleted file mode 100644 index 39f675a19c14..000000000000 --- a/applications/config-maps.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -[id="config-maps"] -= Using config maps with applications -include::_attributes/common-attributes.adoc[] -:context: config-maps - -toc::[] - -Config maps allow you to decouple configuration artifacts from image content to keep containerized applications portable. - -The following sections define config maps and explain how to create and use them. - -For information on creating config maps, see xref:../nodes/pods/nodes-pods-configmaps.adoc[Creating and using config maps]. - -include::modules/nodes-pods-configmap-overview.adoc[leveloffset=+1] - -[id="nodes-pods-config-maps-consuming-configmap-in-pods"] -== Use cases: Consuming config maps in pods - -The following sections describe some use cases for consuming `ConfigMap` -objects in pods.
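The included modules cover consuming `ConfigMap` objects as environment variables, command-line arguments, and volumes. As a minimal, hypothetical sketch of the first pattern (the `special-config` config map, the `special.how` key, and the pod shown here are illustrative placeholders, not resources defined elsewhere in this documentation):

[source,yaml]
----
apiVersion: v1
kind: ConfigMap
metadata:
  name: special-config
  namespace: my-project
data:
  special.how: very
---
apiVersion: v1
kind: Pod
metadata:
  name: configmap-example-pod
  namespace: my-project
spec:
  containers:
    - name: test-container
      image: registry.access.redhat.com/ubi9/ubi-minimal
      command: ["/bin/sh", "-c", "env"]
      env:
        - name: SPECIAL_LEVEL_KEY <1>
          valueFrom:
            configMapKeyRef:
              name: special-config <2>
              key: special.how <3>
  restartPolicy: Never
----
<1> The environment variable that is injected into the container.
<2> The name of the `ConfigMap` object that provides the value.
<3> The key within the config map whose value is read.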
- -include::modules/nodes-pods-configmaps-use-case-consuming-in-env-vars.adoc[leveloffset=+2] - -include::modules/nodes-pods-configmaps-use-case-setting-command-line-arguments.adoc[leveloffset=+2] - -include::modules/nodes-pods-configmaps-use-case-consuming-in-volumes.adoc[leveloffset=+2] diff --git a/applications/connecting_applications_to_services/binding-workloads-using-sbo.adoc b/applications/connecting_applications_to_services/binding-workloads-using-sbo.adoc deleted file mode 100644 index 3a22f233cb3d..000000000000 --- a/applications/connecting_applications_to_services/binding-workloads-using-sbo.adoc +++ /dev/null @@ -1,51 +0,0 @@ -:_content-type: ASSEMBLY -[id="binding-workloads-using-sbo"] -= Binding workloads using Service Binding Operator -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] -:context: binding-workloads-using-sbo - -toc::[] - -Application developers must bind a workload to one or more backing services by using a binding secret. This secret is generated for the purpose of storing information to be consumed by the workload. - -As an example, consider that the service you want to connect to is already exposing the binding data. In this case, you would also need a workload to be used along with the `ServiceBinding` custom resource (CR). By using this `ServiceBinding` CR, the workload sends a binding request with the details of the services to bind with. - -.Example of `ServiceBinding` CR -[source,yaml] ----- -apiVersion: binding.operators.coreos.com/v1alpha1 -kind: ServiceBinding -metadata: - name: spring-petclinic-pgcluster - namespace: my-petclinic -spec: - services: <1> - - group: postgres-operator.crunchydata.com - version: v1beta1 - kind: PostgresCluster - name: hippo - application: <2> - name: spring-petclinic - group: apps - version: v1 - resource: deployments ----- -<1> Specifies a list of service resources. -<2> The sample application that points to a Deployment or any other similar resource with an embedded PodSpec. - -As shown in the previous example, you can also directly use a `ConfigMap` or a `Secret` itself as a service resource to be used as a source of binding data. - -include::modules/sbo-naming-strategies.adoc[leveloffset=+1] -include::modules/sbo-advanced-binding-options.adoc[leveloffset=+1] -include::modules/sbo-binding-workloads-that-are-not-compliant-with-PodSpec.adoc[leveloffset=+1] -include::modules/sbo-unbinding-workloads-from-a-backing-service.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_binding-workloads-sbo"] -== Additional resources -* xref:../../applications/connecting_applications_to_services/understanding-service-binding-operator.adoc#binding-a-workload-together-with-a-backing-service_understanding-service-binding-operator[Binding a workload together with a backing service]. -* xref:../../applications/connecting_applications_to_services/getting-started-with-service-binding.adoc#connecting-the-spring-petclinic-sample-application-to-the-postgresql-database-service[Connecting the Spring PetClinic sample application to the PostgreSQL database service]. 
-* xref:../../operators/understanding/crds/crd-managing-resources-from-crds.adoc#crd-creating-custom-resources-from-file_crd-managing-resources-from-crds[Creating custom resources from a file] -* link:https://redhat-developer.github.io/service-binding-operator/userguide/binding-workloads-using-sbo/custom-path-injection.html#_workload_resource_mapping[Example schema of the ClusterWorkloadResourceMapping resource]. - diff --git a/applications/connecting_applications_to_services/exposing-binding-data-from-a-service.adoc b/applications/connecting_applications_to_services/exposing-binding-data-from-a-service.adoc deleted file mode 100644 index 50c305d5f699..000000000000 --- a/applications/connecting_applications_to_services/exposing-binding-data-from-a-service.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -[id="exposing-binding-data-from-a-service"] -= Exposing binding data from a service -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] -:context: exposing-binding-data-from-a-service - -toc::[] - -[role="_abstract"] -Application developers need access to backing services to build and connect workloads. Connecting workloads to backing services is always a challenge because each service provider requires a different way to access their secrets and consume them in a workload. - -The {servicebinding-title} enables application developers to easily bind workloads together with operator-managed backing services, without any manual procedures to configure the binding connection. For the {servicebinding-title} to provide the binding data, as an Operator provider or user who creates backing services, you must expose the binding data to be automatically detected by the {servicebinding-title}. Then, the {servicebinding-title} automatically collects the binding data from the backing service and shares it with a workload to provide a consistent and predictable experience. - -include::modules/sbo-methods-of-exposing-binding-data.adoc[leveloffset=+1] -include::modules/sbo-data-model.adoc[leveloffset=+1] -include::modules/sbo-setting-annotations-mapping-optional.adoc[leveloffset=+1] -include::modules/sbo-rbac-requirements.adoc[leveloffset=+1] -include::modules/sbo-categories-of-exposable-binding-data.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_exposing-binding-data"] -== Additional resources -// * link:https://github.com/openshift/console/blob/master/frontend/packages/operator-lifecycle-manager/src/components/descriptors/reference/reference.md[OLM Descriptor Reference]. -// When OLM descriptors are supported again, add this additional resource. -* xref:../../operators/operator_sdk/osdk-generating-csvs.adoc#osdk-generating-csvs[Defining cluster service versions (CSVs)]. -* xref:../../applications/connecting_applications_to_services/projecting-binding-data.adoc#projecting-binding-data[Projecting binding data]. 
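For orientation, one of the methods covered by the included modules is annotating the backing service resource so that the {servicebinding-title} can detect and collect the binding data. The following sketch reuses the `PostgresCluster` example shown earlier in this guide, but the annotation names and JSONPath expressions are illustrative assumptions only; consult the included reference modules for the exact annotation syntax.

[source,yaml]
----
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: hippo
  namespace: my-petclinic
  annotations:
    service.binding/host: "path={.status.host}" <1>
    service.binding/database: "path={.spec.name}" <2>
----
<1> Exposes the value at the given JSONPath as the `host` binding item.
<2> Exposes the value at the given JSONPath as the `database` binding item.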
\ No newline at end of file diff --git a/applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc b/applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc deleted file mode 100644 index 001b8a622b0e..000000000000 --- a/applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc +++ /dev/null @@ -1,38 +0,0 @@ -:_content-type: ASSEMBLY -[id="getting-started-with-service-binding-ibm-power-ibm-z"] -= Getting started with service binding on {ibmpowerProductName}, {ibmzProductName}, and {linuxoneProductName} -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] -:context: getting-started-with-service-binding-ibm-power-ibm-z - -toc::[] - -[role="_abstract"] -The {servicebinding-title} manages the data plane for workloads and backing services. This guide provides instructions with examples to help you create a database instance, deploy an application, and use the {servicebinding-title} to create a binding connection between the application and the database service. - -// Prerequisites for getting started with Service Binding Operator -[discrete] -== Prerequisites - -* You have access to an {product-title} cluster using an account with `cluster-admin` permissions. -* You have installed the `oc` CLI. -* You have installed the {servicebinding-title} from OperatorHub. - -//Deploying PostgreSQL operator -include::modules/sbo-deploying-a-postgresql-database-operator-power-z.adoc[leveloffset=+1] - -//Creating a PostgreSQL database instance -include::modules/sbo-creating-a-postgresql-database-instance-power-z.adoc[leveloffset=+1] - -//Deploying the Spring PetClinic sample application -include::modules/sbo-deploying-the-spring-petclinic-sample-application-power-z.adoc[leveloffset=+1] - -//Connecting the Spring PetClinic sample application to the PostgreSQL database service -include::modules/sbo-connecting-spring-petclinic-sample-app-to-postgresql-database-service-power-z.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_getting-started-with-service-binding-ibm-power-ibm-z"] -== Additional resources -* xref:../../applications/connecting_applications_to_services/installing-sbo.adoc#installing-sbo[Installing Service Binding Operator] -* xref:../../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating applications using the Developer perspective] -* xref:../../operators/understanding/crds/crd-managing-resources-from-crds.adoc[Managing resources from custom resource definitions] diff --git a/applications/connecting_applications_to_services/getting-started-with-service-binding.adoc b/applications/connecting_applications_to_services/getting-started-with-service-binding.adoc deleted file mode 100644 index 6fb5ac0f69ab..000000000000 --- a/applications/connecting_applications_to_services/getting-started-with-service-binding.adoc +++ /dev/null @@ -1,42 +0,0 @@ -:_content-type: ASSEMBLY -[id="getting-started-with-service-binding"] -= Getting started with service binding -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] -:context: getting-started-with-service-binding - -toc::[] - -[role="_abstract"] -The {servicebinding-title} manages the data plane for workloads and backing services. 
This guide provides instructions with examples to help you create a database instance, deploy an application, and use the {servicebinding-title} to create a binding connection between the application and the database service. - -// Prerequisites for getting started with Service Binding Operator -[discrete] -== Prerequisites - -* You have access to an {product-title} cluster using an account with `cluster-admin` permissions. -* You have installed the `oc` CLI. -* You have installed the {servicebinding-title} from OperatorHub. -* You have installed the 5.1.2 version of the Crunchy Postgres for Kubernetes Operator from OperatorHub using the *v5* Update channel. The installed Operator is available in an appropriate namespace, such as the `my-petclinic` namespace. -+ -[NOTE] -==== -You can create the namespace using the `oc create namespace my-petclinic` command. -==== - -//Creating a PostgreSQL database instance -include::modules/sbo-creating-a-postgresql-database-instance.adoc[leveloffset=+1] - -//Deploying the Spring PetClinic sample application -include::modules/sbo-deploying-the-spring-petclinic-sample-application.adoc[leveloffset=+1] - -//Connecting the Spring PetClinic sample application to the PostgreSQL database service -include::modules/sbo-connecting-spring-petclinic-sample-app-to-postgresql-database-service.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_getting-started-sbo"] -== Additional resources -* xref:../../applications/connecting_applications_to_services/installing-sbo.adoc#installing-sbo[Installing Service Binding Operator]. -* xref:../../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating applications using the Developer perspective]. -* xref:../../operators/understanding/crds/crd-managing-resources-from-crds.adoc[Managing resources from custom resource definitions]. -* link:https://github.com/redhat-developer/service-binding-operator#known-bindable-operators[Known bindable Operators]. \ No newline at end of file diff --git a/applications/connecting_applications_to_services/installing-sbo.adoc b/applications/connecting_applications_to_services/installing-sbo.adoc deleted file mode 100644 index afd97908dad6..000000000000 --- a/applications/connecting_applications_to_services/installing-sbo.adoc +++ /dev/null @@ -1,28 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-sbo"] -= Installing Service Binding Operator -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] -:context: installing-sbo - -toc::[] - -[role="_abstract"] -This guide walks cluster administrators through the process of installing the {servicebinding-title} on an {product-title} cluster. - -You can install the {servicebinding-title} on {product-title} 4.7 and later. - -[discrete] -== Prerequisites - -* You have access to an {product-title} cluster using an account with `cluster-admin` permissions. -* Your cluster has the xref:../../post_installation_configuration/enabling-cluster-capabilities.adoc#enabling-cluster-capabilities[Marketplace capability] enabled or the Red Hat Operator catalog source configured manually.
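The included module describes installation through the web console. As a hedged sketch of a CLI alternative that uses a standard OLM `Subscription` object, the package name, channel, and catalog source shown here are assumptions that you should verify against the OperatorHub entry for the {servicebinding-title}:

[source,yaml]
----
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: rh-service-binding-operator <1>
  namespace: openshift-operators
spec:
  channel: stable <2>
  name: rh-service-binding-operator <1>
  source: redhat-operators
  sourceNamespace: openshift-marketplace
----
<1> Assumed package name; confirm it in the OperatorHub catalog.
<2> Assumed update channel; confirm the available channels in the OperatorHub catalog.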
- - -//Installing Service Binding Operator using web console - -include::modules/op-installing-sbo-operator-using-the-web-console.adoc[leveloffset=+1] - - -== Additional resources -* xref:../../applications/connecting_applications_to_services/getting-started-with-service-binding.adoc#getting-started-with-service-binding[Getting started with service binding]. \ No newline at end of file diff --git a/applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc b/applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc deleted file mode 100644 index 5ea513d917dd..000000000000 --- a/applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="odc-connecting-an-application-to-a-service-using-the-developer-perspective"] -= Connecting an application to a service using the Developer perspective -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] -:context: odc-connecting-an-application-to-a-service-using-the-developer-perspective - -toc::[] - -[role="_abstract"] - -Use the *Topology* view for the following purposes: - -** Group multiple components within an application. - -** Connect components with each other. - -** Connect multiple resources to services with labels. - -You can either use a binding or a visual connector to connect components. - - -A binding connection between the components can be established only if the target node is an Operator-backed service. This is indicated by the *Create a binding connector* tooltip, which appears when you drag an arrow to such a target node. When an application is connected to a service by using a binding connector, a `ServiceBinding` resource is created. Then, the {servicebinding-title} controller projects the necessary binding data into the application deployment. After the request is successful, the application is redeployed, establishing an interaction between the connected components. - -A visual connector establishes only a visual connection between the components, depicting an intent to connect. No interaction between the components is established. If the target node is not an Operator-backed service, the *Create a visual connector* tooltip is displayed when you drag an arrow to a target node. - -include::modules/odc-discovering-and-identifying-operator-backed-bindable-services.adoc[leveloffset=+1] -include::modules/odc-creating-a-visual-connection-between-components.adoc[leveloffset=+1] -include::modules/odc-creating-a-binding-connection-between-components.adoc[leveloffset=+1] -include::modules/odc-verifying-the-status-of-your-service-binding-from-the-topology-view.adoc[leveloffset=+1] -include::modules/odc-visualizing-the-binding-connections-to-resources.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources-odc-connecting-an-application-to-a-service-using-the-developer-perspective"] -== Additional resources -* xref:../../applications/connecting_applications_to_services/getting-started-with-service-binding.adoc#getting-started-with-service-binding[Getting started with service binding]. -* link:https://github.com/redhat-developer/service-binding-operator#known-bindable-operators[Known bindable Operators].
diff --git a/applications/connecting_applications_to_services/projecting-binding-data.adoc b/applications/connecting_applications_to_services/projecting-binding-data.adoc deleted file mode 100644 index c27b55566bc1..000000000000 --- a/applications/connecting_applications_to_services/projecting-binding-data.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_content-type: ASSEMBLY -[id="projecting-binding-data"] -= Projecting binding data -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] -:context: projecting-binding-data - -toc::[] - -[role="_abstract"] -This section provides information on how you can consume the binding data. - -== Consumption of binding data -After the backing service exposes the binding data, you must project this data into the workload so that the workload can access and consume it. The {servicebinding-title} automatically projects this set of data into the workload by using the following methods: - -. By default, as files. -. As environment variables, after you configure the `.spec.bindAsFiles` parameter in the `ServiceBinding` resource. - -include::modules/sbo-configuration-of-directory-path-to-project-binding-data.adoc[leveloffset=+1] -include::modules/sbo-projecting-the-binding-data.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_projecting-binding-data-sbo"] -== Additional resources -* xref:../../applications/connecting_applications_to_services/exposing-binding-data-from-a-service.adoc#exposing-binding-data-from-a-service[Exposing binding data from a service]. -* link:https://redhat-developer.github.io/service-binding-operator/userguide/using-projected-bindings/using-projected-bindings.html[Using the projected binding data in the source code of the application]. \ No newline at end of file diff --git a/applications/connecting_applications_to_services/sbo-release-notes.adoc b/applications/connecting_applications_to_services/sbo-release-notes.adoc deleted file mode 100644 index c4a67d04d8c8..000000000000 --- a/applications/connecting_applications_to_services/sbo-release-notes.adoc +++ /dev/null @@ -1,73 +0,0 @@ -//OpenShift Service Binding Release Notes -:_content-type: ASSEMBLY -[id="servicebinding-release-notes"] -= Release notes for {servicebinding-title} -:context: servicebinding-release-notes -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] - -toc::[] - -The {servicebinding-title} consists of a controller and an accompanying custom resource definition (CRD) for service binding. It manages the data plane for workloads and backing services. The Service Binding Controller reads the data made available by the control plane of backing services. Then, it projects this data to workloads according to the rules specified through the `ServiceBinding` resource. - -With the {servicebinding-title}, you can: - -* Bind your workloads together with Operator-managed backing services. -* Automate configuration of binding data. -* Provide service operators with a low-touch administrative experience to provision and manage access to services. -* Enrich the development lifecycle with a consistent and declarative service binding method that eliminates discrepancies in cluster environments. - -The custom resource definition (CRD) of the {servicebinding-title} supports the following APIs: - -* *Service Binding* with the `binding.operators.coreos.com` API group. -* *Service Binding (Spec API)* with the `servicebinding.io` API group.
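To make the projection options from the preceding "Projecting binding data" section concrete, the following minimal sketch extends the `ServiceBinding` example shown earlier in this guide. The assumption here is that setting `.spec.bindAsFiles` to `false` switches the projection from files, which is the default, to environment variables:

[source,yaml]
----
apiVersion: binding.operators.coreos.com/v1alpha1
kind: ServiceBinding
metadata:
  name: spring-petclinic-pgcluster
  namespace: my-petclinic
spec:
  bindAsFiles: false <1>
  services:
    - group: postgres-operator.crunchydata.com
      version: v1beta1
      kind: PostgresCluster
      name: hippo
  application:
    name: spring-petclinic
    group: apps
    version: v1
    resource: deployments
----
<1> When set to `false`, the binding data is projected as environment variables instead of as files.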
- -[id="support-matrix"] -== Support matrix - -Some features in the following table are in link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]. These experimental features are not intended for production use. - -In the table, features are marked with the following statuses: - -- *TP*: _Technology Preview_ - -- *GA*: _General Availability_ - -Note the following scope of support on the Red Hat Customer Portal for these features: - -.Support matrix -[options="header"] -|=== -|*{servicebinding-title}* 2+|*API Group and Support Status*|*OpenShift Versions* - -|*Version*|*`binding.operators.coreos.com`* |*`servicebinding.io`* | -|1.3.3 |GA |GA |4.9-4.12 -|1.3.1 |GA |GA |4.9-4.11 -|1.3 |GA |GA |4.9-4.11 -|1.2 |GA |GA |4.7-4.11 -|1.1.1 |GA |TP |4.7-4.10 -|1.1 |GA |TP |4.7-4.10 -|1.0.1 |GA |TP |4.7-4.9 -|1.0 |GA |TP |4.7-4.9 - -|=== - -[id="servicebinding-inclusive-language"] -== Making open source more inclusive - -Red Hat is committed to replacing problematic language in our code, documentation, and web properties. We are beginning with these four terms: master, slave, blacklist, and whitelist. Because of the enormity of this endeavor, these changes will be implemented gradually over several upcoming releases. For more details, see link:https://www.redhat.com/en/blog/making-open-source-more-inclusive-eradicating-problematic-language[Red Hat CTO Chris Wright's message]. - -// Modules included, most to least recent -include::modules/sbo-release-notes-1-3-3.adoc[leveloffset=+1] -include::modules/sbo-release-notes-1-3-1.adoc[leveloffset=+1] -include::modules/sbo-release-notes-1-3.adoc[leveloffset=+1] -include::modules/sbo-release-notes-1-2.adoc[leveloffset=+1] -include::modules/sbo-release-notes-1-1-1.adoc[leveloffset=+1] -include::modules/sbo-release-notes-1-1.adoc[leveloffset=+1] -include::modules/sbo-release-notes-1-0-1.adoc[leveloffset=+1] -include::modules/sbo-release-notes-1-0.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_release-notes-sbo"] -== Additional resources -* xref:../../applications/connecting_applications_to_services/understanding-service-binding-operator.adoc#understanding-service-binding-operator[Understanding Service Binding Operator]. diff --git a/applications/connecting_applications_to_services/understanding-service-binding-operator.adoc b/applications/connecting_applications_to_services/understanding-service-binding-operator.adoc deleted file mode 100644 index 5a6c5500fe95..000000000000 --- a/applications/connecting_applications_to_services/understanding-service-binding-operator.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-service-binding-operator"] -= Understanding Service Binding Operator -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] -:context: understanding-service-binding-operator - -toc::[] - -[role="_abstract"] -Application developers need access to backing services to build and connect workloads. Connecting workloads to backing services is always a challenge because each service provider suggests a different way to access their secrets and consume them in a workload. In addition, manual configuration and maintenance of this binding together of workloads and backing services make the process tedious, inefficient, and error-prone. 
- -The {servicebinding-title} enables application developers to easily bind workloads together with Operator-managed backing services, without any manual procedures to configure the binding connection. - -include::modules/sbo-service-binding-terminology.adoc[leveloffset=+1] -include::modules/sbo-about-service-binding-operator.adoc[leveloffset=+1] -include::modules/sbo-key-features.adoc[leveloffset=+1] -include::modules/sbo-api-differences.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_understanding-sbo"] -== Additional resources -* xref:../../applications/connecting_applications_to_services/getting-started-with-service-binding.adoc#getting-started-with-service-binding[Getting started with service binding]. \ No newline at end of file diff --git a/applications/creating_applications/creating-applications-using-cli.adoc b/applications/creating_applications/creating-applications-using-cli.adoc deleted file mode 100644 index 10357412a63d..000000000000 --- a/applications/creating_applications/creating-applications-using-cli.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-applications-using-cli"] -= Creating applications using the CLI -include::_attributes/common-attributes.adoc[] -:context: creating-applications-using-cli - -toc::[] - -You can create an {product-title} application from components that include -source or binary code, images, and templates by using the {product-title} -CLI. - -The set of objects created by `new-app` depends on the artifacts passed as -input: source repositories, images, or templates. - -include::modules/applications-create-using-cli-source-code.adoc[leveloffset=+1] - -include::modules/applications-create-using-cli-image.adoc[leveloffset=+1] - -include::modules/applications-create-using-cli-template.adoc[leveloffset=+1] - -include::modules/applications-create-using-cli-modify.adoc[leveloffset=+1] diff --git a/applications/creating_applications/creating-apps-from-installed-operators.adoc b/applications/creating_applications/creating-apps-from-installed-operators.adoc deleted file mode 100644 index 728bb3c441d3..000000000000 --- a/applications/creating_applications/creating-apps-from-installed-operators.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-apps-from-installed-operators"] -= Creating applications from installed Operators -include::_attributes/common-attributes.adoc[] -:context: creating-apps-from-installed-operators - -toc::[] - -_Operators_ are a method of packaging, deploying, and managing a Kubernetes -application. You can create applications on {product-title} using Operators that -have been installed by a cluster administrator. - -This guide walks developers through an example of creating applications from an -installed Operator using the {product-title} web console. - -[role="_additional-resources"] -.Additional resources - -* See the -xref:../../operators/understanding/olm-what-operators-are.adoc#olm-what-operators-are[Operators] -guide for more on how Operators work and how the Operator Lifecycle Manager is -integrated in {product-title}. 
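As a brief, hedged illustration of the `new-app` input types described in the CLI assembly above, the following commands show the three typical forms; the repository URL, image reference, template name, and parameter are placeholders only:

[source,terminal]
----
$ oc new-app https://github.com/<your_org>/<your_repo>.git <1>

$ oc new-app registry.example.com/<group>/<image>:latest <2>

$ oc new-app --template=<template_name> -p <PARAM_NAME>=<value> <3>
----
<1> Creates an application from a source code repository by using a source-to-image or Docker build.
<2> Creates an application from an existing container image.
<3> Creates an application from a template and optionally overrides template parameters.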
- -include::modules/olm-creating-etcd-cluster-from-operator.adoc[leveloffset=+1] diff --git a/applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc b/applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc deleted file mode 100644 index 4d93abbdfb4d..000000000000 --- a/applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc +++ /dev/null @@ -1,95 +0,0 @@ -:_content-type: ASSEMBLY -[id="odc-creating-applications-using-developer-perspective"] -= Creating applications using the Developer perspective -include::_attributes/common-attributes.adoc[] -:context: odc-creating-applications-using-developer-perspective - -toc::[] - -The *Developer* perspective in the web console provides you the following options from the *+Add* view to create applications and associated services and deploy them on {product-title}: - -* *Getting started resources*: Use these resources to help you get started with Developer Console. You can choose to hide the header using the *Options* menu {kebab}. -** *Creating applications using samples*: Use existing code samples to get started with creating applications on the {product-title}. -** *Build with guided documentation*: Follow the guided documentation to build applications and familiarize yourself with key concepts and terminologies. -** *Explore new developer features*: Explore the new features and resources within the *Developer* perspective. - -* *Developer catalog*: Explore the Developer Catalog to select the required applications, services, or source to image builders, and then add it to your project. -** *All Services*: Browse the catalog to discover services across {product-title}. -** *Database*: Select the required database service and add it to your application. -** *Operator Backed*: Select and deploy the required Operator-managed service. -** *Helm chart*: Select the required Helm chart to simplify deployment of applications and services. -** *Devfile*: Select a devfile from the *Devfile registry* to declaratively define a development environment. -** *Event Source*: Select an event source to register interest in a class of events from a particular system. -+ -[NOTE] -==== -The Managed services option is also available if the RHOAS Operator is installed. -==== - -* *Git repository*: Import an existing codebase, Devfile, or Dockerfile from your Git repository using the *From Git*, *From Devfile*, or *From Dockerfile* options respectively, to build and deploy an application on {product-title}. - -* *Container images*: Use existing images from an image stream or registry to deploy it on to the {product-title}. - -* *Pipelines*: Use Tekton pipeline to create CI/CD pipelines for your software delivery process on the {product-title}. - -* *Serverless*: Explore the *Serverless* options to create, build, and deploy stateless and serverless applications on the {product-title}. -** *Channel*: Create a Knative channel to create an event forwarding and persistence layer with in-memory and reliable implementations. - -* *Samples*: Explore the available sample applications to create, build, and deploy an application quickly. - -* *Quick Starts*: Explore the quick start options to create, import, and run applications with step-by-step instructions and tasks. - -* *From Local Machine*: Explore the *From Local Machine* tile to import or upload files on your local machine for building and deploying applications easily. 
-** *Import YAML*: Upload a YAML file to create and define resources for building and deploying applications. -** *Upload JAR file*: Upload a JAR file to build and deploy Java applications. - -* *Share my Project*: Use this option to add or remove users to a project and provide accessibility options to them. - -* *Helm Chart repositories*: Use this option to add Helm Chart repositories in a namespace. - -* *Re-ordering of resources*: Use these resources to re-order pinned resources added to your navigation pane. The drag-and-drop icon is displayed on the left side of the pinned resource when you hover over it in the navigation pane. The dragged resource can be dropped only in the section where it resides. - -ifdef::openshift-enterprise,openshift-webscale[] -Note that certain options, such as *Pipelines*, *Event Source*, and *Import Virtual Machines*, are displayed only when the xref:../../cicd/pipelines/installing-pipelines.adoc#op-installing-pipelines-operator-in-web-console_installing-pipelines[OpenShift Pipelines Operator], link:https://docs.openshift.com/serverless/1.28/install/install-serverless-operator.html#serverless-install-web-console_install-serverless-operator[{ServerlessOperatorName}], and xref:../../virt/install/installing-virt.adoc#virt-subscribing-cli_installing-virt[OpenShift Virtualization Operator] are installed, respectively. -endif::[] - -[id="prerequisites_odc-creating-applications-using-developer-perspective"] -== Prerequisites - -To create applications using the *Developer* perspective ensure that: - -* You have xref:../../web_console/web-console.adoc#web-console[logged in to the web console]. -* You have created a project or have access to a project with the appropriate xref:../../authentication/using-rbac.adoc#default-roles_using-rbac[roles and permissions] to create applications and other workloads in {product-title}. - -ifdef::openshift-enterprise,openshift-webscale[] - -To create serverless applications, in addition to the preceding prerequisites, ensure that: - -* You have link:https://docs.openshift.com/serverless/1.28/install/install-serverless-operator.html#install-serverless-operator[installed the {ServerlessOperatorName}]. -* You have link:https://docs.openshift.com/serverless/1.28/install/installing-knative-serving.html#installing-knative-serving[created a `KnativeServing` resource in the `knative-serving` namespace]. - -endif::[] - -include::modules/odc-creating-sample-applications.adoc[leveloffset=+1] - -include::modules/odc-using-quickstarts.adoc[leveloffset=+1] - -include::modules/odc-importing-codebase-from-git-to-create-application.adoc[leveloffset=+1] - -include::modules/odc-deploying-container-image.adoc[leveloffset=+1] - -include::modules/odc-deploying-java-applications.adoc[leveloffset=+1] - -include::modules/odc-using-the-devfile-registry.adoc[leveloffset=+1] - -include::modules/odc-using-the-developer-catalog-to-add-services-or-components.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_odc-creating-applications-using-developer-perspective"] -== Additional resources - -* For more information about Knative routing settings for {ServerlessProductName}, see link:https://docs.openshift.com/serverless/1.28/knative-serving/external-ingress-routing/routing-overview.html#routing-overview[Routing]. 
-* For more information about domain mapping settings for {ServerlessProductName}, see link:https://docs.openshift.com/serverless/1.28/knative-serving/config-custom-domains/serverless-custom-domains.html#serverless-custom-domains[Configuring a custom domain for a Knative service]. -* For more information about Knative autoscaling settings for {ServerlessProductName}, see link:https://docs.openshift.com/serverless/1.28/knative-serving/autoscaling/serverless-autoscaling-developer.html#serverless-autoscaling-developer[Autoscaling]. -* For more information about adding a new user to a project, see xref:../projects/working-with-projects.adoc#odc-providing-project-permissions-using-developer-perspective_projects[Working with projects]. -* For more information about creating a Helm Chart repository, see xref:../working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc#odc-creating-helm-releases-using-developer-perspective_configuring-custom-helm-chart-repositories[Creating Helm Chart repositories]. diff --git a/applications/creating_applications/snippets b/applications/creating_applications/snippets deleted file mode 120000 index 7bf6da9a51d0..000000000000 --- a/applications/creating_applications/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets \ No newline at end of file diff --git a/applications/deployments/_attributes b/applications/deployments/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/applications/deployments/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/applications/deployments/deployment-strategies.adoc b/applications/deployments/deployment-strategies.adoc deleted file mode 100644 index 5d18c9f72dba..000000000000 --- a/applications/deployments/deployment-strategies.adoc +++ /dev/null @@ -1,63 +0,0 @@ -:_content-type: ASSEMBLY -[id="deployment-strategies"] -= Using deployment strategies -include::_attributes/common-attributes.adoc[] -:context: deployment-strategies - -toc::[] - -_Deployment strategies_ are used to change or upgrade applications without downtime so that users barely notice a change. - -Because users generally access applications through a route handled by a router, deployment strategies can focus on `DeploymentConfig` object features or routing features. Strategies that focus on `DeploymentConfig` object features impact all routes that use the application. Strategies that use router features target individual routes. - -Most deployment strategies are supported through the `DeploymentConfig` object, and some additional strategies are supported through router features. - -[id="choosing-deployment-strategies"] -== Choosing a deployment strategy - -Consider the following when choosing a deployment strategy: - -- Long-running connections must be handled gracefully. -- Database conversions can be complex and must be done and rolled back along with the application. -- If the application is a hybrid of microservices and traditional components, downtime might be required to complete the transition. -- You must have the infrastructure required to support your chosen deployment strategy. -- If your test environment is not isolated, you risk breaking both the new and the old versions. - -A deployment strategy uses readiness checks to determine if a new pod is ready for use. If a readiness check fails, the `DeploymentConfig` object retries running the pod until it times out. The default timeout is `10m`, a value set in `TimeoutSeconds` in `dc.spec.strategy.*params`.
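The following minimal sketch shows where the readiness check and the strategy timeout described above are defined in a `DeploymentConfig` object; the image, probe endpoint, and other values are placeholders, and unrelated fields are omitted:

[source,yaml]
----
kind: DeploymentConfig
apiVersion: apps.openshift.io/v1
metadata:
  name: example-dc
spec:
  replicas: 3
  strategy:
    type: Rolling
    rollingParams:
      timeoutSeconds: 600 <1>
  template:
    spec:
      containers:
        - name: example
          image: quay.io/<org>/<app>:latest
          readinessProbe: <2>
            httpGet:
              path: /healthz
              port: 8080
            initialDelaySeconds: 5
----
<1> Corresponds to the timeout referenced above; `600` seconds equals the default of `10m`.
<2> The readiness check that the strategy uses to decide whether a new pod is ready for use.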
- -// Rolling strategies -include::modules/deployments-rolling-strategy.adoc[leveloffset=+1] -include::modules/deployments-canary-deployments.adoc[leveloffset=+2] -// Creating rolling deployments -include::modules/creating-rolling-deployments-CLI.adoc[leveloffset=+2] -// Editing a deployment -:context: rolling-strategy -include::modules/odc-editing-deployments.adoc[leveloffset=+2] -// Starting a deployment -include::modules/odc-starting-rolling-deployment.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating and deploying applications on {product-title} using the *Developer* perspective] -* xref:../../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-viewing-application-composition-using-topology-view[Viewing the applications in your project, verifying their deployment status, and interacting with them in the *Topology* view] - -// Recreate strategies -include::modules/deployments-recreate-strategy.adoc[leveloffset=+1] -// Editing a deployment -:context: recreate-strategy -include::modules/odc-editing-deployments.adoc[leveloffset=+2] -// Starting a deployment -include::modules/odc-starting-recreate-deployment.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating and deploying applications on {product-title} using the *Developer* perspective] -* xref:../../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-viewing-application-composition-using-topology-view[Viewing the applications in your project, verifying their deployment status, and interacting with them in the *Topology* view] - -// Custom strategies -include::modules/deployments-custom-strategy.adoc[leveloffset=+1] -// Editing a deployment -:context: custom-strategy -include::modules/odc-editing-deployments.adoc[leveloffset=+2] - -include::modules/deployments-lifecycle-hooks.adoc[leveloffset=+1] diff --git a/applications/deployments/images b/applications/deployments/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/applications/deployments/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/applications/deployments/managing-deployment-processes.adoc b/applications/deployments/managing-deployment-processes.adoc deleted file mode 100644 index 5226f871c93a..000000000000 --- a/applications/deployments/managing-deployment-processes.adoc +++ /dev/null @@ -1,44 +0,0 @@ -:_content-type: ASSEMBLY -[id="deployment-operations"] -= Managing deployment processes -include::_attributes/common-attributes.adoc[] -:context: deployment-operations - -toc::[] - -[id="deploymentconfig-operations"] -== Managing DeploymentConfig objects - -`DeploymentConfig` objects can be managed from the {product-title} web console's *Workloads* page or using the `oc` CLI. The following procedures show CLI usage unless otherwise stated. 
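For orientation, the following are a few typical commands of the kind that the included modules cover, using a hypothetical `frontend` deployment configuration:

[source,terminal]
----
$ oc rollout latest dc/frontend <1>

$ oc rollout history dc/frontend <2>

$ oc rollout undo dc/frontend <3>
----
<1> Starts a new deployment process for the `frontend` deployment configuration.
<2> Shows the deployment history.
<3> Rolls back to the previously deployed revision.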
- -include::modules/deployments-starting-deployment.adoc[leveloffset=+2] -include::modules/deployments-viewing-deployment.adoc[leveloffset=+2] -include::modules/deployments-retrying-deployment.adoc[leveloffset=+2] -include::modules/deployments-rolling-back.adoc[leveloffset=+2] -include::modules/deployments-exec-cmd-in-container.adoc[leveloffset=+2] -include::modules/deployments-viewing-logs.adoc[leveloffset=+2] -include::modules/deployments-triggers.adoc[leveloffset=+2] -include::modules/deployments-setting-triggers.adoc[leveloffset=+3] -include::modules/deployments-setting-resources.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* For more information about resource limits and requests, see xref:../../nodes/clusters/nodes-cluster-resource-configure.adoc#nodes-cluster-resource-configure-about_nodes-cluster-resource-configure[Understanding managing application memory]. - -include::modules/deployments-scaling-manually.adoc[leveloffset=+2] -include::modules/deployments-accessing-private-repos.adoc[leveloffset=+2] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/deployments-assigning-pods-to-nodes.adoc[leveloffset=+2] -endif::[] - -ifndef::openshift-online[] -include::modules/deployments-running-pod-svc-acct.adoc[leveloffset=+2] -endif::[] - -//// -== Managing Deployments - -Need docs on managing Deployment objects. -//// diff --git a/applications/deployments/modules b/applications/deployments/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/applications/deployments/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/applications/deployments/osd-config-custom-domains-applications.adoc b/applications/deployments/osd-config-custom-domains-applications.adoc deleted file mode 100644 index e652e9b7e075..000000000000 --- a/applications/deployments/osd-config-custom-domains-applications.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_content-type: ASSEMBLY -[id="osd-config-custom-domains-applications"] -= Custom domains for applications -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: osd-config-custom-domains-applications - -toc::[] - -You can configure a custom domain for your applications. Custom domains are specific wildcard domains that can be used with {product-title} applications. - -include::modules/osd-applications-config-custom-domains.adoc[leveloffset=+1] -include::modules/osd-applications-renew-custom-domains.adoc[leveloffset=+1] diff --git a/applications/deployments/route-based-deployment-strategies.adoc b/applications/deployments/route-based-deployment-strategies.adoc deleted file mode 100644 index 87df7e2548e8..000000000000 --- a/applications/deployments/route-based-deployment-strategies.adoc +++ /dev/null @@ -1,35 +0,0 @@ -:_content-type: ASSEMBLY -[id="route-based-deployment-strategies"] -= Using route-based deployment strategies -include::_attributes/common-attributes.adoc[] -:context: route-based-deployment-strategies - -toc::[] - -Deployment strategies provide a way for the application to evolve. Some strategies use `Deployment` objects to make changes that are seen by users of all routes that resolve to the application. Other advanced strategies, such as the ones described in this section, use router features in conjunction with `Deployment` objects to impact specific routes. - -//// -This link keeps breaking Travis for some reason. 
- -[NOTE] -==== -See -xref:../../applications/deployments/deployment-strategies.adoc#deployment-strategies[Using deployment strategies] -for more on the basic strategy types. -==== -//// - -The most common route-based strategy is to use a _blue-green deployment_. The new version (the green version) is brought up for testing and evaluation, while the users still use the stable version (the blue version). When ready, the users are switched to the green version. If a problem arises, you can switch back to the blue version. - -A common alternative strategy is to use _A/B versions_ that are both active at the same time, where some users use one version and some users use the other version. This can be used for experimenting with user interface changes and other features to get user feedback. It can also be used to verify proper operation in a production context where problems impact a limited number of users. - -A canary deployment tests the new version, but when a problem is detected, it quickly falls back to the previous version. This can be done with both of the above strategies. - -The route-based deployment strategies do not scale the number of pods in the services. To maintain the desired performance characteristics, the deployment configurations might have to be scaled. - -include::modules/deployments-proxy-shards.adoc[leveloffset=+1] -include::modules/deployments-n1-compatibility.adoc[leveloffset=+1] -include::modules/deployments-graceful-termination.adoc[leveloffset=+1] -include::modules/deployments-blue-green.adoc[leveloffset=+1] -include::modules/deployments-ab-testing.adoc[leveloffset=+1] -include::modules/deployments-ab-testing-lb.adoc[leveloffset=+2] diff --git a/applications/deployments/what-deployments-are.adoc b/applications/deployments/what-deployments-are.adoc deleted file mode 100644 index 09654a5d92f6..000000000000 --- a/applications/deployments/what-deployments-are.adoc +++ /dev/null @@ -1,57 +0,0 @@ -:_content-type: ASSEMBLY -[id="what-deployments-are"] -= Understanding Deployment and DeploymentConfig objects -include::_attributes/common-attributes.adoc[] -:context: what-deployments-are - -toc::[] - -The `Deployment` and `DeploymentConfig` API objects in {product-title} provide two similar but different methods for fine-grained management over common user applications. They are composed of the following separate API objects: - -* A `DeploymentConfig` or `Deployment` object, either of which describes the desired state of a particular component of the application as a pod template. -* `DeploymentConfig` objects involve one or more _replication controllers_, which contain a point-in-time record of the state of a deployment as a pod template. Similarly, `Deployment` objects involve one or more _replica sets_, a successor of replication controllers. -* One or more pods, which represent an instance of a particular version of an application.
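As a hedged illustration of the route-based strategies described above, traffic can be split between two versions of a service by adjusting route backend weights from the CLI; the route and service names are placeholders:

[source,terminal]
----
$ oc set route-backends ab-example ab-example-a=75 ab-example-b=25 <1>
----
<1> Sends approximately 75% of the traffic to the `ab-example-a` service and 25% to `ab-example-b`. Shifting all of the weight to one backend performs a blue-green style switch-over.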
- -//// -Update when converted: -[role="_additional-resources"] -.Additional resources - -xref:../../applications/deployments/advanced_deployment_strategies.adoc#graceful-termination[graceful shutdown] -xref:../../applications/basic_deployment_operations.adoc#triggers[Triggers] -xref:../../applications/deployment_strategies.adoc#strategies[strategies] -xref:../../applications/deployment_strategies.adoc#lifecycle-hooks[hooks] -xref:../../applications/basic_deployment_operations.adoc#rolling-back-a-deployment[rollbacks] -xref:../../applications/basic_deployment_operations.adoc#scaling[scaling] -xref:../../dev_guide/pod_autoscaling.adoc#dev-guide-pod-autoscaling[autoscaling] -//// - -[id="what-deployments-are-build-blocks"] -== Building blocks of a deployment - -Deployments and deployment configs are enabled by the use of native Kubernetes API objects `ReplicaSet` and `ReplicationController`, respectively, as their building blocks. - -Users do not have to manipulate replication controllers, replica sets, or pods owned by `DeploymentConfig` objects or deployments. The deployment systems ensure changes are propagated appropriately. - -[TIP] -==== -If the existing deployment strategies are not suited for your use case and you must run manual steps during the lifecycle of your deployment, then you should consider creating a custom deployment strategy. -==== - -The following sections provide further details on these objects. - -include::modules/deployments-replicationcontrollers.adoc[leveloffset=+2] -include::modules/deployments-replicasets.adoc[leveloffset=+2] - -include::modules/deployments-deploymentconfigs.adoc[leveloffset=+1] -include::modules/deployments-kube-deployments.adoc[leveloffset=+1] -include::modules/deployments-comparing-deploymentconfigs.adoc[leveloffset=+1] -//// -Update when converted: -[role="_additional-resources"] -.Additional resources - -- xref:../../dev_guide/managing_images.adoc#dev-guide-managing-images[Imagestreams] -- xref:../../dev_guide/deployments/deployment_strategies.adoc#lifecycle-hooks[Lifecycle hooks] -- xref:../../dev_guide/deployments/deployment_strategies.adoc#custom-strategy[Custom deployment strategies] -//// diff --git a/applications/idling-applications.adoc b/applications/idling-applications.adoc deleted file mode 100644 index 179701df2a45..000000000000 --- a/applications/idling-applications.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -[id="idling-applications"] -= Idling applications -include::_attributes/common-attributes.adoc[] -:context: idling-applications - -toc::[] - -Cluster administrators can idle applications to reduce resource consumption. This is useful when the cluster is deployed on a public cloud where cost is related to resource consumption. - -If any scalable resources are not in use, {product-title} discovers and idles them by scaling their replicas to `0`. The next time network traffic is directed to the resources, the resources are unidled by scaling up the replicas, and normal operation continues. - -Applications are made of services, as well as other scalable resources, such as deployment configs. The action of idling an application involves idling all associated resources. 
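As a brief illustration of idling from the CLI, a single service and its associated scalable resources can be idled with one command; the service name is a placeholder:

[source,terminal]
----
$ oc idle <service_name> <1>
----
<1> Scales the scalable resources associated with the service down to zero replicas. The resources are unidled automatically the next time network traffic is directed to the service.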
- -include::modules/idle-idling-applications.adoc[leveloffset=+1] -include::modules/idle-unidling-applications.adoc[leveloffset=+1] diff --git a/applications/images b/applications/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/applications/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/applications/index.adoc b/applications/index.adoc deleted file mode 100644 index 7f3a761b6feb..000000000000 --- a/applications/index.adoc +++ /dev/null @@ -1,50 +0,0 @@ -:_content-type: ASSEMBLY -[id="building-applications-overview"] -= Building applications overview -include::_attributes/common-attributes.adoc[] -:context: building-applications-overview - -toc::[] - -Using {product-title}, you can create, edit, delete, and manage applications using the web console or command line interface (CLI). - -[id="working-on-a-project"] -== Working on a project - -Using projects, you can organize and manage applications in isolation. You can manage the entire project lifecycle, including xref:../applications/projects/working-with-projects.adoc#working-with-projects[creating, viewing, and deleting a project] in {product-title}. - -After you create the project, you can xref:../applications/projects/working-with-projects.adoc#odc-providing-project-permissions-using-developer-perspective_projects[grant or revoke access to a project] and xref:../applications/projects/working-with-projects.adoc#odc-customizing-available-cluster-roles-using-developer-perspective_projects[manage cluster roles] for the users using the Developer perspective. You can also xref:../applications/projects/configuring-project-creation.adoc#configuring-project-creation[edit the project configuration resource] while creating a project template that is used for automatic provisioning of new projects. - -Using the CLI, you can xref:../applications/projects/creating-project-other-user.adoc#creating-project-other-user[create a project as a different user] by impersonating a request to the {product-title} API. When you make a request to create a new project, the {product-title} uses an endpoint to provision the project according to a customizable template. As a cluster administrator, you can choose to xref:../applications/projects/configuring-project-creation.adoc#disabling-project-self-provisioning_configuring-project-creation[prevent an authenticated user group from self-provisioning new projects]. - -[id="working-on-application"] -== Working on an application - -[id="creating-application"] -=== Creating an application - -To create applications, you must have created a project or have access to a project with the appropriate roles and permissions. You can create an application by using either xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[the Developer perspective in the web console], xref:../applications/creating_applications/creating-apps-from-installed-operators.adoc#creating-apps-from-installed-operators[installed Operators], or xref:../applications/creating_applications/creating-applications-using-cli.adoc#creating-applications-using-cli[the {product-title} CLI]. You can source the applications to be added to the project from Git, JAR files, devfiles, or the developer catalog. - -You can also use components that include source or binary code, images, and templates to create an application by using the {product-title} CLI. 
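As an illustrative sketch of the CLI path, the following commands build and deploy an application from a Git repository and then summarize the resources that were created. The repository URL and label value are example inputs only, not requirements of this documentation:

[source,terminal]
----
$ oc new-app https://github.com/sclorg/nodejs-ex -l app=nodejs-sample   # build and deploy from source code in Git
$ oc status                                                             # review the resources that new-app created
----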
With the {product-title} web console, you can create an application from an Operator installed by a cluster administrator. - -[id="maintaining-application"] -=== Maintaining an application - -After you create the application, you can use the web console to xref:../applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc#odc-monitoring-project-and-application-metrics-using-developer-perspective[monitor your project or application metrics]. You can also xref:../applications/odc-editing-applications.adoc#odc-editing-applications[edit] or xref:../applications/odc-deleting-applications.adoc#odc-deleting-applications[delete] the application using the web console. -When the application is running, not all application resources are used. As a cluster administrator, you can choose to xref:../applications/idling-applications.adoc#idling-applications[idle these scalable resources] to reduce resource consumption. - -[id="connecting-application"] -=== Connecting an application to services - -An application uses backing services to build and connect workloads, which vary according to the service provider. Using the xref:../applications/connecting_applications_to_services/understanding-service-binding-operator.adoc#understanding-service-binding-operator[Service Binding Operator], as a developer, you can bind workloads together with Operator-managed backing services, without any manual procedures to configure the binding connection. You can also apply service binding on xref:../applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc#getting-started-with-service-binding-ibm-power-ibm-z[{ibmpowerProductName}, {ibmzProductName}, and {linuxoneProductName} environments]. - -[id="deploying-application"] -=== Deploying an application -You can deploy your application using xref:../applications/deployments/what-deployments-are.adoc#what-deployments-are[`Deployment` or `DeploymentConfig`] objects and xref:../applications/deployments/managing-deployment-processes.adoc#deployment-operations[manage] them from the web console. You can create xref:../applications/deployments/deployment-strategies.adoc#deployment-strategies[deployment strategies] that help reduce downtime during a change or an upgrade to the application. - -You can also use xref:../applications/working_with_helm_charts/understanding-helm.adoc#understanding-helm[Helm], a software package manager that simplifies deployment of applications and services to {product-title} clusters. - -[id="redhat-marketplace"] -== Using the Red Hat Marketplace - -The xref:../applications/red-hat-marketplace.adoc#red-hat-marketplace[Red Hat Marketplace] is an open cloud marketplace where you can discover and access certified software for container-based environments that run on public clouds and on-premises.
\ No newline at end of file diff --git a/applications/modules b/applications/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/applications/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/applications/odc-deleting-applications.adoc b/applications/odc-deleting-applications.adoc deleted file mode 100644 index 6082134feb51..000000000000 --- a/applications/odc-deleting-applications.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="odc-deleting-applications"] -= Deleting applications -include::_attributes/common-attributes.adoc[] -:context: odc-deleting-applications - -toc::[] - -You can delete applications created in your project. - -include::modules/odc-deleting-applications-using-developer-perspective.adoc[leveloffset=+1] diff --git a/applications/odc-editing-applications.adoc b/applications/odc-editing-applications.adoc deleted file mode 100644 index 6c27e73f4e89..000000000000 --- a/applications/odc-editing-applications.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_content-type: ASSEMBLY -[id="odc-editing-applications"] -= Editing applications -include::_attributes/common-attributes.adoc[] -:context: odc-editing-applications - -toc::[] - -You can edit the configuration and the source code of the application you create using the *Topology* view. - -== Prerequisites - -* You have the appropriate xref:../authentication/using-rbac.adoc#default-roles_using-rbac[roles and permissions] in a project to create and modify applications in {product-title}. -* You have xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[created and deployed an application on {product-title} using the *Developer* perspective]. -* You have xref:../web_console/web-console.adoc#web-console[logged in to the web console] and have switched to xref:../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[the *Developer* perspective]. - -include::modules/odc-editing-source-code-using-developer-perspective.adoc[leveloffset=+1] - -include::modules/odc-editing-application-configuration-using-developer-perspective.adoc[leveloffset=+1] diff --git a/applications/odc-exporting-applications.adoc b/applications/odc-exporting-applications.adoc deleted file mode 100644 index ebef502465a4..000000000000 --- a/applications/odc-exporting-applications.adoc +++ /dev/null @@ -1,40 +0,0 @@ -:_content-type: ASSEMBLY -[id="odc-exporting-applications"] -= Exporting applications -include::_attributes/common-attributes.adoc[] -:context: odc-exporting-applications - -toc::[] - -As a developer, you can export your application in the ZIP file format. Based on your needs, import the exported application to another project in the same cluster or a different cluster by using the *Import YAML* option in the *+Add* view. Exporting your application helps you to reuse your application resources and saves your time. - -[id="prerequisites_odc-exporting-applications"] -== Prerequisites - -* You have installed the gitops-primer Operator from the OperatorHub. -+ -[NOTE] -==== -The *Export application* option is disabled in the *Topology* view even after installing the gitops-primer Operator. -==== - -* You have created an application in the *Topology* view to enable *Export application*. - -[id="odc-exporting-applications-procedure"] -== Procedure - -. In the developer perspective, perform one of the following steps: -.. 
Navigate to the *+Add* view and click *Export application* in the *Application portability* tile. -.. Navigate to the *Topology* view and click *Export application*. - -. Click *OK* in the *Export Application* dialog box. A notification opens to confirm that the export of resources from your project has started. - -. Optional steps that you might need to perform in the following scenarios: -+ -* If you have started exporting an incorrect application, click *Export application* -> *Cancel Export*. -* If your export is already in progress and you want to start a fresh export, click *Export application* -> *Restart Export*. -* If you want to view logs associated with exporting an application, click *Export application* and the *View Logs* link. -+ -image::export-application-dialog-box.png[] - -. After a successful export, click *Download* in the dialog box to download application resources in ZIP format onto your machine. diff --git a/applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc b/applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc deleted file mode 100644 index b70a8da2de8b..000000000000 --- a/applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -[id="odc-monitoring-project-and-application-metrics-using-developer-perspective"] -= Monitoring project and application metrics using the Developer perspective -include::_attributes/common-attributes.adoc[] -:context: monitoring-project-and-application-metrics-using-developer-perspective - -toc::[] - - -The *Observe* view in the *Developer* perspective provides options to monitor your project or application metrics, such as CPU, memory, and bandwidth usage, and network related information. - -[id="prerequisites_odc-monitoring-project-and-application-metrics-using-developer-perspective"] -== Prerequisites - -* You have xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[created and deployed applications on {product-title}]. -* You have xref:../web_console/web-console.adoc#web-console[logged in to the web console] and have switched to xref:../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[the *Developer* perspective]. 
- -include::modules/odc-monitoring-your-project-metrics.adoc[leveloffset=+1] - -include::modules/odc-monitoring-your-application-metrics.adoc[leveloffset=+1] - -include::modules/odc-image-vulnerabilities-breakdown.adoc[leveloffset=+1] - -include::modules/odc-monitoring-your-app-vulnerabilities.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources-odc-monitoring-project-and-application-metrics-using-developer-perspective"] -== Additional resources -* xref:../monitoring/monitoring-overview.adoc#monitoring-overview[Monitoring overview] diff --git a/applications/odc-viewing-application-composition-using-topology-view.adoc b/applications/odc-viewing-application-composition-using-topology-view.adoc deleted file mode 100644 index 576a2d56f718..000000000000 --- a/applications/odc-viewing-application-composition-using-topology-view.adoc +++ /dev/null @@ -1,40 +0,0 @@ -:_content-type: ASSEMBLY -[id="odc-viewing-application-composition-using-topology-view"] -= Viewing application composition using the Topology view -include::_attributes/common-attributes.adoc[] -:context: viewing-application-composition-using-topology-view - -toc::[] - -The *Topology* view in the *Developer* perspective of the web console provides a visual representation of all the applications within a project, their build status, and the components and services associated with them. - -== Prerequisites -To view your applications in the *Topology* view and interact with them, ensure that: - -* You have xref:../web_console/web-console.adoc#web-console[logged in to the web console]. -* You have the appropriate xref:../authentication/using-rbac.adoc#default-roles_using-rbac[roles and permissions] in a project to create applications and other workloads in {product-title}. -* You have xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[created and deployed an application on {product-title} using the *Developer* perspective]. -* You are in xref:../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[the *Developer* perspective]. - -include::modules/odc-viewing-application-topology.adoc[leveloffset=+1] - -include::modules/odc-interacting-with-applications-and-components.adoc[leveloffset=+1] - -include::modules/odc-scaling-application-pods-and-checking-builds-and-routes.adoc[leveloffset=+1] - -include::modules/odc-adding-components-to-an-existing-project.adoc[leveloffset=+1] - -include::modules/odc-grouping-multiple-components.adoc[leveloffset=+1] - -include::modules/odc-adding-services-to-application.adoc[leveloffset=+1] - -include::modules/odc-removing-services-from-application.adoc[leveloffset=+1] - -include::modules/odc-labels-and-annotations-used-for-topology-view.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* See xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-importing-codebase-from-git-to-create-application_odc-creating-applications-using-developer-perspective[Importing a codebase from Git to create an application] for more information on creating an application from Git. -* See xref:../applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc#odc-connecting-an-application-to-a-service-using-the-developer-perspective[Connecting an application to a service using the Developer perspective]. 
-* See xref:../applications/odc-exporting-applications.adoc#odc-exporting-applications[Exporting applications] diff --git a/applications/projects/_attributes b/applications/projects/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/applications/projects/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/applications/projects/configuring-project-creation.adoc b/applications/projects/configuring-project-creation.adoc deleted file mode 100644 index 1d9aa09de82f..000000000000 --- a/applications/projects/configuring-project-creation.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-project-creation"] -= Configuring project creation -include::_attributes/common-attributes.adoc[] -:context: configuring-project-creation - -toc::[] - -In {product-title}, _projects_ are used to group and isolate related objects. -When a request is made to create a new project using the web console or `oc -new-project` command, an endpoint in {product-title} is used to provision the -project according to a template, which can be customized. - -As -a cluster administrator, you can allow and configure how developers and service -accounts can create, or _self-provision_, their own projects. - -include::modules/about-project-creation.adoc[leveloffset=+1] -include::modules/modifying-template-for-new-projects.adoc[leveloffset=+1] -include::modules/disabling-project-self-provisioning.adoc[leveloffset=+1] -include::modules/customizing-project-request-message.adoc[leveloffset=+1] diff --git a/applications/projects/creating-project-other-user.adoc b/applications/projects/creating-project-other-user.adoc deleted file mode 100644 index 49c9844f7e0a..000000000000 --- a/applications/projects/creating-project-other-user.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-project-other-user"] -= Creating a project as another user -include::_attributes/common-attributes.adoc[] -:context: creating-project-other-user - -toc::[] - -Impersonation allows you to create a project as a different user. - -include::modules/authentication-api-impersonation.adoc[leveloffset=+1] - -include::modules/impersonation-project-creation.adoc[leveloffset=+1] diff --git a/applications/projects/images b/applications/projects/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/applications/projects/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/applications/projects/modules b/applications/projects/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/applications/projects/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/applications/projects/snippets b/applications/projects/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/applications/projects/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/applications/projects/working-with-projects.adoc b/applications/projects/working-with-projects.adoc deleted file mode 100644 index a155c0139e4a..000000000000 --- a/applications/projects/working-with-projects.adoc +++ /dev/null @@ -1,44 +0,0 @@ -:_content-type: ASSEMBLY -[id="working-with-projects"] -= Working with projects -include::_attributes/common-attributes.adoc[] -:context: projects - -toc::[] - -A _project_ allows a community of users to organize and manage their content in -isolation from other communities. 
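For reference, projects are commonly created and listed from the CLI as sketched below. The project names, display name, and user name are placeholders; the second command is an alternative invocation that uses the impersonation flags from the "creating a project as another user" workflow referenced earlier, and it assumes you have permission to impersonate that user:

[source,terminal]
----
$ oc new-project demo-project --display-name="Demo Project" --description="Example project"
$ oc new-project demo-project-2 --as=alice \
    --as-group=system:authenticated --as-group=system:authenticated:oauth   # create a project as another user
$ oc get projects                                                           # list the projects you can access
----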
- -[NOTE] -==== -Projects starting with `openshift-` and `kube-` are xref:../../authentication/using-rbac.adoc#rbac-default-projects_using-rbac[default projects]. These projects host cluster components that run as pods and other infrastructure components. As such, {product-title} does not allow you to create projects starting with `openshift-` or `kube-` using the `oc new-project` command. Cluster administrators can create these projects using the `oc adm new-project` command. -==== - -[NOTE] -==== -You cannot assign an SCC to pods created in one of the default namespaces: `default`, `kube-system`, `kube-public`, `openshift-node`, `openshift-infra`, and `openshift`. You cannot use these namespaces for running pods or services. -==== - -include::modules/creating-a-project-using-the-web-console.adoc[leveloffset=+1] - -include::modules/odc-creating-projects-using-developer-perspective.adoc[leveloffset=+1] - -include::modules/creating-a-project-using-the-CLI.adoc[leveloffset=+1] - -include::modules/viewing-a-project-using-the-web-console.adoc[leveloffset=+1] - -include::modules/viewing-a-project-using-the-CLI.adoc[leveloffset=+1] - -include::modules/odc-providing-project-permissions-using-developer-perspective.adoc[leveloffset=+1] - -include::modules/odc-customizing-available-cluster-roles-using-developer-perspective.adoc[leveloffset=+1] - -include::modules/adding-to-a-project.adoc[leveloffset=+1] - -include::modules/checking-project-status-using-the-web-console.adoc[leveloffset=+1] - -include::modules/checking-project-status-using-the-CLI.adoc[leveloffset=+1] - -include::modules/deleting-a-project-using-the-web-console.adoc[leveloffset=+1] - -include::modules/deleting-a-project-using-the-CLI.adoc[leveloffset=+1] diff --git a/applications/pruning-objects.adoc b/applications/pruning-objects.adoc deleted file mode 100644 index 0b42fd73b34e..000000000000 --- a/applications/pruning-objects.adoc +++ /dev/null @@ -1,46 +0,0 @@ -:_content-type: ASSEMBLY -[id="pruning-objects"] -= Pruning objects to reclaim resources -include::_attributes/common-attributes.adoc[] -:context: pruning-objects - -toc::[] - -Over time, API objects created in {product-title} can accumulate in the -cluster's etcd data store through normal user operations, such as when building -and deploying applications. - -Cluster administrators can periodically prune older versions of objects from the -cluster that are no longer required. For example, by pruning images you can delete -older images and layers that are no longer in use, but are still taking up disk -space. 
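A hedged sketch of typical pruning commands follows. The retention values are arbitrary examples, and image pruning additionally requires access to the exposed image registry route with appropriate credentials:

[source,terminal]
----
$ oc adm prune deployments --orphans --keep-complete=5 --keep-failed=1 \
    --keep-younger-than=60m --confirm                                        # prune old replication controllers
$ oc adm prune images --keep-tag-revisions=3 --keep-younger-than=60m --confirm   # prune unused images and layers
----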
- -include::modules/pruning-basic-operations.adoc[leveloffset=+1] -include::modules/pruning-groups.adoc[leveloffset=+1] -include::modules/pruning-deployments.adoc[leveloffset=+1] -include::modules/pruning-builds.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../cicd/builds/advanced-build-operations.adoc#builds-build-pruning-advanced-build-operations[Performing advanced builds -> Pruning builds] - -include::modules/pruning-images.adoc[leveloffset=+1] -include::modules/pruning-images-manual.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../registry/accessing-the-registry.adoc#accessing-the-registry[Accessing the registry] -* xref:../registry/securing-exposing-registry.adoc#securing-exposing-registry[Exposing the registry] -* See -xref:../registry/configuring-registry-operator.adoc#configuring-registry-operator[Image -Registry Operator in {product-title}] for information on how to create a -registry route. - -include::modules/pruning-hard-pruning-registry.adoc[leveloffset=+1] -include::modules/pruning-cronjobs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../nodes/jobs/nodes-nodes-jobs.adoc#nodes-nodes-jobs_nodes-nodes-jobs[Running tasks in pods using jobs] -* xref:../applications/quotas/quotas-setting-across-multiple-projects.adoc#setting-quotas-across-multiple-projects[Resource quotas across multiple projects] -* xref:../authentication/using-rbac.adoc#using-rbac[Using RBAC to define and apply permissions] diff --git a/applications/quotas/_attributes b/applications/quotas/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/applications/quotas/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/applications/quotas/images b/applications/quotas/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/applications/quotas/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/applications/quotas/modules b/applications/quotas/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/applications/quotas/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/applications/quotas/quotas-setting-across-multiple-projects.adoc b/applications/quotas/quotas-setting-across-multiple-projects.adoc deleted file mode 100644 index 471a0343a6ec..000000000000 --- a/applications/quotas/quotas-setting-across-multiple-projects.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_content-type: ASSEMBLY -[id="setting-quotas-across-multiple-projects"] -= Resource quotas across multiple projects -include::_attributes/common-attributes.adoc[] -:context: setting-quotas-across-multiple-projects - -toc::[] - -A multi-project quota, defined by a `ClusterResourceQuota` object, allows quotas to be shared across multiple projects. Resources used in each selected project are aggregated and that aggregate is used to limit resources across all the selected projects. - -This guide describes how cluster administrators can set and manage resource quotas across multiple projects. 
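As a minimal sketch of the multi-project case, a cluster administrator can create a quota that applies to every project requested by a given user and then inspect the applied quota from inside one of the selected projects. The quota name, annotation value, and limits are example values only:

[source,terminal]
----
$ oc create clusterresourcequota for-user \
    --project-annotation-selector openshift.io/requester=alice \
    --hard pods=10 --hard secrets=20          # aggregate limits across the projects that alice requested
$ oc describe AppliedClusterResourceQuota     # run inside one of the selected projects to view usage
----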
- -include::modules/quotas-selecting-projects.adoc[leveloffset=+1] -include::modules/quotas-viewing-clusterresourcequotas.adoc[leveloffset=+1] -include::modules/quotas-selection-granularity.adoc[leveloffset=+1] diff --git a/applications/quotas/quotas-setting-per-project.adoc b/applications/quotas/quotas-setting-per-project.adoc deleted file mode 100644 index daf75d747225..000000000000 --- a/applications/quotas/quotas-setting-per-project.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="quotas-setting-per-project"] -= Resource quotas per project -include::_attributes/common-attributes.adoc[] -:context: quotas-setting-per-project - -toc::[] - -A _resource quota_, defined by a `ResourceQuota` object, provides constraints that limit aggregate resource consumption per project. It can limit the quantity of objects that can be created in a project by type, as well as the total amount of compute resources and storage that might be consumed by resources in that project. - -This guide describes how resource quotas work, how cluster administrators can set and manage resource quotas on a per project basis, and how developers and cluster administrators can view them. - -include::modules/quotas-resources-managed.adoc[leveloffset=+1] -include::modules/quotas-scopes.adoc[leveloffset=+1] -include::modules/quotas-enforcement.adoc[leveloffset=+1] -include::modules/quotas-requests-vs-limits.adoc[leveloffset=+1] -include::modules/quotas-sample-resource-quotas-def.adoc[leveloffset=+1] -include::modules/quotas-creating-a-quota.adoc[leveloffset=+1] -include::modules/quotas-creating-object-count-quotas.adoc[leveloffset=+2] -include::modules/setting-resource-quota-for-extended-resources.adoc[leveloffset=+2] -include::modules/quotas-viewing-quotas.adoc[leveloffset=+1] -include::modules/quotas-requiring-explicit-quota.adoc[leveloffset=+1] diff --git a/applications/quotas/snippets b/applications/quotas/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/applications/quotas/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/applications/red-hat-marketplace.adoc b/applications/red-hat-marketplace.adoc deleted file mode 100644 index d875e3eeaac7..000000000000 --- a/applications/red-hat-marketplace.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="red-hat-marketplace"] -= Using the Red Hat Marketplace -include::_attributes/common-attributes.adoc[] -:context: red-hat-marketplace - -toc::[] - -The link:https://marketplace.redhat.com[Red Hat Marketplace] is an open cloud marketplace that makes it easy to discover and access certified software for container-based environments that run on public clouds and on-premises. - -include::modules/red-hat-marketplace-features.adoc[leveloffset=+1] diff --git a/applications/snippets b/applications/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/applications/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/applications/working-with-quotas.adoc b/applications/working-with-quotas.adoc deleted file mode 100644 index dfa5dfd6b866..000000000000 --- a/applications/working-with-quotas.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_content-type: ASSEMBLY -[id="working-with-quotas"] -= Working with quotas -include::_attributes/common-attributes.adoc[] -:context: working-with-quotas - -toc::[] - -A _resource quota_, defined by a ResourceQuota object, provides constraints that -limit aggregate resource consumption per project. 
It can limit the quantity of -objects that can be created in a project by type, as well as the total amount of -compute resources and storage that may be consumed by resources in that project. - -An _object quota count_ places a defined quota on all standard namespaced resource -types. When using a resource quota, an object is charged against the quota if it -exists in server storage. These types of quotas are useful to protect against -exhaustion of storage resources. - -This guide describes how resource quotas work and how developers can work with -and view them. - -include::modules/quotas-viewing-quotas.adoc[leveloffset=+1] -include::modules/quotas-resources-managed.adoc[leveloffset=+1] -include::modules/quotas-scopes.adoc[leveloffset=+1] -include::modules/quotas-enforcement.adoc[leveloffset=+1] -include::modules/quotas-requests-vs-limits.adoc[leveloffset=+1] diff --git a/applications/working_with_helm_charts/_attributes b/applications/working_with_helm_charts/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/applications/working_with_helm_charts/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/applications/working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc b/applications/working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc deleted file mode 100644 index 933193d52908..000000000000 --- a/applications/working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc +++ /dev/null @@ -1,47 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-custom-helm-chart-repositories"] -= Configuring custom Helm chart repositories -include::_attributes/common-attributes.adoc[] -:context: configuring-custom-helm-chart-repositories - -toc::[] - -[role="_abstract"] -You can create Helm releases on an {product-title} cluster using the following methods: - -* The CLI. -* The *Developer* perspective of the web console. - -The *Developer Catalog*, in the *Developer* perspective of the web console, displays the Helm charts available in the cluster. By default, it lists the Helm charts from the Red Hat OpenShift Helm chart repository. For a list of the charts, see link:https://charts.openshift.io/index.yaml[the Red Hat `Helm index` file]. - -As a cluster administrator, you can add multiple cluster-scoped and namespace-scoped Helm chart repositories, separate from the default cluster-scoped Helm repository, and display the Helm charts from these repositories in the *Developer Catalog*. - -As a regular user or project member with the appropriate role-based access control (RBAC) permissions, you can add multiple namespace-scoped Helm chart repositories, apart from the default cluster-scoped Helm repository, and display the Helm charts from these repositories in the *Developer Catalog*. - -In the *Developer* perspective of the web console, you can use the *Helm* page to: - -* Create Helm Releases and Repositories using the *Create* button. - -* Create, update, or delete a cluster-scoped or namespace-scoped Helm chart repository. - -* View the list of the existing Helm chart repositories in the Repositories tab, which can also be easily distinguished as either cluster scoped or namespace scoped. 
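A cluster-scoped repository is added by creating a `HelmChartRepository` resource. The following is a sketch under the assumption that the `helm.openshift.io/v1beta1` API is available in your cluster; the repository name and URL are placeholders, so verify the field names against the CRD in your version:

[source,terminal]
----
$ cat <<EOF | oc apply -f -
apiVersion: helm.openshift.io/v1beta1
kind: HelmChartRepository
metadata:
  name: example-charts
spec:
  name: example-charts                 # display name shown in the Developer Catalog
  connectionConfig:
    url: https://charts.example.com    # index URL of the chart repository (placeholder)
EOF
----

Namespace-scoped repositories follow the same pattern with the `ProjectHelmChartRepository` kind, created in the target namespace, where that API is available.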
- -include::modules/helm-installing-a-helm-chart-on-an-openshift-cluster.adoc[leveloffset=+1] - -include::modules/odc-creating-helm-releases-using-developer-perspective.adoc[leveloffset=+1] - -== Using Helm in the web terminal - -You can use Helm by xref:../../web_console/web_terminal/odc-using-web-terminal.adoc#odc-access-web-terminal_odc-using-web-terminal[Accessing the web terminal] in the *Developer* perspective of the web console. - -include::modules/helm-creating-a-custom-helm-chart-on-openshift.adoc[leveloffset=+1] - -include::modules/helm-adding-helm-chart-repositories.adoc[leveloffset=+1] - -include::modules/helm-adding-namespace-scoped-helm-chart-repositories.adoc[leveloffset=+1] - -include::modules/helm-creating-credentials-and-certificates-to-add-helm-repositories.adoc[leveloffset=+1] - -include::modules/helm-filtering-helm-charts-by-certification-level.adoc[leveloffset=+1] - -include::modules/helm-disabling-helm-chart-repositories.adoc[leveloffset=+1] diff --git a/applications/working_with_helm_charts/images b/applications/working_with_helm_charts/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/applications/working_with_helm_charts/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/applications/working_with_helm_charts/installing-helm.adoc b/applications/working_with_helm_charts/installing-helm.adoc deleted file mode 100644 index 59a50498563a..000000000000 --- a/applications/working_with_helm_charts/installing-helm.adoc +++ /dev/null @@ -1,106 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-helm"] -= Installing Helm -include::_attributes/common-attributes.adoc[] -:context: installing-helm - -toc::[] - -The following section describes how to install Helm on different platforms using the CLI. - -You can also find the URL to the latest binaries from the {product-title} web console by clicking the *?* icon in the upper-right corner and selecting *Command Line Tools*. - -.Prerequisites -* You have installed Go, version 1.13 or higher. - -== On Linux - -. Download the Helm binary and add it to your path: - -* Linux (x86_64, amd64) -+ -[source,terminal] ----- -# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-linux-amd64 -o /usr/local/bin/helm ----- - -* Linux on {ibmzProductName} and {linuxoneProductName} (s390x) -+ -[source,terminal] ----- -# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-linux-s390x -o /usr/local/bin/helm ----- - -* Linux on {ibmpowerProductName} (ppc64le) -+ -[source,terminal] ----- -# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-linux-ppc64le -o /usr/local/bin/helm ----- - -. Make the binary file executable: -+ -[source,terminal] ----- -# chmod +x /usr/local/bin/helm ----- - -. Check the installed version: -+ -[source,terminal] ----- -$ helm version ----- -+ -.Example output -[source,terminal] ----- -version.BuildInfo{Version:"v3.0", GitCommit:"b31719aab7963acf4887a1c1e6d5e53378e34d93", GitTreeState:"clean", GoVersion:"go1.13.4"} ----- - -== On Windows 7/8 - -. Download the latest link:https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-windows-amd64.exe[`.exe` file] and put in a directory of your preference. -. Right click *Start* and click *Control Panel*. -. Select *System and Security* and then click *System*. -. From the menu on the left, select *Advanced systems settings* and click *Environment Variables* at the bottom. -. 
Select *Path* from the *Variable* section and click *Edit*. -. Click *New* and type the path to the folder with the `.exe` file into the field or click *Browse* and select the directory, and click *OK*. - -== On Windows 10 - -. Download the latest link:https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-windows-amd64.exe[`.exe` file] and put in a directory of your preference. -. Click *Search* and type `env` or `environment`. -. Select *Edit environment variables for your account*. -. Select *Path* from the *Variable* section and click *Edit*. -. Click *New* and type the path to the directory with the exe file into the field or click *Browse* and select the directory, and click *OK*. - - -== On MacOS -. Download the Helm binary and add it to your path: -+ -[source,terminal] ----- -# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-darwin-amd64 -o /usr/local/bin/helm ----- - - -. Make the binary file executable: -+ -[source,terminal] ----- -# chmod +x /usr/local/bin/helm ----- - -. Check the installed version: -+ -[source,terminal] ----- -$ helm version ----- -+ -.Example output -[source,terminal] ----- -version.BuildInfo{Version:"v3.0", GitCommit:"b31719aab7963acf4887a1c1e6d5e53378e34d93", GitTreeState:"clean", GoVersion:"go1.13.4"} ----- diff --git a/applications/working_with_helm_charts/modules b/applications/working_with_helm_charts/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/applications/working_with_helm_charts/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/applications/working_with_helm_charts/odc-working-with-helm-releases.adoc b/applications/working_with_helm_charts/odc-working-with-helm-releases.adoc deleted file mode 100644 index cfecb6b7e9b3..000000000000 --- a/applications/working_with_helm_charts/odc-working-with-helm-releases.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="odc-working-with-helm-releases"] -= Working with Helm releases -include::_attributes/common-attributes.adoc[] -:context: working-with-helm-releases - -toc::[] - -You can use the *Developer* perspective in the web console to update, rollback, or delete a Helm release. - -== Prerequisites - -* You have logged in to the web console and have switched to xref:../../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[the *Developer* perspective]. - - -include::modules/odc-upgrading-helm-release.adoc[leveloffset=+1] - -include::modules/odc-rolling-back-helm-release.adoc[leveloffset=+1] - -include::modules/odc-deleting-helm-release.adoc[leveloffset=+1] diff --git a/applications/working_with_helm_charts/snippets b/applications/working_with_helm_charts/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/applications/working_with_helm_charts/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/applications/working_with_helm_charts/understanding-helm.adoc b/applications/working_with_helm_charts/understanding-helm.adoc deleted file mode 100644 index 83aead71501f..000000000000 --- a/applications/working_with_helm_charts/understanding-helm.adoc +++ /dev/null @@ -1,43 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-helm"] -= Understanding Helm -include::_attributes/common-attributes.adoc[] -:context: understanding-helm - -toc::[] - -[role="_abstract"] -Helm is a software package manager that simplifies deployment of applications and services to {product-title} clusters. 
- -Helm uses a packaging format called _charts_. -A Helm chart is a collection of files that describes {product-title} resources. - -Installing a chart in a cluster creates a running instance of the chart, known as a _release_. - -Each time a chart is installed, or a release is upgraded or rolled back, an incremental revision is created. - - -== Key features - -Helm provides the ability to: - -* Search through a large collection of charts stored in the chart repository. -* Modify existing charts. -* Create your own charts with {product-title} or Kubernetes resources. -* Package and share your applications as charts. - -//[NOTE] -//==== -// In {product-title} 4.10 and 4.11, Helm is disabled for the xref:../../web_console/web-console.adoc#multi-cluster-about_web-console[Multicluster Console] (Technology Preview). -//==== - -== Red Hat Certification of Helm charts for OpenShift - -You can have Red Hat verify and certify your Helm charts for all the components that you deploy on {product-title}. Charts go through an automated Red Hat OpenShift certification workflow that ensures security compliance as well as optimal integration with, and experience on, the platform. Certification assures the integrity of the chart and ensures that the Helm chart works seamlessly on Red Hat OpenShift clusters.
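The chart, release, and revision concepts described above map directly onto the Helm CLI. A minimal sketch follows; the repository alias, release name, and `<chart-name>` are placeholders, and the repository URL is assumed to be the Red Hat Helm chart index referenced elsewhere in this guide:

[source,terminal]
----
$ helm repo add openshift-helm-charts https://charts.openshift.io/   # register a chart repository
$ helm search repo openshift-helm-charts                             # browse the charts it provides
$ helm install my-release openshift-helm-charts/<chart-name>         # install a chart, creating revision 1
$ helm upgrade my-release openshift-helm-charts/<chart-name>         # upgrade the release, creating revision 2
$ helm rollback my-release 1                                         # roll back to revision 1
$ helm list                                                          # list releases in the current namespace
----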
diff --git a/applications_and_projects/working-with-projects.adoc b/applications_and_projects/working-with-projects.adoc deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/architecture/_attributes b/architecture/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/architecture/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/architecture/admission-plug-ins.adoc b/architecture/admission-plug-ins.adoc deleted file mode 100644 index c20f406a8c97..000000000000 --- a/architecture/admission-plug-ins.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -[id="admission-plug-ins"] -= Admission plugins -include::_attributes/common-attributes.adoc[] -:context: admission-plug-ins - -toc::[] - -// Concept modules -include::modules/admission-plug-ins-about.adoc[leveloffset=+1] - -include::modules/admission-plug-ins-default.adoc[leveloffset=+1] - -include::modules/admission-webhooks-about.adoc[leveloffset=+1] - -include::modules/admission-webhook-types.adoc[leveloffset=+1] - -// Procedure module -include::modules/configuring-dynamic-admission.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="admission-plug-ins-additional-resources"] -== Additional resources - -ifdef::openshift-enterprise,openshift-webscale[] -* xref:../networking/hardware_networks/configuring-sriov-operator.adoc#configuring-sriov-operator[Limiting custom network resources managed by the SR-IOV network device plugin] -endif::[] - -* xref:../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations_dedicating_nodes-scheduler-taints-tolerations[Defining tolerations that enable taints to qualify which pods should be scheduled on a node] - -* xref:../nodes/pods/nodes-pods-priority.adoc#admin-guide-priority-preemption-names_nodes-pods-priority[Pod priority class validation] diff --git a/architecture/architecture-installation.adoc b/architecture/architecture-installation.adoc deleted file mode 100644 index 39786a08a45d..000000000000 --- a/architecture/architecture-installation.adoc +++ /dev/null @@ -1,32 +0,0 @@ -:_content-type: ASSEMBLY -[id="architecture-installation"] -= Installation and update -include::_attributes/common-attributes.adoc[] -:context: architecture-installation - -toc::[] - -include::modules/installation-overview.adoc[leveloffset=+1] - -include::modules/supported-platforms-for-openshift-clusters.adoc[leveloffset=+2] - -include::modules/installation-process.adoc[leveloffset=+2] - -[discrete] -=== Installation scope - -The scope of the {product-title} installation program is intentionally narrow. It is designed for simplicity and ensured success. You can complete many more configuration tasks after installation completes. - -[role="_additional-resources"] -.Additional resources - -* See xref:../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Available cluster customizations] for details about {product-title} configuration resources. 
- -include::modules/update-service-overview.adoc[leveloffset=+1] - -include::modules/unmanaged-operators.adoc[leveloffset=+1] - -[id="architecture-installation-next-steps"] -== Next steps - -* xref:../installing/installing-preparing.adoc#installing-preparing[Selecting a cluster installation method and preparing it for users] diff --git a/architecture/architecture-rhcos.adoc b/architecture/architecture-rhcos.adoc deleted file mode 100644 index 50eacc440e5a..000000000000 --- a/architecture/architecture-rhcos.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="architecture-rhcos"] -= {op-system-first} -include::_attributes/common-attributes.adoc[] -:context: architecture-rhcos - -toc::[] - -include::modules/rhcos-about.adoc[leveloffset=+1] -include::modules/ignition-config-viewing.adoc[leveloffset=+1] -include::modules/digging-into-machine-config.adoc[leveloffset=+1] diff --git a/architecture/architecture.adoc b/architecture/architecture.adoc deleted file mode 100644 index 9ab4f4fa36cc..000000000000 --- a/architecture/architecture.adoc +++ /dev/null @@ -1,38 +0,0 @@ -:_content-type: ASSEMBLY -[id="architecture"] -= {product-title} architecture -include::_attributes/common-attributes.adoc[] -:context: architecture - -toc::[] - -include::modules/architecture-platform-introduction.adoc[leveloffset=+1] - -include::modules/architecture-kubernetes-introduction.adoc[leveloffset=+2] - -include::modules/architecture-container-application-benefits.adoc[leveloffset=+2] - -include::modules/architecture-platform-benefits.adoc[leveloffset=+2] -//// -== User facing components -* Workloads (Deployments, Jobs, ReplicaSets, etc) -* Operator Lifecycle Manager -* xref:../cicd/builds/understanding-image-builds.adoc[Builds] - The build component -provides an API and infrastructure for producing new container images using a -variety of techniques including industry standard Dockerfiles and publishing -them to either the cluster image registry, or an external registry. It also -provides integration with Jenkins based pipeline continuous integration -workflows. -* xref:../registry/index.adoc[Image Registry] - -The image registry provides a scalable repository for storing and retrieving -container images that are produced by and run on the cluster. Image access is -integrated with the cluster's role-based access controls and user authentication -system. -* xref:../openshift_images/images-understand.adoc[Image -streams] - The imagestream API provides an abstraction over container images -that exist in registries. It allows workloads to reference an image indirectly, -retains a history of the images that have been referenced, and allows -notification when an image is updated with a new version. -//// - -include::modules/cluster-entitlements.adoc[leveloffset=+2] diff --git a/architecture/argocd.adoc b/architecture/argocd.adoc deleted file mode 100644 index a8b4c5d1f258..000000000000 --- a/architecture/argocd.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_content-type: ASSEMBLY -[id="argocd"] -= Using ArgoCD with {product-title} -include::_attributes/common-attributes.adoc[] - -:context: argocd - -toc::[] - -[id="argocd-what"] -== What does ArgoCD do? - -ArgoCD is a declarative continuous delivery tool that leverages GitOps to maintain cluster resources. ArgoCD is implemented as a controller that continuously monitors application definitions and configurations defined in a Git repository and compares the specified state of those configurations with their live state on the cluster. 
Configurations that deviate from their specified state in the Git repository are classified as OutOfSync. ArgoCD reports these differences and allows administrators to automatically or manually resync configurations to the defined state. - -ArgoCD enables you to deliver global custom resources, like the resources that are used to configure {product-title} clusters. - -[id="argocd-support"] -== Statement of support - -Red Hat does not provide support for this tool. To obtain support for ArgoCD, see link:https://argoproj.github.io/argo-cd/SUPPORT/[Support] in the ArgoCD documentation. - -[id="argocd-documentation"] -== ArgoCD documentation - -For more information about using ArgoCD, see the link:https://argoproj.github.io/argo-cd/[ArgoCD documentation]. diff --git a/architecture/cicd_gitops.adoc b/architecture/cicd_gitops.adoc deleted file mode 100644 index 09bee1d19c1b..000000000000 --- a/architecture/cicd_gitops.adoc +++ /dev/null @@ -1,60 +0,0 @@ -:_content-type: ASSEMBLY -[id="cicd_gitops"] -= The CI/CD methodology and practice -include::_attributes/common-attributes.adoc[] -:context: cicd_gitops - -toc::[] - -Using a _continuous integration/continuous delivery_ (CI/CD) methodology enables you to regularly deliver applications to customers by introducing automation into the stages of application development, from integration and testing phases to delivery and deployment. The CI/CD process is often referred to as a "CI/CD pipeline." The main concepts attributed to CI/CD are continuous integration, continuous delivery, and continuous deployment. - -[id="cicd_admin"] -== CI/CD for cluster administration and application configuration management - -_Continuous integration_ is an automation process for developers. Code changes to an application are regularly built, tested, and merged to a shared repository. - -_Continuous delivery_ and _continuous deployment_ are closely related concepts that are sometimes used interchangeably and refer to automation of the pipeline. -Continuous delivery uses automation to ensure that a developer's changes to an application are tested and sent to a repository, where an operations team can deploy them to a production environment. Continuous deployment enables the release of changes, starting from the repository and ending in production. Continuous deployment speeds up application delivery and prevents the operations team from getting overloaded. - -[id="cicd_gitops_methodology"] -== The GitOps methodology and practice - -_GitOps_ is a set of practices that use Git pull requests to manage infrastructure and application configurations. The Git repository in GitOps is the only source of truth for system and application configuration. The repository contains the entire state of the system so that the trail of changes to the system state are visible and auditable. GitOps enables you to implement a DevOps methodology. - -You can use GitOps tooling to create repeatable and predictable processes for managing and recreating {product-title} clusters and applications. By using GitOps, you can address the issues of infrastructure and application configuration sprawl. It simplifies the propagation of infrastructure and application configuration changes across multiple clusters by defining your infrastructure and applications definitions as “code.” Implementing GitOps for your cluster configuration files can make automated installation easier and allow you to configure automated cluster customizations. 
You can apply the core principles of developing and maintaining software in a Git repository to the creation and management of your cluster and application configuration files. - -By using {product-title} to automate both your cluster configuration and container development process, you can pick and choose where and when to adopt GitOps practices. Using a CI pipeline that pairs with your GitOps strategy and execution plan is ideal. {product-title} provides the flexibility to choose when and how you integrate this methodology into your business practices and pipelines. - -GitOps integration works well with {product-title} because you can both declaratively configure clusters and store the state of the cluster configuration in Git. For more information, see xref:../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Available cluster customizations]. - -[id="cicd_gitops_cluster_administration"] -=== GitOps for single-cluster and multi-cluster administration - -Whether you need one or more independent or cooperative {product-title} clusters, you can use a GitOps strategy to manage the following tasks: - -* Ensure that the clusters have similar states for configuration, monitoring, or storage. -* Recover or recreate clusters from a known state. -* Create clusters with a known state. -* Apply or revert configuration changes to multiple {product-title} clusters. -* Associate templated configuration with different environments. - -[id="cicd_gitops_application_configuration"] -=== GitOps for application configuration management - -You can also use GitOps practices to manage application configuration. This practice ensures consistency in applications when you deploy them to different clusters in different environments, such as development, stage, and production. Managing application configuration with GitOps is also beneficial when you must deploy applications across multiple clusters, whether on-cloud or on-premises, for availability and scalability purposes. - -You can use a GitOps strategy to: - -* Promote applications across clusters, from stage to production. -* Apply or revert application changes to multiple {product-title} clusters. - -[id="cicd_gitops_integrators"] -=== GitOps technology providers and integrators - -There are several community offerings and third-party vendors that provide a high level of integration with {product-title}. - -You can integrate GitOps into {product-title} with the following community partners and third-party integrators: - -* xref:../architecture/argocd.adoc#argocd[ArgoCD] diff --git a/architecture/control-plane.adoc b/architecture/control-plane.adoc deleted file mode 100644 index ea482bd64c3f..000000000000 --- a/architecture/control-plane.adoc +++ /dev/null @@ -1,71 +0,0 @@ -:_content-type: ASSEMBLY -[id="control-plane"] -= Control plane architecture -include::_attributes/common-attributes.adoc[] -:context: control-plane - -toc::[] - -The _control plane_, which is composed of control plane machines, manages the {product-title} cluster. The control plane machines manage workloads on the compute machines, which are also known as worker machines. The cluster itself manages all upgrades to the machines by the actions of the Cluster Version Operator (CVO), the Machine Config Operator, and a set of individual Operators.
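For orientation, the state maintained by the Cluster Version Operator and the individual cluster Operators can be inspected directly. The following commands are a small, read-only illustrative sample, not an exhaustive diagnostic procedure:

[source,terminal]
----
$ oc get clusterversion    # desired and current cluster version managed by the CVO
$ oc get clusteroperators  # health and version of each cluster Operator
$ oc get nodes             # control plane and compute (worker) machines in the cluster
----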
- -include::modules/architecture-machine-config-pools.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../post_installation_configuration/machine-configuration-tasks.adoc#machine-config-drift-detection_post-install-machine-configuration-tasks[Understanding configuration drift detection]. - -include::modules/architecture-machine-roles.adoc[leveloffset=+1] - -include::modules/operators-overview.adoc[leveloffset=+1] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/arch-cluster-operators.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../operators/operator-reference.adoc#cluster-operators-ref[Cluster Operators reference] -endif::[] - -include::modules/arch-olm-operators.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* For more details on running add-on Operators in {product-title}, see the _Operators_ guide sections on xref:../operators/understanding/olm/olm-understanding-olm.adoc#olm-understanding-olm[Operator Lifecycle Manager (OLM)] and xref:../operators/understanding/olm-understanding-operatorhub.adoc#olm-understanding-operatorhub[OperatorHub]. -* For more details on the Operator SDK, see xref:../operators/operator_sdk/osdk-about.adoc#osdk-about[Developing Operators]. - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/arch-platform-operators.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../operators/admin/olm-managing-po.adoc#olm-managing-po[Managing platform Operators] -* xref:../operators/admin/olm-managing-po.adoc#olm-po-techpreview_olm-managing-po[Technology Preview restrictions for platform Operators] -* xref:../operators/understanding/olm-packaging-format.adoc#olm-rukpak-about_olm-packaging-format[RukPak component and packaging format] -* xref:../installing/cluster-capabilities.adoc#cluster-capabilities[Cluster capabilities] -endif::[] - -include::modules/understanding-machine-config-operator.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* For more information about detecting configuration drift, see xref:../post_installation_configuration/machine-configuration-tasks.adoc#machine-config-drift-detection_post-install-machine-configuration-tasks[Understanding configuration drift detection]. - -* For information about preventing the control plane machines from rebooting after the Machine Config Operator makes changes to the machine configuration, see xref:../support/troubleshooting/troubleshooting-operator-issues.adoc#troubleshooting-disabling-autoreboot-mco_troubleshooting-operator-issues[Disabling Machine Config Operator from automatically rebooting]. 
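As a brief, hedged illustration of the machine config pools and Machine Config Operator content referenced above, the following commands list the pools and the machine configs they apply; the `worker` pool name is the default and might differ if custom pools are defined:

[source,terminal]
----
$ oc get machineconfigpools              # typically the master and worker pools, plus any custom pools
$ oc get machineconfigs                  # individual and rendered machine configs
$ oc describe machineconfigpool/worker   # which rendered config the worker pool is converging to
----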
- -include::modules/etcd-overview.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../scalability_and_performance/recommended-performance-scale-practices/recommended-etcd-practices.adoc#recommended-etcd-practices[Recommended etcd practices] -* xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd[Backing up etcd] - -include::modules/hosted-control-planes-overview.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hypershift-addon-intro[HyperShift add-on (Technology Preview)] - -* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hosted-control-planes-intro[Hosted control planes (Technology Preview)] - -include::modules/hosted-control-planes-concepts-personas.adoc[leveloffset=+2] -include::modules/hosted-control-planes-version-support.adoc[leveloffset=+2] diff --git a/architecture/images b/architecture/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/architecture/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/architecture/index.adoc b/architecture/index.adoc deleted file mode 100644 index d8497da1a830..000000000000 --- a/architecture/index.adoc +++ /dev/null @@ -1,83 +0,0 @@ -:_content-type: ASSEMBLY -[id="architecture-overview"] -= Architecture overview -include::_attributes/common-attributes.adoc[] -:context: architecture-overview - -toc::[] - -{product-title} is a cloud-based Kubernetes container platform. -The foundation of {product-title} is based on Kubernetes and therefore shares the same technology. -To learn more about {product-title} and Kubernetes, see xref:../architecture/architecture.adoc#architecture[product architecture]. - -include::modules/openshift-architecture-common-terms.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* For more information on networking, see xref:../networking/understanding-networking.adoc#understanding-networking[{product-title} networking]. -* For more information on storage, see xref:../storage/index.adoc#index[{product-title} storage]. -* For more information on authentication, see xref:../authentication/index.adoc#index[{product-title} authentication]. -* For more information on Operator Lifecycle Manager (OLM), see xref:../operators/understanding/olm/olm-understanding-olm.adoc#olm-understanding-olm[OLM]. -* For more information on logging, see xref:../logging/viewing-resource-logs.adoc#viewing-resource-logs[{product-title} Logging]. -* For more information on over-the-air (OTA) updates, see xref:../updating/understanding_updates/intro-to-updates.adoc#understanding-openshift-updates[Introduction to OpenShift updates]. - -[id="about-installation-and-updates"] -== About installation and updates - -As a cluster administrator, you can use the {product-title} xref:../architecture/architecture-installation.adoc#architecture-installation[installation program] to install and deploy a cluster by using one of the following methods: - -* Installer-provisioned infrastructure -* User-provisioned infrastructure - -[id="about-control-planes"] -== About the control plane - -The xref:../architecture/control-plane.adoc#control-plane[control plane] manages the worker nodes and the pods in your cluster. 
You can configure nodes with the use of machine config pools (MCPs). -MCPs are groups of machines, such as control plane components or user workloads, that are based on the resources that they handle. -{product-title} assigns different roles to hosts. These roles define the function of a machine in a cluster. -The cluster contains definitions for the standard control plane and worker role types. - -You can use Operators to package, deploy, and manage services on the control plane. -Operators are important components in {product-title} because they provide the following services: - -* Perform health checks -* Provide ways to watch applications -* Manage over-the-air updates -* Ensure applications stay in the specified state - -[id="about-containerized-applications-for-developers"] -== About containerized applications for developers - -As a developer, you can use different tools, methods, and formats to xref:../architecture/understanding-development.adoc#understanding-development[develop your containerized application] based on your unique requirements, for example: - -* Use various build-tool, base-image, and registry options to build a simple container application. -* Use supporting components such as OperatorHub and templates to develop your application. -* Package and deploy your application as an Operator. - -You can also create a Kubernetes manifest and store it in a Git repository. -Kubernetes works on basic units called pods. A pod is a single instance of a running process in your cluster. Pods can contain one or more containers. -You can create a service by grouping a set of pods and their access policies. -Services provide permanent internal IP addresses and host names for other applications to use as pods are created and destroyed. Kubernetes defines workloads based on the type of your application. - -[id="coreos-and-ignition"] -== About {op-system-first} and Ignition - -As a cluster administrator, you can perform the following {op-system-first} tasks: - -** Learn about the next generation of xref:../architecture/architecture-rhcos.adoc#architecture-rhcos[single-purpose container operating system technology]. -** Choose how to configure {op-system-first} -** Choose how to deploy {op-system-first}: -*** Installer-provisioned deployment -*** User-provisioned deployment - -The {product-title} installation program creates the Ignition configuration files that you need to deploy your cluster. -{op-system-first} uses Ignition during the initial configuration to perform common disk tasks, such as partitioning, formatting, writing files, and configuring users. -During the first boot, Ignition reads its configuration from the installation media or the location that you specify and applies the configuration to the machines. - -You can learn how xref:../architecture/architecture-rhcos.adoc#architecture-rhcos[Ignition works], the process for a {op-system-first} machine in an {product-title} cluster, view Ignition configuration files, and change Ignition configuration after an installation. - -[id="about-admission-plug-ins"] -== About admission plugins -You can use xref:../architecture/admission-plug-ins.adoc#admission-plug-ins[admission plugins] to regulate how {product-title} functions. After a resource request is authenticated and authorized, admission plugins intercept the resource request to the master API to validate resource requests and to ensure that scaling policies are adhered to. 
-Admission plugins are used to enforce security policies, resource limitations, or configuration requirements. diff --git a/architecture/mce-overview-ocp.adoc b/architecture/mce-overview-ocp.adoc deleted file mode 100644 index 869f970703e4..000000000000 --- a/architecture/mce-overview-ocp.adoc +++ /dev/null @@ -1,35 +0,0 @@ -:_content-type: ASSEMBLY -[id="mce-overview-ocp"] -= About multicluster engine for Kubernetes operator -include::_attributes/common-attributes.adoc[] -:context: mce-overview-ocp - -toc::[] - -One of the challenges of scaling Kubernetes environments is managing the lifecycle of a growing fleet. To meet that challenge, you can use multicluster engine for Kubernetes operator (MCE). The operator delivers full lifecycle capabilities for managed {product-title} clusters and partial lifecycle management for other Kubernetes distributions. It is available in two ways: - -* As a standalone operator that you install as part of your {product-title} or {oke} subscription -* As part of link:https://access.redhat.com/products/red-hat-advanced-cluster-management-for-kubernetes[Red Hat Advanced Cluster Management for Kubernetes] - -[id="mce-on-ocp"] -== Cluster management with multicluster engine on {product-title} - -When you enable multicluster engine on {product-title}, you gain the following capabilities: - -* xref:../architecture/control-plane.adoc#hosted-control-planes-overview_control-plane[Hosted control planes], which is a feature that is based on the HyperShift project. With a centralized hosted control plane, you can operate {product-title} clusters in a hyperscale manner. -* Hive, which provisions self-managed {product-title} clusters to the hub and completes the initial configurations for those clusters. -* klusterlet agent, which registers managed clusters to the hub. -* Infrastructure Operator, which manages the deployment of the Assisted Service to orchestrate on-premise bare metal and vSphere installations of {product-title}, such as SNO on bare metal. The Infrastructure Operator includes xref:../scalability_and_performance/ztp_far_edge/ztp-deploying-far-edge-clusters-at-scale.adoc#ztp-challenges-of-far-edge-deployments_ztp-deploying-far-edge-clusters-at-scale[{ztp-first}], which fully automates cluster creation on bare metal and vSphere provisioning with GitOps workflows to manage deployments and configuration changes. -* Open cluster management, which provides resources to manage Kubernetes clusters. - -The multicluster engine is included with your {product-title} support subscription and is delivered separately from the core payload. To start to use multicluster engine, you deploy the {product-title} cluster and then install the operator. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#mce-install-intro[Installing and upgrading multicluster engine operator]. - -[id="mce-on-rhacm"] -== Cluster management with Red Hat Advanced Cluster Management - -If you need cluster management capabilities beyond what {product-title} with multicluster engine can provide, consider Red Hat Advanced Cluster Management. The multicluster engine is an integral part of Red Hat Advanced Cluster Management and is enabled by default. 
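If you install the standalone operator through Operator Lifecycle Manager (OLM) rather than through the web console, the subscription typically resembles the following minimal sketch. The namespace and channel shown here are illustrative assumptions; follow the linked installation documentation for the supported procedure:

[source,yaml]
----
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: multicluster-engine
  namespace: multicluster-engine # assumed namespace; create it and an OperatorGroup first
spec:
  channel: stable-2.3 # assumed channel; check the catalog for the current channel
  name: multicluster-engine
  source: redhat-operators
  sourceNamespace: openshift-marketplace
----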
- -[id="mce-additional-resources-ocp"] -== Additional resources - -For the complete documentation for multicluster engine, see link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#doc-wrapper[Cluster lifecycle with multicluster engine documentation], which is part of the product documentation for Red Hat Advanced Cluster Management. diff --git a/architecture/modules b/architecture/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/architecture/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/architecture/ocm-overview-ocp.adoc b/architecture/ocm-overview-ocp.adoc deleted file mode 100644 index d1eaf4095dd4..000000000000 --- a/architecture/ocm-overview-ocp.adoc +++ /dev/null @@ -1,57 +0,0 @@ -:_content-type: ASSEMBLY -[id="ocm-overview-ocp"] -= Red Hat OpenShift Cluster Manager -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: ocm-overview-ocp -toc::[] - -{cluster-manager-first} is a managed service where you can install, modify, operate, and upgrade your Red Hat OpenShift clusters. This service allows you to work with all of your organization’s clusters from a single dashboard. - -{cluster-manager} guides you to install {OCP}, Red Hat OpenShift Service on AWS (ROSA), and {product-short-name} clusters. It also manages {OCP} clusters after self-installation, as well as your ROSA and {product-short-name} clusters. - -You can use {cluster-manager} to do the following actions: - -* Create new clusters -* View cluster details and metrics -* Manage your clusters with tasks such as scaling, changing node labels, networking, and authentication -* Manage access control -* Monitor clusters -* Schedule upgrades - -include::modules/ocm-accessing.adoc[leveloffset=+1] - -[id="ocm-general-actions-ocp"] -== General actions - -On the top right of the cluster page, there are some actions that a user can perform on the entire cluster: - -* **Open console** launches a web console so that the cluster owner can issue commands to the cluster. -* **Actions** drop-down menu allows the cluster owner to change the display name of the cluster, change the number of load balancers and the amount of persistent storage on the cluster, if applicable, manually set the node count, and delete the cluster. -* **Refresh** icon forces a refresh of the cluster. - -[id="ocm-cluster-tabs-ocp"] -== Cluster tabs - -Selecting an active, installed cluster shows tabs associated with that cluster. The following tabs display after the cluster's installation completes: - -* Overview -* Access control -* Add-ons -* Networking -* Insights Advisor -* Machine pools -* Support -* Settings - -include::modules/ocm-overview-tab.adoc[leveloffset=+2] -include::modules/ocm-accesscontrol-tab.adoc[leveloffset=+2] -include::modules/ocm-addons-tab.adoc[leveloffset=+2] -include::modules/ocm-insightsadvisor-tab.adoc[leveloffset=+2] -include::modules/ocm-machinepools-tab.adoc[leveloffset=+2] -include::modules/ocm-support-tab.adoc[leveloffset=+2] -include::modules/ocm-settings-tab.adoc[leveloffset=+2] - -[id="ocm-additional-resources-ocp"] -== Additional resources - -* For the complete documentation for {cluster-manager}, see link:https://access.redhat.com/documentation/en-us/openshift_cluster_manager/2022/html-single/managing_clusters/index[{cluster-manager} documentation].
diff --git a/architecture/snippets b/architecture/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/architecture/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/architecture/understanding-development.adoc b/architecture/understanding-development.adoc deleted file mode 100644 index 0d7313848322..000000000000 --- a/architecture/understanding-development.adoc +++ /dev/null @@ -1,382 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-development"] -= Understanding {product-title} development -include::_attributes/common-attributes.adoc[] -:context: understanding-development - -toc::[] - -To fully leverage the capability of containers when developing and running -enterprise-quality applications, ensure your environment is supported by tools -that allow containers to be: - -* Created as discrete microservices that can be connected to other -containerized, and non-containerized, services. For example, you might want to -join your application with a database or attach a monitoring application to it. - -* Resilient, so if a server crashes or needs to go down for maintenance or to be -decommissioned, containers can start on another machine. - -* Automated to pick up code changes automatically and then start and deploy new -versions of themselves. - -* Scaled up, or replicated, to have more instances serving clients as demand -increases and then spun down to fewer instances as demand declines. - -* Run in different ways, depending on the type of application. For example, one -application might run once a month to produce a report and then exit. Another -application might need to run constantly and be highly available to clients. - -* Managed so you can watch the state of your application and react when -something goes wrong. - -Containers’ widespread acceptance, and the resulting requirements for tools and -methods to make them enterprise-ready, resulted in many options for them. - -The rest of this section explains options for -assets you can create when you build and deploy containerized Kubernetes -applications in {product-title}. It also describes which approaches you might -use for different kinds of applications and development requirements. - -[id="developing-containerized-applications"] -== About developing containerized applications - -You can approach application development with containers in many ways, and -different approaches might be more appropriate for different situations. To -illustrate some of this variety, the series of approaches that is presented -starts with developing a single container and ultimately deploys that container -as a mission-critical application for a large enterprise. These approaches -show different tools, formats, and methods that you can employ with containerized -application development. This topic describes: - -* Building a simple container and storing it in a registry -* Creating a Kubernetes manifest and saving it to a Git repository -* Making an Operator to share your application with others - -[id="building-simple-container"] -== Building a simple container - -You have an idea for an application and you want to containerize it. - -First you require a tool for building a container, like buildah or docker, -and a file that describes what goes in your container, which is typically a -link:https://docs.docker.com/engine/reference/builder/[Dockerfile]. - -Next, you require a location to push the resulting container image so you can -pull it to run anywhere you want it to run. 
This location is a container -registry. - -Some examples of each of these components are installed by default on most -Linux operating systems, except for the Dockerfile, which you provide yourself. - -The following diagram displays the process of building and pushing an image: - -.Create a simple containerized application and push it to a registry -image::create-push-app.png[Creating and pushing a containerized application] - -If you use a computer that runs {op-system-base-full} as the operating -system, the process of creating a containerized application requires the -following steps: - -. Install container build tools: {op-system-base} contains a set of tools that includes -podman, buildah, and skopeo that you use to build and manage containers. -. Create a Dockerfile to combine base image and software: Information about -building your container goes into a file that is named `Dockerfile`. In that -file, you identify the base image you build from, the software packages you -install, and the software you copy into the container. You also identify -parameter values like network ports that you expose outside the container and -volumes that you mount inside the container. Put your Dockerfile and the -software you want to containerize in a directory on your {op-system-base} system. -. Run buildah or docker build: Run the `buildah build-using-dockerfile` or -the `docker build` command to pull your chosen base image to the local system and -create a container image that is stored locally. You can also build container images -without a Dockerfile by using buildah. -. Tag and push to a registry: Add a tag to your new container image that -identifies the location of the registry in which you want to store and share -your container. Then push that image to the registry by running the -`podman push` or `docker push` command. -. Pull and run the image: From any system that has a container client tool, -such as podman or docker, run a command that identifies your new image. -For example, run the `podman run <image_name>` or `docker run <image_name>` -command. Here `<image_name>` is the name of your new container image, which -resembles `quay.io/myrepo/myapp:latest`. The registry might require credentials -to push and pull images. - -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -For more details on the process of building container images, pushing them to -registries, and running them, see -xref:../cicd/builds/custom-builds-buildah.adoc#custom-builds-buildah[Custom image builds with Buildah]. -endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -[id="container-build-tool-options"] -=== Container build tool options - -Building and managing containers with buildah, podman, and skopeo results in industry standard container images that include features specifically tuned for deploying containers in {product-title} or other Kubernetes environments. These tools are daemonless and can run without root privileges, requiring less overhead to run them. - -[IMPORTANT] -==== -Support for Docker Container Engine as a container runtime is deprecated in Kubernetes 1.20 and will be removed in a future release. However, Docker-produced images will continue to work in your cluster with all runtimes, including CRI-O. For more information, see the link:https://kubernetes.io/blog/2020/12/02/dont-panic-kubernetes-and-docker/[Kubernetes blog announcement]. -==== - -When you ultimately run your containers in {product-title}, you use the -link:https://cri-o.io/[CRI-O] container engine.
CRI-O runs on every worker and -control plane machine in an {product-title} cluster, but CRI-O is not yet supported as -a standalone runtime outside of {product-title}. - -[id="base-image-options"] -=== Base image options - -The base image you choose to build your application on contains a set of -software that resembles a Linux system to your application. When you build your -own image, your software is placed into that file system and sees that file -system as though it were looking at its operating system. Choosing this base -image has major impact on how secure, efficient and upgradeable your container -is in the future. - -Red Hat provides a new set of base images referred to as -link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/getting_started_with_containers/index#using_red_hat_base_container_images_standard_and_minimal[Red Hat Universal Base Images] (UBI). -These images are based on Red Hat Enterprise Linux and are similar to base -images that Red Hat has offered in the past, with one major difference: they -are freely redistributable without a Red Hat subscription. As a result, you can -build your application on UBI images without having to worry about how they -are shared or the need to create different images for different environments. - -These UBI images have standard, init, and minimal versions. You can also use the -link:https://access.redhat.com/documentation/en-us/red_hat_software_collections/3/html-single/using_red_hat_software_collections_container_images/index[Red Hat Software Collections] -images as a foundation for applications that rely on specific runtime -environments such as Node.js, Perl, or Python. Special versions of some of -these runtime base images are referred to as Source-to-Image (S2I) images. With -S2I images, you can insert your code into a base image environment that is ready -to run that code. - -S2I images are available for you to use directly from the {product-title} web UI -by selecting *Catalog* -> *Developer Catalog*, as shown in the following figure: - -.Choose S2I base images for apps that need specific runtimes -image::developer-catalog.png[{product-title} Developer Catalog] - -[id="understanding-development-registry-options"] -=== Registry options - -Container registries are where you store container images so you can share them -with others and make them available to the platform where they ultimately run. -You can select large, public container registries that offer free accounts or a -premium version that offer more storage and special features. You can also -install your own registry that can be exclusive to your organization or -selectively shared with others. - -To get Red Hat images and certified partner images, you can draw from the -Red Hat Registry. The Red Hat Registry is represented by two locations: -`registry.access.redhat.com`, which is unauthenticated and deprecated, and -`registry.redhat.io`, which requires authentication. You can learn about the Red -Hat and partner images in the Red Hat Registry from the -link:https://catalog.redhat.com/software/containers/explore[Container images section of the Red Hat Ecosystem Catalog]. -Besides listing Red Hat container images, it also shows extensive information -about the contents and quality of those images, including health scores that are -based on applied security updates. - -Large, public registries include link:https://hub.docker.com/[Docker Hub] and -link:https://quay.io/[Quay.io]. 
The Quay.io registry is owned and managed by Red -Hat. Many of the components used in {product-title} are stored in Quay.io, -including container images and the Operators that are used to deploy -{product-title} itself. Quay.io also offers the means of storing other types of -content, including Helm charts. - -If you want your own, private container registry, {product-title} itself -includes a private container registry that is installed with {product-title} -and runs on its cluster. Red Hat also offers a private version of the Quay.io -registry called link:https://access.redhat.com/products/red-hat-quay[Red Hat Quay]. -Red Hat Quay includes geo replication, Git build triggers, Clair image scanning, -and many other features. - -All of the registries mentioned here can require credentials to download images -from those registries. Some of those credentials are presented on a cluster-wide -basis from {product-title}, while other credentials can be assigned to individuals. - -[id="creating-kubernetes-manifest-openshift"] -== Creating a Kubernetes manifest for {product-title} - -While the container image is the basic building block for a containerized -application, more information is required to manage and deploy that application -in a Kubernetes environment such as {product-title}. The typical next steps after -you create an image are to: - -* Understand the different resources you work with in Kubernetes manifests -* Make some decisions about what kind of an application you are running -* Gather supporting components -* Create a manifest and store that manifest in a Git repository so you can store -it in a source versioning system, audit it, track it, promote and deploy it -to the next environment, roll it back to earlier versions, if necessary, and -share it with others - -[id="understanding-kubernetes-pods"] -=== About Kubernetes pods and services - -While the container image is the basic unit with docker, the basic units that -Kubernetes works with are called -link:https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/[pods]. -Pods represent the next step in building out an application. A pod can contain -one or more than one container. The key is that the pod is the single unit -that you deploy, scale, and manage. - -Scalability and namespaces are probably the main items to consider when determining -what goes in a pod. For ease of deployment, you might want to deploy a container -in a pod and include its own logging and monitoring container in the pod. Later, -when you run the pod and need to scale up an additional instance, those other -containers are scaled up with it. For namespaces, containers in a pod share the -same network interfaces, shared storage volumes, and resource limitations, -such as memory and CPU, which makes it easier to manage the contents of the pod -as a single unit. Containers in a pod can also communicate with each other by -using standard inter-process communications, such as System V semaphores or -POSIX shared memory. - -While individual pods represent a scalable unit in Kubernetes, a -link:https://kubernetes.io/docs/concepts/services-networking/service/[service] -provides a means of grouping together a set of pods to create a complete, stable -application that can complete tasks such as load balancing. A service is also -more permanent than a pod because the service remains available from the same -IP address until you delete it. 
When the service is in use, it is requested by -name and the {product-title} cluster resolves that name into the IP addresses -and ports where you can reach the pods that compose the service. - -By their nature, containerized applications are separated from the operating -systems where they run and, by extension, their users. Part of your Kubernetes -manifest describes how to expose the application to internal and external -networks by defining -link:https://kubernetes.io/docs/concepts/services-networking/network-policies/[network policies] -that allow fine-grained control over communication with your containerized -applications. To connect incoming requests for HTTP, HTTPS, and other services -from outside your cluster to services inside your cluster, you can use an -link:https://kubernetes.io/docs/concepts/services-networking/ingress/[`Ingress`] -resource. - -If your container requires on-disk storage instead of database storage, which -might be provided through a service, you can add -link:https://kubernetes.io/docs/concepts/storage/volumes/[volumes] -to your manifests to make that storage available to your pods. You can configure -the manifests to create persistent volumes (PVs) or dynamically create volumes that -are added to your `Pod` definitions. - -After you define a group of pods that compose your application, you can define -those pods in -link:https://kubernetes.io/docs/concepts/workloads/controllers/deployment/[`Deployment`] -and xref:../applications/deployments/what-deployments-are.adoc#what-deployments-are[`DeploymentConfig`] objects. - -[id="application-types"] -=== Application types - -Next, consider how your application type influences how to run it. - -Kubernetes defines different types of workloads that are appropriate for -different kinds of applications. To determine the appropriate workload for your -application, consider if the application is: - -* Meant to run to completion and be done. An example is an application that -starts up to produce a report and exits when the report is complete. The -application might not run again then for a month. Suitable {product-title} -objects for these types of applications include -link:https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/[`Job`] -and https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/[`CronJob`] objects. -* Expected to run continuously. For long-running applications, you can write a -xref:../applications/deployments/what-deployments-are.adoc#deployments-kube-deployments[deployment]. -* Required to be highly available. If your application requires high -availability, then you want to size your deployment to have more than one -instance. A `Deployment` or `DeploymentConfig` object can incorporate a -link:https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/[replica set] -for that type of application. With replica sets, pods run across multiple nodes -to make sure the application is always available, even if a worker goes down. -* Need to run on every node. Some types of Kubernetes applications are intended -to run in the cluster itself on every master or worker node. DNS and monitoring -applications are examples of applications that need to run continuously on every -node. You can run this type of application as a -link:https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/[daemon set]. -You can also run a daemon set on a subset of nodes, based on node labels. -* Require life-cycle management. 
When you want to hand off your application so -that others can use it, consider creating an -link:https://www.openshift.com/learn/topics/operators[Operator]. Operators let you build in -intelligence, so it can handle things like backups and upgrades automatically. -Coupled with the Operator Lifecycle Manager (OLM), cluster managers can expose -Operators to selected namespaces so that users in the cluster can run them. -* Have identity or numbering requirements. An application might have identity -requirements or numbering requirements. For example, you might be -required to run exactly three instances of the application and to name the -instances `0`, `1`, and `2`. A -https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/[stateful set] -is suitable for this application. Stateful sets are most useful for applications -that require independent storage, such as databases and zookeeper clusters. - -[id="supporting-components"] -=== Available supporting components - -The application you write might need supporting components, like a database or -a logging component. To fulfill that need, you might be able to obtain the -required component from the following Catalogs that are available in the -{product-title} web console: - -* OperatorHub, which is available in each {product-title} {product-version} -cluster. The OperatorHub makes Operators available from Red Hat, -certified Red Hat partners, and community members to the cluster operator. The -cluster operator can make those Operators available in all or selected -namespaces in the cluster, so developers can launch them and configure them -with their applications. -* Templates, which are useful for a one-off type of application, where the -lifecycle of a component is not important after it is installed. A template provides an easy -way to get started developing a Kubernetes application with minimal overhead. -A template can be a list of resource definitions, which could be `Deployment`, -`Service`, `Route`, or other objects. If you want to change names or resources, -you can set these values as parameters in the template. - -You can configure the supporting Operators and -templates to the specific needs of your development team and then make them -available in the namespaces in which your developers work. Many people add -shared templates to the `openshift` namespace because it is accessible from all -other namespaces. - -[id="applying-manifest"] -=== Applying the manifest - -Kubernetes manifests let you create a more complete picture of the components -that make up your Kubernetes applications. You write these manifests as YAML -files and deploy them by applying them to the cluster, for example, by running -the `oc apply` command. - -[id="manifest-next-steps"] -=== Next steps - -At this point, consider ways to automate your container development process. -Ideally, you have some sort of CI pipeline that builds the images and pushes -them to a registry. In particular, a GitOps pipeline integrates your container -development with the Git repositories that you use to store the software that -is required to build your applications. - -The workflow to this point might look like: - -* Day 1: You write some YAML. You then run the `oc apply` command to apply that -YAML to the cluster and test that it works. -* Day 2: You put your YAML container configuration file into your own Git -repository. From there, people who want to install that app, or help you improve -it, can pull down the YAML and apply it to their cluster to run the app. 
-* Day 3: Consider writing an Operator for your application. - -[id="develop-for-operators"] -== Develop for Operators - -Packaging and deploying your application as an Operator might be preferred -if you make your application available for others to run. As noted earlier, -Operators add a lifecycle component to your application that acknowledges that -the job of running an application is not complete as soon as it is installed. - -When you create an application as an Operator, you can build in your own -knowledge of how to run and maintain the application. You can build in features -for upgrading the application, backing it up, scaling it, or keeping track of -its state. If you configure the application correctly, maintenance tasks, -like updating the Operator, can happen automatically and invisibly to the -Operator's users. - -An example of a useful Operator is one that is set up to automatically back up -data at particular times. Having an Operator manage an application's backup at -set times can save a system administrator from remembering to do it. - -Any application maintenance that has traditionally been completed manually, -like backing up data or rotating certificates, can be completed automatically -with an Operator. diff --git a/authentication/_attributes b/authentication/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/authentication/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/authentication/assuming-an-aws-iam-role-for-a-service-account.adoc b/authentication/assuming-an-aws-iam-role-for-a-service-account.adoc deleted file mode 100644 index 8c1a33263195..000000000000 --- a/authentication/assuming-an-aws-iam-role-for-a-service-account.adoc +++ /dev/null @@ -1,41 +0,0 @@ -:_content-type: ASSEMBLY -[id="assuming-an-aws-iam-role-for-a-service-account"] -= Assuming an AWS IAM role for a service account -include::_attributes/common-attributes.adoc[] -ifdef::openshift-rosa,openshift-dedicated[] -include::_attributes/attributes-openshift-dedicated.adoc[] -endif::openshift-rosa,openshift-dedicated[] -:context: assuming-an-aws-iam-role-for-a-service-account - -toc::[] - -[role="_abstract"] -ifdef::openshift-rosa[] -{product-title} clusters that use the AWS Security Token Service (STS) include a pod identity webhook for use with pods that run in user-defined projects. -endif::openshift-rosa[] - -You can use the pod identity webhook to enable a service account to automatically assume an AWS Identity and Access Management (IAM) role in your own pods. If the assumed IAM role has the required AWS permissions, the pods can run AWS SDK operations by using temporary STS credentials. 
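At a high level, the webhook acts on pods whose service account is annotated with the Amazon Resource Name (ARN) of an IAM role that trusts the cluster's OpenID Connect provider. The following is a minimal sketch; the annotation key reflects the upstream pod identity webhook, and the names and ARN are placeholder values:

[source,yaml]
----
apiVersion: v1
kind: ServiceAccount
metadata:
  name: example-app # placeholder service account name
  namespace: example-project # placeholder project
  annotations:
    # Placeholder ARN of an IAM role that the pods are allowed to assume
    eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/example-role
----

Pods that run with an annotated service account receive a projected web identity token that the AWS SDKs can exchange for temporary STS credentials.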
- -include::modules/understanding-pod-identity-webhook-workflow-in-user-defined-projects.adoc[leveloffset=+1] -include::modules/assuming-an-aws-iam-role-in-your-own-pods.adoc[leveloffset=+1] -include::modules/setting-up-an-aws-iam-role-a-service-account.adoc[leveloffset=+2] -include::modules/creating-a-service-account-in-your-project.adoc[leveloffset=+2] -include::modules/creating-an-example-aws-sdk-container-image.adoc[leveloffset=+2] -include::modules/deploying-a-pod-that-includes-an-aws-sdk.adoc[leveloffset=+2] -include::modules/verifying-the-assumed-iam-role-in-your-pod.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_assuming-an-aws-iam-role-for-a-service-account"] -== Additional resources - -* For more information about using AWS IAM roles with service accounts, see link:https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html[IAM roles for service accounts] in the AWS documentation. - -* For information about AWS IAM role delegation, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-service.html[Creating a role to delegate permissions to an AWS service] in the AWS documentation. - -* For details about AWS SDKs, see link:https://docs.aws.amazon.com/sdkref/latest/guide/overview.html[AWS SDKs and Tools Reference Guide] in the AWS documentation. - -* For more information about installing and using the AWS Boto3 SDK for Python, see the link:https://boto3.amazonaws.com/v1/documentation/api/latest/index.html[AWS Boto3 documentation]. - -ifdef::openshift-rosa,openshift-dedicated[] -* For general information about webhook admission plugins for OpenShift, see link:https://docs.openshift.com/container-platform/4.14/architecture/admission-plug-ins.html#admission-webhooks-about_admission-plug-ins[Webhook admission plugins] in the OpenShift Container Platform documentation. -endif::openshift-rosa,openshift-dedicated[] diff --git a/authentication/bound-service-account-tokens.adoc b/authentication/bound-service-account-tokens.adoc deleted file mode 100644 index c16fda7a3f0d..000000000000 --- a/authentication/bound-service-account-tokens.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -[id="bound-service-account-tokens"] -= Using bound service account tokens -include::_attributes/common-attributes.adoc[] -:context: bound-service-account-tokens - -toc::[] - -You can use bound service account tokens, which improve the ability to integrate with cloud provider identity and access management (IAM) services, such as AWS IAM.
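As an orientation to the modules that follow, a bound token is typically requested through a projected volume in the pod specification. The following is a minimal sketch; the audience, expiration, and mount path are placeholder values:

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: example-pod
spec:
  serviceAccountName: example-sa # placeholder service account
  containers:
  - name: app
    image: quay.io/myrepo/myapp:latest # placeholder image
    volumeMounts:
    - name: bound-token
      mountPath: /var/run/secrets/tokens
  volumes:
  - name: bound-token
    projected:
      sources:
      - serviceAccountToken:
          path: token
          audience: openshift # placeholder audience value
          expirationSeconds: 3600
----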
- -// About bound service account tokens -include::modules/bound-sa-tokens-about.adoc[leveloffset=+1] - -// Configuring bound service account tokens using volume projection -include::modules/bound-sa-tokens-configuring.adoc[leveloffset=+1] - -// Creating bound service account tokens outside the pod -include::modules/bound-sa-tokens-configuring-externally.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../nodes/nodes/nodes-nodes-rebooting.adoc#nodes-nodes-rebooting-gracefully_nodes-nodes-rebooting[Rebooting a node gracefully] - -* xref:../authentication/understanding-and-creating-service-accounts.adoc#service-accounts-managing_understanding-service-accounts[Creating service accounts] - -// TODO: Verify distros: openshift-enterprise,openshift-webscale,openshift-origin diff --git a/authentication/configuring-internal-oauth.adoc b/authentication/configuring-internal-oauth.adoc deleted file mode 100644 index 7e3f86c9e602..000000000000 --- a/authentication/configuring-internal-oauth.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-internal-oauth"] -= Configuring the internal OAuth server -include::_attributes/common-attributes.adoc[] -:context: configuring-internal-oauth - -toc::[] - -include::modules/oauth-server-overview.adoc[leveloffset=+1] - -include::modules/oauth-internal-tokens.adoc[leveloffset=+1] - -include::modules/oauth-internal-options.adoc[leveloffset=+1] - -include::modules/oauth-configuring-internal-oauth.adoc[leveloffset=+1] - -include::modules/oauth-configuring-token-inactivity-timeout.adoc[leveloffset=+1] - -include::modules/oauth-customizing-the-oauth-server-URL.adoc[leveloffset=+1] - -include::modules/oauth-server-metadata.adoc[leveloffset=+1] - -include::modules/oauth-troubleshooting-api-events.adoc[leveloffset=+1] diff --git a/authentication/configuring-ldap-failover.adoc b/authentication/configuring-ldap-failover.adoc deleted file mode 100644 index ede202898a53..000000000000 --- a/authentication/configuring-ldap-failover.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-ldap-failover"] -= Configuring LDAP failover -include::_attributes/common-attributes.adoc[] -:context: sssd-ldap-failover - -toc::[] - -include::modules/ldap-failover-overview.adoc[] - -include::modules/ldap-failover-prereqs.adoc[leveloffset=+1] - -include::modules/ldap-failover-generate-certs.adoc[leveloffset=+1] - -include::modules/ldap-failover-configure-sssd.adoc[leveloffset=+1] - -include::modules/ldap-failover-configure-apache.adoc[leveloffset=+1] - -include::modules/ldap-failover-configure-openshift.adoc[leveloffset=+1] diff --git a/authentication/configuring-oauth-clients.adoc b/authentication/configuring-oauth-clients.adoc deleted file mode 100644 index 2059ef4293b6..000000000000 --- a/authentication/configuring-oauth-clients.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-oauth-clients"] -= Configuring OAuth clients -include::_attributes/common-attributes.adoc[] -:context: configuring-oauth-clients - -toc::[] - -Several OAuth clients are created by default in {product-title}. You can also register and configure additional OAuth clients. 
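For orientation, an additional OAuth client is represented by an `OAuthClient` object. The following is a minimal sketch with placeholder values; the modules that follow describe the supported procedure and fields in detail:

[source,yaml]
----
kind: OAuthClient
apiVersion: oauth.openshift.io/v1
metadata:
  name: demo-client # placeholder name, passed as the client_id
secret: "<random_client_secret>" # placeholder value, passed as the client_secret
redirectURIs:
- "https://www.example.com/oauth/callback" # placeholder redirect URI
grantMethod: prompt # prompt or auto
----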
- -// Default OAuth clients -include::modules/oauth-default-clients.adoc[leveloffset=+1] - -// Register an additional OAuth client -include::modules/oauth-register-additional-client.adoc[leveloffset=+1] - -// Configuring token inactivity timeout for OAuth clients -include::modules/oauth-configuring-token-inactivity-timeout-clients.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* xref:../rest_api/oauth_apis/oauthclient-oauth-openshift-io-v1.adoc#oauthclient-oauth-openshift-io-v1[OAuthClient [oauth.openshift.io/v1]] diff --git a/authentication/dedicated-understanding-authentication.adoc b/authentication/dedicated-understanding-authentication.adoc deleted file mode 100644 index 7d27e9512e64..000000000000 --- a/authentication/dedicated-understanding-authentication.adoc +++ /dev/null @@ -1,39 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-identity-provider"] -= Understanding identity provider configuration -include::_attributes/common-attributes.adoc[] -:context: understanding-identity-provider - -toc::[] - -include::modules/identity-provider-parameters.adoc[leveloffset=+1] - -[id="supported-identity-providers"] -== Supported identity providers - -You can configure the following types of identity providers: - -[cols="2a,8a",options="header"] -|=== - -|Identity provider -|Description - -|xref:../authentication/identity_providers/configuring-ldap-identity-provider.adoc#configuring-ldap-identity-provider[LDAP] -|Configure the `ldap` identity provider to validate user names and passwords -against an LDAPv3 server, using simple bind authentication. - -|xref:../authentication/identity_providers/configuring-github-identity-provider.adoc#configuring-github-identity-provider[GitHub or GitHub Enterprise] -|Configure a `github` identity provider to validate user names and passwords -against GitHub or GitHub Enterprise's OAuth authentication server. - -|xref:../authentication/identity_providers/configuring-google-identity-provider.adoc#configuring-google-identity-provider[Google] -|Configure a `google` identity provider using -link:https://developers.google.com/identity/protocols/OpenIDConnect[Google's OpenID Connect integration]. - -|xref:../authentication/identity_providers/configuring-oidc-identity-provider.adoc#configuring-oidc-identity-provider[OpenID Connect] -|Configure an `oidc` identity provider to integrate with an OpenID Connect -identity provider using an -link:http://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth[Authorization Code Flow]. 
- -|=== diff --git a/authentication/identity_providers/_attributes b/authentication/identity_providers/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/authentication/identity_providers/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc b/authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc deleted file mode 100644 index bad3240b4239..000000000000 --- a/authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-basic-authentication-identity-provider"] -= Configuring a basic authentication identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-basic-authentication-identity-provider - -toc::[] - -Configure the `basic-authentication` identity provider for users to log in to {product-title} with credentials validated against a remote identity provider. Basic authentication is a generic back-end integration mechanism. - -include::modules/identity-provider-overview.adoc[leveloffset=+1] - -include::modules/identity-provider-about-basic-authentication.adoc[leveloffset=+1] - -include::modules/identity-provider-secret-tls.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-basic-authentication-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] - -include::modules/example-apache-httpd-configuration.adoc[leveloffset=+1] - -include::modules/identity-provider-basic-authentication-troubleshooting.adoc[leveloffset=+1] diff --git a/authentication/identity_providers/configuring-github-identity-provider.adoc b/authentication/identity_providers/configuring-github-identity-provider.adoc deleted file mode 100644 index 76a1b23f4c54..000000000000 --- a/authentication/identity_providers/configuring-github-identity-provider.adoc +++ /dev/null @@ -1,40 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-github-identity-provider"] -= Configuring a GitHub or GitHub Enterprise identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-github-identity-provider - -toc::[] - -Configure the `github` identity provider to validate user names and passwords against GitHub or GitHub Enterprise's OAuth authentication server. OAuth facilitates a token exchange flow between {product-title} and GitHub or GitHub Enterprise. - -You can use the GitHub integration to connect to either GitHub or GitHub Enterprise. For GitHub Enterprise integrations, you must provide the `hostname` of your instance and can optionally provide a `ca` certificate bundle to use in requests to the server. - -[NOTE] -==== -The following steps apply to both GitHub and GitHub Enterprise unless noted. 
-==== - -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -include::modules/identity-provider-overview.adoc[leveloffset=+1] -endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -include::modules/identity-provider-github-about.adoc[leveloffset=+1] - -include::modules/identity-provider-registering-github.adoc[leveloffset=+1] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/identity-provider-secret.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-github-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] -endif::[] diff --git a/authentication/identity_providers/configuring-gitlab-identity-provider.adoc b/authentication/identity_providers/configuring-gitlab-identity-provider.adoc deleted file mode 100644 index 023dd8dec0fb..000000000000 --- a/authentication/identity_providers/configuring-gitlab-identity-provider.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-gitlab-identity-provider"] -= Configuring a GitLab identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-gitlab-identity-provider - -toc::[] - -Configure the `gitlab` identity provider using link:https://gitlab.com/[GitLab.com] or any other GitLab instance as an identity provider. - -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -include::modules/identity-provider-overview.adoc[leveloffset=+1] -endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -include::modules/identity-provider-gitlab-about.adoc[leveloffset=+1] - -include::modules/identity-provider-secret.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-gitlab-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] diff --git a/authentication/identity_providers/configuring-google-identity-provider.adoc b/authentication/identity_providers/configuring-google-identity-provider.adoc deleted file mode 100644 index 90faa932bff9..000000000000 --- a/authentication/identity_providers/configuring-google-identity-provider.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-google-identity-provider"] -= Configuring a Google identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-google-identity-provider - -toc::[] - -Configure the `google` identity provider using the link:https://developers.google.com/identity/protocols/OpenIDConnect[Google OpenID Connect integration]. 
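For orientation, the Google identity provider is defined in the cluster `OAuth` configuration. The following is a minimal sketch with placeholder values; the modules that follow cover creating the client secret and applying the resource:

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
  - name: googleidp # placeholder provider name
    mappingMethod: claim
    type: Google
    google:
      clientID: "<google_client_id>" # placeholder Google OAuth client ID
      clientSecret:
        name: google-secret # placeholder secret name in the openshift-config namespace
      hostedDomain: "example.com" # placeholder hosted domain
----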
- -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -include::modules/identity-provider-overview.adoc[leveloffset=+1] -endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -include::modules/identity-provider-google-about.adoc[leveloffset=+1] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/identity-provider-secret.adoc[leveloffset=+1] - -include::modules/identity-provider-google-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] -endif::[] diff --git a/authentication/identity_providers/configuring-htpasswd-identity-provider.adoc b/authentication/identity_providers/configuring-htpasswd-identity-provider.adoc deleted file mode 100644 index f13cfb919b85..000000000000 --- a/authentication/identity_providers/configuring-htpasswd-identity-provider.adoc +++ /dev/null @@ -1,52 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-htpasswd-identity-provider"] -= Configuring an htpasswd identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-htpasswd-identity-provider - -toc::[] - -Configure the `htpasswd` identity provider to allow users to log in to {product-title} with credentials from an htpasswd file. - -To define an htpasswd identity provider, perform the following tasks: - -. xref:../../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#creating-htpasswd-file[Create an `htpasswd` file] to store the user and password information. -. xref:../../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#identity-provider-creating-htpasswd-secret_{context}[Create -a secret] to represent the `htpasswd` file. -. xref:../../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#identity-provider-htpasswd-CR_{context}[Define an htpasswd identity provider resource] that references the secret. -. xref:../../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#add-identity-provider_{context}[Apply the resource] to -the default OAuth configuration to add the identity provider. 
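As a brief sketch of the first two tasks in the preceding list, assuming a Linux system with the `htpasswd` utility from the `httpd-tools` package and placeholder user and file names:

[source,terminal]
----
$ htpasswd -c -B -b users.htpasswd user1 <password>
$ oc create secret generic htpass-secret \
    --from-file=htpasswd=users.htpasswd \
    -n openshift-config
----

The remaining tasks reference this secret from the htpasswd identity provider resource and apply that resource to the cluster OAuth configuration.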
- -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -include::modules/identity-provider-overview.adoc[leveloffset=+1] -endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -include::modules/identity-provider-htpasswd-about.adoc[leveloffset=+1] - -[id="creating-htpasswd-file"] -== Creating the htpasswd file - -See one of the following sections for instructions about how to create the htpasswd file: - -* xref:../../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#identity-provider-creating-htpasswd-file-linux_configuring-htpasswd-identity-provider[Creating an htpasswd file using Linux] -* xref:../../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#identity-provider-creating-htpasswd-file-windows_configuring-htpasswd-identity-provider[Creating an htpasswd file using Windows] - -include::modules/identity-provider-creating-htpasswd-file-linux.adoc[leveloffset=+2] - -include::modules/identity-provider-creating-htpasswd-file-windows.adoc[leveloffset=+2] - -include::modules/identity-provider-htpasswd-secret.adoc[leveloffset=+1] - -include::modules/identity-provider-htpasswd-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] - -include::modules/identity-provider-htpasswd-update-users.adoc[leveloffset=+1] - -include::modules/identity-provider-configuring-using-web-console.adoc[leveloffset=+1] diff --git a/authentication/identity_providers/configuring-keystone-identity-provider.adoc b/authentication/identity_providers/configuring-keystone-identity-provider.adoc deleted file mode 100644 index 1bac30ad85a5..000000000000 --- a/authentication/identity_providers/configuring-keystone-identity-provider.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-keystone-identity-provider"] -= Configuring a Keystone identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-keystone-identity-provider - -toc::[] - -Configure the `keystone` identity provider to integrate your {product-title} cluster with Keystone to enable shared authentication with an OpenStack Keystone v3 server configured to store users in an internal database. This configuration allows users to log in to {product-title} with their Keystone credentials. - -include::modules/identity-provider-overview.adoc[leveloffset=+1] - -include::modules/identity-provider-keystone-about.adoc[leveloffset=+1] - -include::modules/identity-provider-secret-tls.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-keystone-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. 
- -include::modules/identity-provider-add.adoc[leveloffset=+1] diff --git a/authentication/identity_providers/configuring-ldap-identity-provider.adoc b/authentication/identity_providers/configuring-ldap-identity-provider.adoc deleted file mode 100644 index b659386195b6..000000000000 --- a/authentication/identity_providers/configuring-ldap-identity-provider.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-ldap-identity-provider"] -= Configuring an LDAP identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-ldap-identity-provider - -toc::[] - -Configure the `ldap` identity provider to validate user names and passwords against an LDAPv3 server, using simple bind authentication. - -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -include::modules/identity-provider-overview.adoc[leveloffset=+1] -endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -include::modules/identity-provider-about-ldap.adoc[leveloffset=+1] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/identity-provider-ldap-secret.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-ldap-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] -endif::[] diff --git a/authentication/identity_providers/configuring-oidc-identity-provider.adoc b/authentication/identity_providers/configuring-oidc-identity-provider.adoc deleted file mode 100644 index 2c3e74fba472..000000000000 --- a/authentication/identity_providers/configuring-oidc-identity-provider.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-oidc-identity-provider"] -= Configuring an OpenID Connect identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-oidc-identity-provider - -toc::[] - -Configure the `oidc` identity provider to integrate with an OpenID Connect identity provider using an link:http://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth[Authorization Code Flow]. 
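The modules that follow build up an `OpenID` entry in the cluster `OAuth` resource. A minimal sketch, with an illustrative client ID, secret name, and issuer URL:

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
  - name: oidcidp
    mappingMethod: claim
    type: OpenID
    openID:
      clientID: my-client-id <1>
      clientSecret:
        name: idp-secret <2>
      claims:
        preferredUsername:
        - preferred_username
        name:
        - name
        email:
        - email
      issuer: https://idp.example.com <3>
----
<1> The client ID registered with the OpenID provider.
<2> The secret in the `openshift-config` namespace that contains the client secret.
<3> The issuer URL described in the OpenID specification; it must use `https` and contain no URL query parameters or fragments.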
- -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -include::modules/identity-provider-overview.adoc[leveloffset=+1] -endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -include::modules/identity-provider-oidc-about.adoc[leveloffset=+1] - -ifdef::openshift-enterprise[] -include::modules/identity-provider-oidc-supported.adoc[leveloffset=+1] -endif::openshift-enterprise[] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/identity-provider-secret.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-oidc-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] - -include::modules/identity-provider-configuring-using-web-console.adoc[leveloffset=+1] -endif::[] diff --git a/authentication/identity_providers/configuring-request-header-identity-provider.adoc b/authentication/identity_providers/configuring-request-header-identity-provider.adoc deleted file mode 100644 index ee20455070b4..000000000000 --- a/authentication/identity_providers/configuring-request-header-identity-provider.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-request-header-identity-provider"] -= Configuring a request header identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-request-header-identity-provider - -toc::[] - -Configure the `request-header` identity provider to identify users from request header values, such as `X-Remote-User`. It is typically used in combination with an authenticating proxy, which sets the request header value. - -include::modules/identity-provider-overview.adoc[leveloffset=+1] - -include::modules/identity-provider-about-request-header.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-request-header-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] - -[id="example-apache-auth-config-using-request-header"] -== Example Apache authentication configuration using request header - -This example configures an Apache authentication proxy for the {product-title} -using the request header identity provider. 
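The Apache-side configuration in the following modules pairs with a `RequestHeader` entry in the cluster `OAuth` resource. A sketch of such an entry, with illustrative URLs, header names, and config map name that are not taken from this assembly:

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
  - name: requestheaderidp
    mappingMethod: claim
    type: RequestHeader
    requestHeader:
      challengeURL: "https://proxy.example.com/challenging-proxy/oauth/authorize?${query}" <1>
      loginURL: "https://proxy.example.com/login-proxy/oauth/authorize?${query}" <2>
      ca:
        name: client-ca-config-map <3>
      clientCommonNames:
      - my-auth-proxy
      headers:
      - X-Remote-User
----
<1> The proxy URL to redirect unauthenticated `/oauth/authorize` requests to for clients that expect a `WWW-Authenticate` challenge.
<2> The proxy URL to redirect browser-based clients to for interactive login.
<3> A config map in the `openshift-config` namespace that contains the PEM-encoded CA bundle used to verify the proxy client certificate.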
- -[discrete] -include::modules/identity-provider-apache-custom-proxy-configuration.adoc[leveloffset=+2] - -[discrete] -include::modules/identity-provider-configuring-apache-request-header.adoc[leveloffset=+2] diff --git a/authentication/identity_providers/images b/authentication/identity_providers/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/authentication/identity_providers/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/authentication/identity_providers/modules b/authentication/identity_providers/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/authentication/identity_providers/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/authentication/identity_providers/snippets b/authentication/identity_providers/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/authentication/identity_providers/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/authentication/images b/authentication/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/authentication/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/authentication/impersonating-system-admin.adoc b/authentication/impersonating-system-admin.adoc deleted file mode 100644 index 32843c9f3a2d..000000000000 --- a/authentication/impersonating-system-admin.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="impersonating-system-admin"] -= Impersonating the system:admin user -include::_attributes/common-attributes.adoc[] -:context: impersonating-system-admin - -toc::[] - -include::modules/authentication-api-impersonation.adoc[leveloffset=+1] - -include::modules/impersonation-system-admin-user.adoc[leveloffset=+1] - -include::modules/impersonation-system-admin-group.adoc[leveloffset=+1] diff --git a/authentication/index.adoc b/authentication/index.adoc deleted file mode 100644 index 691ea227852a..000000000000 --- a/authentication/index.adoc +++ /dev/null @@ -1,62 +0,0 @@ -[id="overview-of-authentication-authorization"] -= Overview of authentication and authorization -include::_attributes/common-attributes.adoc[] -:context: overview-of-authentication-authorization - -toc::[] - -include::modules/authentication-authorization-common-terms.adoc[leveloffset=+1] - -[id="authentication-overview"] -== About authentication in {product-title} -To control access to an {product-title} cluster, a cluster administrator can configure xref:../authentication/understanding-authentication.adoc#understanding-authentication[user authentication] and ensure only approved users access the cluster. - -To interact with an {product-title} cluster, users must first authenticate to the {product-title} API in some way. You can authenticate by providing an xref:../authentication/understanding-authentication.adoc#rbac-api-authentication_understanding-authentication[OAuth access token or an X.509 client certificate] in your requests to the {product-title} API. - -[NOTE] -==== -If you do not present a valid access token or certificate, your request is unauthenticated and you receive an HTTP 401 error. -==== -An administrator can configure authentication through the following tasks: - -* Configuring an identity provider: You can define any xref:../authentication/understanding-identity-provider.adoc#supported-identity-providers[supported identity provider in {product-title}] and add it to your cluster. 
-* xref:../authentication/configuring-internal-oauth.adoc#configuring-internal-oauth[Configuring the internal OAuth server]: The {product-title} control plane includes a built-in OAuth server that determines the user’s identity from the configured identity provider and creates an access token. You can configure the token duration and inactivity timeout, and customize the internal OAuth server URL. -+ -[NOTE] -==== -Users can xref:../authentication/managing-oauth-access-tokens.adoc#managing-oauth-access-tokens[view and manage OAuth tokens owned by them]. -==== -* Registering an OAuth client: {product-title} includes several xref:../authentication/configuring-oauth-clients.adoc#oauth-default-clients_configuring-oauth-clients[default OAuth clients]. You can xref:../authentication/configuring-oauth-clients.adoc#oauth-register-additional-client_configuring-oauth-clients[register and configure additional OAuth clients]. -+ -[NOTE] -==== -When users send a request for an OAuth token, they must specify either a default or custom OAuth client that receives and uses the token. -==== - -* Managing cloud provider credentials using the xref:../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator[Cloud Credentials Operator]: Cluster components use cloud provider credentials to get permissions required to perform cluster-related tasks. -* Impersonating a system admin user: You can grant cluster administrator permissions to a user by xref:../authentication/impersonating-system-admin.adoc#impersonating-system-admin[impersonating a system admin user]. - -[id="authorization-overview"] -== About authorization in {product-title} -Authorization involves determining whether the identified user has permissions to perform the requested action. - -Administrators can define permissions and assign them to users using the xref:../authentication/using-rbac.adoc#authorization-overview_using-rbac[RBAC objects, such as rules, roles, and bindings]. To understand how authorization works in {product-title}, see xref:../authentication/using-rbac.adoc#evaluating-authorization_using-rbac[Evaluating authorization]. - -You can also control access to an {product-title} cluster through xref:../authentication/using-rbac.adoc#rbac-projects-namespaces_using-rbac[projects and namespaces]. - -Along with controlling user access to a cluster, you can also control the actions a pod can perform and the resources it can access using xref:../authentication/managing-security-context-constraints.adoc#managing-pod-security-policies[security context constraints (SCCs)]. - -You can manage authorization for {product-title} through the following tasks: - -* Viewing xref:../authentication/using-rbac.adoc#viewing-local-roles_using-rbac[local] and xref:../authentication/using-rbac.adoc#viewing-cluster-roles_using-rbac[cluster] roles and bindings. -* Creating a xref:../authentication/using-rbac.adoc#creating-local-role_using-rbac[local role] and assigning it to a user or group. -* Creating a cluster role and assigning it to a user or group: {product-title} includes a set of xref:../authentication/using-rbac.adoc#default-roles_using-rbac[default cluster roles]. You can create additional xref:../authentication/using-rbac.adoc#creating-cluster-role_using-rbac[cluster roles] and xref:../authentication/using-rbac.adoc#adding-roles_using-rbac[add them to a user or group]. -* Creating a cluster-admin user: By default, your cluster has only one cluster administrator called `kubeadmin`. 
You can xref:../authentication/using-rbac.adoc#creating-cluster-admin_using-rbac[create another cluster administrator]. Before creating a cluster administrator, ensure that you have configured an identity provider. -+ -[NOTE] -==== -After creating the cluster admin user, xref:../authentication/remove-kubeadmin.adoc#removing-kubeadmin_removing-kubeadmin[delete the existing kubeadmin user] to improve cluster security. -==== -* Creating service accounts: xref:../authentication/understanding-and-creating-service-accounts.adoc#service-accounts-overview_understanding-service-accounts[Service accounts] provide a flexible way to control API access without sharing a regular user’s credentials. A user can xref:../authentication/understanding-and-creating-service-accounts.adoc#service-accounts-managing_understanding-service-accounts[create and use a service account in applications] and also as xref:../authentication/using-service-accounts-as-oauth-client.adoc#using-service-accounts-as-oauth-client[an OAuth client]. -* xref:../authentication/tokens-scoping.adoc#tokens-scoping[Scoping tokens]: A scoped token is a token that identifies as a specific user who can perform only specific operations. You can create scoped tokens to delegate some of your permissions to another user or a service account. -* Syncing LDAP groups: You can manage user groups in one place by xref:../authentication/ldap-syncing.adoc#ldap-syncing[syncing the groups stored in an LDAP server] with the {product-title} user groups. diff --git a/authentication/ldap-syncing.adoc b/authentication/ldap-syncing.adoc deleted file mode 100644 index 9d788b1540e9..000000000000 --- a/authentication/ldap-syncing.adoc +++ /dev/null @@ -1,56 +0,0 @@ -:_content-type: ASSEMBLY -[id="ldap-syncing"] -= Syncing LDAP groups -include::_attributes/common-attributes.adoc[] -:context: ldap-syncing-groups - -toc::[] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -As an administrator, -endif::[] -you can use groups to manage users, change -their permissions, and enhance collaboration. Your organization may have already -created user groups and stored them in an LDAP server. {product-title} can sync -those LDAP records with internal {product-title} records, enabling you to manage -your groups in one place. {product-title} currently supports group sync with -LDAP servers using three common schemas for defining group membership: RFC 2307, -Active Directory, and augmented Active Directory. - -For more information on configuring LDAP, see -xref:../authentication/identity_providers/configuring-ldap-identity-provider.adoc#configuring-ldap-identity-provider[Configuring an LDAP identity provider]. - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -[NOTE] -==== -You must have `cluster-admin` privileges to sync groups. 
-==== -endif::[] - -include::modules/ldap-syncing-about.adoc[leveloffset=+1] -include::modules/ldap-syncing-config-rfc2307.adoc[leveloffset=+2] -include::modules/ldap-syncing-config-activedir.adoc[leveloffset=+2] -include::modules/ldap-syncing-config-augmented-activedir.adoc[leveloffset=+2] -include::modules/ldap-syncing-running.adoc[leveloffset=+1] -include::modules/ldap-syncing-running-all-ldap.adoc[leveloffset=+2] -include::modules/ldap-syncing-running-openshift.adoc[leveloffset=+2] -include::modules/ldap-syncing-running-subset.adoc[leveloffset=+2] -include::modules/ldap-syncing-pruning.adoc[leveloffset=+1] - -// Automatically syncing LDAP groups -include::modules/ldap-auto-syncing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../authentication/identity_providers/configuring-ldap-identity-provider.adoc#configuring-ldap-identity-provider[Configuring an LDAP identity provider] -* xref:../nodes/jobs/nodes-nodes-jobs.adoc#nodes-nodes-jobs-creating-cron_nodes-nodes-jobs[Creating cron jobs] - -include::modules/ldap-syncing-examples.adoc[leveloffset=+1] -include::modules/ldap-syncing-rfc2307.adoc[leveloffset=+2] -include::modules/ldap-syncing-rfc2307-user-defined.adoc[leveloffset=+2] -include::modules/ldap-syncing-rfc2307-user-defined-error.adoc[leveloffset=+2] -include::modules/ldap-syncing-activedir.adoc[leveloffset=+2] -include::modules/ldap-syncing-augmented-activedir.adoc[leveloffset=+2] -include::modules/ldap-syncing-nesting.adoc[leveloffset=+2] -include::modules/ldap-syncing-spec.adoc[leveloffset=+1] diff --git a/authentication/managing-oauth-access-tokens.adoc b/authentication/managing-oauth-access-tokens.adoc deleted file mode 100644 index 10867b018e6f..000000000000 --- a/authentication/managing-oauth-access-tokens.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -[id="managing-oauth-access-tokens"] -= Managing user-owned OAuth access tokens -include::_attributes/common-attributes.adoc[] -:context: managing-oauth-access-tokens - -toc::[] - -Users can review their own OAuth access tokens and delete any that are no longer needed. - -// Listing user-owned OAuth access tokens -include::modules/oauth-list-tokens.adoc[leveloffset=+1] - -// Viewing the details of a user-owned OAuth access token -include::modules/oauth-view-details-tokens.adoc[leveloffset=+1] - -// Deleting user-owned OAuth access tokens -include::modules/oauth-delete-tokens.adoc[leveloffset=+1] diff --git a/authentication/managing-security-context-constraints.adoc b/authentication/managing-security-context-constraints.adoc deleted file mode 100644 index 638cbba748b4..000000000000 --- a/authentication/managing-security-context-constraints.adoc +++ /dev/null @@ -1,55 +0,0 @@ -:_content-type: ASSEMBLY -[id="managing-pod-security-policies"] -= Managing security context constraints -include::_attributes/common-attributes.adoc[] -:context: configuring-internal-oauth - -toc::[] - -In {product-title}, you can use security context constraints (SCCs) to control permissions for the pods in your cluster. - -Default SCCs are created during installation and when you install some Operators or other components. As a cluster administrator, you can also create your own SCCs by using the OpenShift CLI (`oc`). - -[IMPORTANT] -==== -Do not modify the default SCCs. Customizing the default SCCs can lead to issues when some of the platform pods deploy or -ifndef::openshift-rosa[] -{product-title} -endif::[] -ifdef::openshift-rosa[] -ROSA -endif::openshift-rosa[] -is upgraded. 
Additionally, the default SCC values are reset to the defaults during some cluster upgrades, which discards all customizations to those SCCs. -ifdef::openshift-origin,openshift-enterprise,openshift-webscale,openshift-dedicated,openshift-rosa[] - -Instead of modifying the default SCCs, create and modify your own SCCs as needed. For detailed steps, see xref:../authentication/managing-security-context-constraints.adoc#security-context-constraints-creating_configuring-internal-oauth[Creating security context constraints]. -endif::[] -==== - -ifdef::openshift-dedicated[] -[NOTE] -==== -In {product-title} deployments, you can create your own SCCs only for clusters that use the Customer Cloud Subscription (CCS) model. You cannot create SCCs for {product-title} clusters that use a Red Hat cloud account, because SCC resource creation requires `cluster-admin` privileges. -==== -endif::openshift-dedicated[] - -include::modules/security-context-constraints-about.adoc[leveloffset=+1] -include::modules/security-context-constraints-pre-allocated-values.adoc[leveloffset=+1] -include::modules/security-context-constraints-example.adoc[leveloffset=+1] -include::modules/security-context-constraints-creating.adoc[leveloffset=+1] -include::modules/security-context-constraints-rbac.adoc[leveloffset=+1] -include::modules/security-context-constraints-command-reference.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_configuring-internal-oauth"] -== Additional resources - -ifndef::openshift-dedicated,openshift-rosa[] -* xref:../support/getting-support.adoc#getting-support[Getting support] -endif::[] -ifdef::openshift-dedicated[] -* xref:../osd_architecture/osd-support.adoc#osd-getting-support[Getting support] -endif::[] -ifdef::openshift-rosa[] -* xref:../rosa_architecture/rosa-getting-support.adoc#rosa-getting-support[Getting support] -endif::[] diff --git a/authentication/managing_cloud_provider_credentials/_attributes b/authentication/managing_cloud_provider_credentials/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/authentication/managing_cloud_provider_credentials/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc b/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc deleted file mode 100644 index 1b786b225465..000000000000 --- a/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc +++ /dev/null @@ -1,110 +0,0 @@ -:_content-type: ASSEMBLY -[id="about-cloud-credential-operator"] -= About the Cloud Credential Operator -include::_attributes/common-attributes.adoc[] -:context: about-cloud-credential-operator - -toc::[] - -The Cloud Credential Operator (CCO) manages cloud provider credentials as custom resource definitions (CRDs). The CCO syncs on `CredentialsRequest` custom resources (CRs) to allow {product-title} components to request cloud provider credentials with the specific permissions that are required for the cluster to run. - -By setting different values for the `credentialsMode` parameter in the `install-config.yaml` file, the CCO can be configured to operate in several different modes. If no mode is specified, or the `credentialsMode` parameter is set to an empty string (`""`), the CCO operates in its default mode. 
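A sketch of where the `credentialsMode` parameter sits in `install-config.yaml`; the values shown are only examples, and omitting the parameter or setting it to an empty string keeps the default behavior described later in this assembly.

[source,yaml]
----
apiVersion: v1
baseDomain: example.com
metadata:
  name: example-cluster
credentialsMode: Mint <1>
# platform, networking, and pull secret configuration omitted
----
<1> One of `Mint`, `Passthrough`, or `Manual`, subject to the support matrix in the following section.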
- -[id="about-cloud-credential-operator-modes_{context}"] -== Modes - -By setting different values for the `credentialsMode` parameter in the `install-config.yaml` file, the CCO can be configured to operate in _mint_, _passthrough_, or _manual_ mode. These options provide transparency and flexibility in how the CCO uses cloud credentials to process `CredentialsRequest` CRs in the cluster, and allow the CCO to be configured to suit the security requirements of your organization. Not all CCO modes are supported for all cloud providers. - -* **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#cco-mode-mint[Mint]**: In mint mode, the CCO uses the provided admin-level cloud credential to create new credentials for components in the cluster with only the specific permissions that are required. - -* **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc#cco-mode-passthrough[Passthrough]**: In passthrough mode, the CCO passes the provided cloud credential to the components that request cloud credentials. - -* **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc#cco-mode-manual[Manual]**: In manual mode, a user manages cloud credentials instead of the CCO. - -** **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-mode-sts[Manual with AWS Security Token Service]**: In manual mode, you can configure an AWS cluster to use Amazon Web Services Security Token Service (AWS STS). With this configuration, the CCO uses temporary credentials for different components. - -** **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#cco-mode-gcp-workload-identity[Manual with GCP Workload Identity]**: In manual mode, you can configure a GCP cluster to use GCP Workload Identity. With this configuration, the CCO uses temporary credentials for different components. - -.CCO mode support matrix -[cols="<.^2,^.^1,^.^1,^.^1"] -|==== -|Cloud provider |Mint |Passthrough |Manual - -|{alibaba} -| -| -|X - -|Amazon Web Services (AWS) -|X -|X -|X - - -|Microsoft Azure -| -|X ^[1]^ -|X - -|Google Cloud Platform (GCP) -|X -|X -|X - -|IBM Cloud -| -| -|X - -|Nutanix -| -| -|X - -|{rh-openstack-first} -| -|X -| - -|VMware vSphere -| -|X -| - -|==== -[.small] --- -1. Manual mode is the only supported CCO configuration for Microsoft Azure Stack Hub. --- - -[id="cco-determine-mode_{context}"] -== Determining the Cloud Credential Operator mode - -For platforms that support using the CCO in multiple modes, you can determine what mode the CCO is configured to use by using the web console or the CLI. - -.Determining the CCO configuration -image::334_OpenShift_cluster_updating_and_CCO_workflows_0523_4.11_A.png[Decision tree showing how to determine the configured CCO credentials mode for your cluster.] - -//Determining the Cloud Credential Operator mode by using the web console -include::modules/cco-determine-mode-gui.adoc[leveloffset=+2] - -//Determining the Cloud Credential Operator mode by using the CLI -include::modules/cco-determine-mode-cli.adoc[leveloffset=+2] - -[id="about-cloud-credential-operator-default_{context}"] -== Default behavior -For platforms on which multiple modes are supported (AWS, Azure, and GCP), when the CCO operates in its default mode, it checks the provided credentials dynamically to determine for which mode they are sufficient to process `CredentialsRequest` CRs. 
- -By default, the CCO determines whether the credentials are sufficient for mint mode, which is the preferred mode of operation, and uses those credentials to create appropriate credentials for components in the cluster. If the credentials are not sufficient for mint mode, it determines whether they are sufficient for passthrough mode. If the credentials are not sufficient for passthrough mode, the CCO cannot adequately process `CredentialsRequest` CRs. - -If the provided credentials are determined to be insufficient during installation, the installation fails. For AWS, the installer fails early in the process and indicates which required permissions are missing. Other providers might not provide specific information about the cause of the error until errors are encountered. - -If the credentials are changed after a successful installation and the CCO determines that the new credentials are insufficient, the CCO puts conditions on any new `CredentialsRequest` CRs to indicate that it cannot process them because of the insufficient credentials. - -To resolve insufficient credentials issues, provide a credential with sufficient permissions. If an error occurred during installation, try installing again. For issues with new `CredentialsRequest` CRs, wait for the CCO to try to process the CR again. As an alternative, you can manually create IAM for xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[AWS], xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Azure], and xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[GCP]. - -[role="_additional-resources"] -[id="additional-resources_about-cloud-credential-operator_{context}"] -== Additional resources - -* xref:../../operators/operator-reference.adoc#cloud-credential-operator_cluster-operators-ref[Cluster Operators reference page for the Cloud Credential Operator] diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc deleted file mode 100644 index 020f87e370bc..000000000000 --- a/authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc +++ /dev/null @@ -1,124 +0,0 @@ -:_content-type: ASSEMBLY -[id="cco-mode-gcp-workload-identity"] -= Using manual mode with GCP Workload Identity -include::_attributes/common-attributes.adoc[] -:context: cco-mode-gcp-workload-identity - -toc::[] - -Manual mode with GCP Workload Identity is supported for Google Cloud Platform (GCP). - -[NOTE] -==== -This credentials strategy is supported for only new {product-title} clusters and must be configured during installation. You cannot reconfigure an existing cluster that uses a different credentials strategy to use this feature. -==== - -[id="gcp-workload-identity-mode-about_{context}"] -== About manual mode with GCP Workload Identity - -In manual mode with GCP Workload Identity, the individual {product-title} cluster components can impersonate IAM service accounts using short-term, limited-privilege credentials. - -Requests for new and refreshed credentials are automated by using an appropriately configured OpenID Connect (OIDC) identity provider, combined with IAM service accounts. {product-title} signs service account tokens that are trusted by GCP, and can be projected into a pod and used for authentication. Tokens are refreshed after one hour by default. 
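The projected service account tokens mentioned above reach component pods through a `serviceAccountToken` projected volume. A generic Kubernetes sketch of that projection follows; the volume name, audience, and expiry are illustrative and not taken from this assembly.

[source,yaml]
----
# Fragment of a pod specification
volumes:
- name: bound-sa-token
  projected:
    sources:
    - serviceAccountToken:
        audience: openshift <1>
        expirationSeconds: 3600
        path: token <2>
----
<1> The audience that the token is bound to; the identity provider configuration must accept the same audience.
<2> The token file name; components read it from the mount path referenced in the credential configuration, for example `/var/run/secrets/openshift/serviceaccount/token`.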
- -.Workload Identity authentication flow -image::347_OpenShift_credentials_with_STS_updates_0623_GCP.png[Detailed authentication flow between GCP and the cluster when using GCP Workload Identity] - -Using manual mode with GCP Workload Identity changes the content of the GCP credentials that are provided to individual {product-title} components. - -.GCP secret format - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: <1> - name: <2> -data: - service_account.json: <3> ----- -<1> The namespace for the component. -<2> The name of the component secret. -<3> The Base64 encoded service account. - -.Content of the Base64 encoded `service_account.json` file using long-lived credentials - -[source,json] ----- -{ - "type": "service_account", <1> - "project_id": "", - "private_key_id": "", - "private_key": "", <2> - "client_email": "", - "client_id": "", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://oauth2.googleapis.com/token", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/" -} ----- -<1> The credential type is `service_account`. -<2> The private RSA key that is used to authenticate to GCP. This key must be kept secure and is not rotated. - -.Content of the Base64 encoded `service_account.json` file using GCP Workload Identity - -[source,json] ----- -{ - "type": "external_account", <1> - "audience": "//iam.googleapis.com/projects/123456789/locations/global/workloadIdentityPools/test-pool/providers/test-provider", <2> - "subject_token_type": "urn:ietf:params:oauth:token-type:jwt", - "token_url": "https://sts.googleapis.com/v1/token", - "service_account_impersonation_url": "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/:generateAccessToken", <3> - "credential_source": { - "file": "", <4> - "format": { - "type": "text" - } - } -} ----- -<1> The credential type is `external_account`. -<2> The target audience is the GCP Workload Identity provider. -<3> The resource URL of the service account that can be impersonated with these credentials. -<4> The path to the service account token inside the pod. By convention, this is `/var/run/secrets/openshift/serviceaccount/token` for {product-title} components. - -//Supertask: Installing an OCP cluster configured for manual mode with GCP Workload Identity -[id="gcp-workload-identity-mode-installing"] -== Installing an {product-title} cluster configured for manual mode with GCP Workload Identity - -To install a cluster that is configured to use the Cloud Credential Operator (CCO) in manual mode with GCP Workload Identity: - -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#cco-ccoctl-configuring_cco-mode-gcp-workload-identity[Configure the Cloud Credential Operator utility]. -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#cco-ccoctl-creating-at-once_cco-mode-gcp-workload-identity[Create the required GCP resources]. -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#sts-mode-installing-manual-run-installer_cco-mode-gcp-workload-identity[Run the {product-title} installer]. -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#sts-mode-installing-verifying_cco-mode-gcp-workload-identity[Verify that the cluster is using short-lived credentials]. 
- -[NOTE] -==== -Because the cluster is operating in manual mode when using GCP Workload Identity, it is not able to create new credentials for components with the permissions that they require. When upgrading to a different minor version of {product-title}, there are often new GCP permission requirements. Before upgrading a cluster that is using GCP Workload Identity, the cluster administrator must manually ensure that the GCP permissions are sufficient for existing components and available to any new components. -==== - -[role="_additional-resources"] -.Additional resources - -* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#cco-ccoctl-configuring_preparing-manual-creds-update[Configuring the Cloud Credential Operator utility for a cluster update] - -//Task part 1: Configuring the Cloud Credential Operator utility -include::modules/cco-ccoctl-configuring.adoc[leveloffset=+2] - -//Task part 2: Creating the required GCP resources all at once -include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+2] - -//Task part 3: Run the OCP installer -include::modules/sts-mode-installing-manual-run-installer.adoc[leveloffset=+2] - -//Task part 4: Verify that the cluster is using short-lived credentials -include::modules/sts-mode-installing-verifying.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_{context}"] -== Additional resources - -* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc deleted file mode 100644 index 5b403c38979a..000000000000 --- a/authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: ASSEMBLY -[id="cco-mode-manual"] -= Using manual mode -include::_attributes/common-attributes.adoc[] -:context: cco-mode-manual - -toc::[] - -Manual mode is supported for Alibaba Cloud, Amazon Web Services (AWS), Microsoft Azure, IBM Cloud, and Google Cloud Platform (GCP). - -In manual mode, a user manages cloud credentials instead of the Cloud Credential Operator (CCO). To use this mode, you must examine the `CredentialsRequest` CRs in the release image for the version of {product-title} that you are running or installing, create corresponding credentials in the underlying cloud provider, and create Kubernetes Secrets in the correct namespaces to satisfy all `CredentialsRequest` CRs for the cluster's cloud provider. - -Using manual mode allows each cluster component to have only the permissions it requires, without storing an administrator-level credential in the cluster. This mode also does not require connectivity to the AWS public IAM endpoint. However, you must manually reconcile permissions with new release images for every upgrade. 
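To illustrate what you reconcile against, the following is a sketch of a `CredentialsRequest` CR as it might appear in a release image; the component name, namespace, and permissions are placeholders. In manual mode, you create the secret referenced by `spec.secretRef` yourself with credentials that satisfy the listed permissions.

[source,yaml]
----
apiVersion: cloudcredential.openshift.io/v1
kind: CredentialsRequest
metadata:
  name: example-component
  namespace: openshift-cloud-credential-operator
spec:
  secretRef:
    name: example-component-credentials <1>
    namespace: openshift-example-component <1>
  providerSpec:
    apiVersion: cloudcredential.openshift.io/v1
    kind: AWSProviderSpec <2>
    statementEntries:
    - effect: Allow
      action:
      - ec2:DescribeInstances
      resource: "*"
----
<1> The name and namespace of the secret that must exist for the component to function.
<2> The provider-specific spec; other clouds use their own kinds, such as `AzureProviderSpec` or `GCPProviderSpec`.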
- -For information about configuring your cloud provider to use manual mode, see the manual credentials management options for your cloud provider: - -* xref:../../installing/installing_alibaba/manually-creating-alibaba-ram.adoc#manually-creating-alibaba-ram[Manually creating RAM resources for Alibaba Cloud] -* xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[Manually creating IAM for AWS] -* xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Manually creating IAM for Azure] -* xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[Manually creating IAM for GCP] -* xref:../../installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for IBM Cloud] -* xref:../../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#manually-create-iam-nutanix_installing-nutanix-installer-provisioned[Configuring IAM for Nutanix] - -[id="manual-mode-sts-blurb"] -== Manual mode with cloud credentials created and managed outside of the cluster - -An AWS or GCP cluster that uses manual mode might be configured to create and manage cloud credentials from outside of the cluster using the AWS Security Token Service (STS) or GCP Workload Identity. With this configuration, the CCO uses temporary credentials for different components. - -For more information, see xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-mode-sts[Using manual mode with Amazon Web Services Security Token Service] or xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#cco-mode-gcp-workload-identity[Using manual mode with GCP Workload Identity]. 
- -//Updating cloud provider resources with manually maintained credentials -include::modules/manually-maintained-credentials-upgrade.adoc[leveloffset=+1] - -//Indicating that the cluster is ready to upgrade -include::modules/cco-manual-upgrade-annotation.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_cco-mode-manual"] -== Additional resources - -* xref:../../installing/installing_alibaba/manually-creating-alibaba-ram.adoc#manually-creating-alibaba-ram[Manually creating RAM resources for Alibaba Cloud] -* xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[Manually creating IAM for AWS] -* xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-mode-sts[Using manual mode with Amazon Web Services Security Token Service] -* xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Manually creating IAM for Azure] -* xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[Manually creating IAM for GCP] -* xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#cco-mode-gcp-workload-identity[Using manual mode with GCP Workload Identity] -* xref:../../installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for IBM Cloud] -* xref:../../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#manually-create-iam-nutanix_installing-nutanix-installer-provisioned[Configuring IAM for Nutanix] diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc deleted file mode 100644 index fd756d2e74d6..000000000000 --- a/authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc +++ /dev/null @@ -1,69 +0,0 @@ -:_content-type: ASSEMBLY -[id="cco-mode-mint"] -= Using mint mode -include::_attributes/common-attributes.adoc[] -:context: cco-mode-mint - -toc::[] - -Mint mode is supported for Amazon Web Services (AWS) and Google Cloud Platform (GCP). - -Mint mode is the default mode on the platforms for which it is supported. In this mode, the Cloud Credential Operator (CCO) uses the provided administrator-level cloud credential to create new credentials for components in the cluster with only the specific permissions that are required. - -If the credential is not removed after installation, it is stored and used by the CCO to process `CredentialsRequest` CRs for components in the cluster and create new credentials for each with only the specific permissions that are required. The continuous reconciliation of cloud credentials in mint mode allows actions that require additional credentials or permissions, such as upgrading, to proceed. - -Mint mode stores the administrator-level credential in the cluster `kube-system` namespace. If this approach does not meet the security requirements of your organization, see _Alternatives to storing administrator-level secrets in the kube-system project_ for xref:../../installing/installing_aws/manually-creating-iam.adoc#alternatives-to-storing-admin-secrets-in-kube-system_manually-creating-iam-aws[AWS] or xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#alternatives-to-storing-admin-secrets-in-kube-system_manually-creating-iam-gcp[GCP]. 
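For reference, the administrator-level credential that mint mode relies on is stored as a root secret in the `kube-system` namespace. A sketch for AWS, where the root secret is conventionally named `aws-creds`:

[source,yaml]
----
apiVersion: v1
kind: Secret
metadata:
  namespace: kube-system
  name: aws-creds
data:
  aws_access_key_id: <1>
  aws_secret_access_key: <2>
----
<1> The Base64-encoded access key ID.
<2> The Base64-encoded secret access key.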
- -[id="mint-mode-permissions"] -== Mint mode permissions requirements -When using the CCO in mint mode, ensure that the credential you provide meets the requirements of the cloud on which you are running or installing {product-title}. If the provided credentials are not sufficient for mint mode, the CCO cannot create an IAM user. - -[id="mint-mode-permissions-aws"] -=== Amazon Web Services (AWS) permissions -The credential you provide for mint mode in AWS must have the following permissions: - -* `iam:CreateAccessKey` -* `iam:CreateUser` -* `iam:DeleteAccessKey` -* `iam:DeleteUser` -* `iam:DeleteUserPolicy` -* `iam:GetUser` -* `iam:GetUserPolicy` -* `iam:ListAccessKeys` -* `iam:PutUserPolicy` -* `iam:TagUser` -* `iam:SimulatePrincipalPolicy` - -[id="mint-mode-permissions-gcp"] -=== Google Cloud Platform (GCP) permissions -The credential you provide for mint mode in GCP must have the following permissions: - -* `resourcemanager.projects.get` -* `serviceusage.services.list` -* `iam.serviceAccountKeys.create` -* `iam.serviceAccountKeys.delete` -* `iam.serviceAccounts.create` -* `iam.serviceAccounts.delete` -* `iam.serviceAccounts.get` -* `iam.roles.get` -* `resourcemanager.projects.getIamPolicy` -* `resourcemanager.projects.setIamPolicy` - -//Admin credentials root secret format -include::modules/admin-credentials-root-secret-formats.adoc[leveloffset=+1] - -//Mint Mode with removal or rotation of the admin credential -include::modules/mint-mode-with-removal-of-admin-credential.adoc[leveloffset=+1] - -//Rotating cloud provider credentials manually -include::modules/manually-rotating-cloud-creds.adoc[leveloffset=+2] - -//Removing cloud provider credentials -include::modules/manually-removing-cloud-creds.adoc[leveloffset=+2] - - -[role="_additional-resources"] -== Additional resources - -* xref:../../installing/installing_aws/manually-creating-iam.adoc#alternatives-to-storing-admin-secrets-in-kube-system_manually-creating-iam-aws[Alternatives to storing administrator-level secrets in the kube-system project] for AWS -* xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#alternatives-to-storing-admin-secrets-in-kube-system_manually-creating-iam-gcp[Alternatives to storing administrator-level secrets in the kube-system project] for GCP diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc deleted file mode 100644 index 95fdd8b65799..000000000000 --- a/authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc +++ /dev/null @@ -1,106 +0,0 @@ -:_content-type: ASSEMBLY -[id="cco-mode-passthrough"] -= Using passthrough mode -include::_attributes/common-attributes.adoc[] -:context: cco-mode-passthrough - -toc::[] - -Passthrough mode is supported for Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), {rh-openstack-first}, and VMware vSphere. - -In passthrough mode, the Cloud Credential Operator (CCO) passes the provided cloud credential to the components that request cloud credentials. The credential must have permissions to perform the installation and complete the operations that are required by components in the cluster, but does not need to be able to create new credentials. The CCO does not attempt to create additional limited-scoped credentials in passthrough mode. 
- -[NOTE] -==== -xref:../../authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc#cco-mode-manual[Manual mode] is the only supported CCO configuration for Microsoft Azure Stack Hub. -==== - -[id="passthrough-mode-permissions"] -== Passthrough mode permissions requirements -When using the CCO in passthrough mode, ensure that the credential you provide meets the requirements of the cloud on which you are running or installing {product-title}. If the provided credentials the CCO passes to a component that creates a `CredentialsRequest` CR are not sufficient, that component will report an error when it tries to call an API that it does not have permissions for. - -[id="passthrough-mode-permissions-aws"] -=== Amazon Web Services (AWS) permissions -The credential you provide for passthrough mode in AWS must have all the requested permissions for all `CredentialsRequest` CRs that are required by the version of {product-title} you are running or installing. - -To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[Manually creating IAM for AWS]. - -[id="passthrough-mode-permissions-azure"] -=== Microsoft Azure permissions -The credential you provide for passthrough mode in Azure must have all the requested permissions for all `CredentialsRequest` CRs that are required by the version of {product-title} you are running or installing. - -To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Manually creating IAM for Azure]. - -[id="passthrough-mode-permissions-gcp"] -=== Google Cloud Platform (GCP) permissions -The credential you provide for passthrough mode in GCP must have all the requested permissions for all `CredentialsRequest` CRs that are required by the version of {product-title} you are running or installing. - -To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[Manually creating IAM for GCP]. - -[id="passthrough-mode-permissions-rhosp"] -=== {rh-openstack-first} permissions -To install an {product-title} cluster on {rh-openstack}, the CCO requires a credential with the permissions of a `member` user role. - -[id="passthrough-mode-permissions-vsware"] -=== VMware vSphere permissions -To install an {product-title} cluster on VMware vSphere, the CCO requires a credential with the following vSphere privileges: - -.Required vSphere privileges -[cols="1,2"] -|==== -|Category |Privileges - -|Datastore -|_Allocate space_ - -|Folder -|_Create folder_, _Delete folder_ - -|vSphere Tagging -|All privileges - -|Network -|_Assign network_ - -|Resource -|_Assign virtual machine to resource pool_ - -|Profile-driven storage -|All privileges - -|vApp -|All privileges - -|Virtual machine -|All privileges - -|==== - -//Admin credentials root secret format -include::modules/admin-credentials-root-secret-formats.adoc[leveloffset=+1] - -[id="passthrough-mode-maintenance"] -== Passthrough mode credential maintenance -If `CredentialsRequest` CRs change over time as the cluster is upgraded, you must manually update the passthrough mode credential to meet the requirements. To avoid credentials issues during an upgrade, check the `CredentialsRequest` CRs in the release image for the new version of {product-title} before upgrading. 
To locate the `CredentialsRequest` CRs that are required for your cloud provider, see _Manually creating IAM_ for xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[AWS], xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Azure], or xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[GCP]. - -//Rotating cloud provider credentials manually -include::modules/manually-rotating-cloud-creds.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc[vSphere CSI Driver Operator] - -[id="passthrough-mode-reduce-permissions"] -== Reducing permissions after installation -When using passthrough mode, each component has the same permissions used by all other components. If you do not reduce the permissions after installing, all components have the broad permissions that are required to run the installer. - -After installation, you can reduce the permissions on your credential to only those that are required to run the cluster, as defined by the `CredentialsRequest` CRs in the release image for the version of {product-title} that you are using. - -To locate the `CredentialsRequest` CRs that are required for AWS, Azure, or GCP and learn how to change the permissions the CCO uses, see _Manually creating IAM_ for xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[AWS], xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Azure], or xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[GCP]. - -[role="_additional-resources"] -== Additional resources - -* xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[Manually creating IAM for AWS] -* xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Manually creating IAM for Azure] -* xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[Manually creating IAM for GCP] diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc deleted file mode 100644 index 9e1771ee9d60..000000000000 --- a/authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc +++ /dev/null @@ -1,114 +0,0 @@ -:_content-type: ASSEMBLY -[id="cco-mode-sts"] -= Using manual mode with Amazon Web Services Security Token Service -include::_attributes/common-attributes.adoc[] -:context: cco-mode-sts - -toc::[] - -Manual mode with STS is supported for Amazon Web Services (AWS). - -[NOTE] -==== -This credentials strategy is supported for only new {product-title} clusters and must be configured during installation. You cannot reconfigure an existing cluster that uses a different credentials strategy to use this feature. -==== - -[id="sts-mode-about_{context}"] -== About manual mode with AWS Security Token Service - -In manual mode with STS, the individual {product-title} cluster components use AWS Security Token Service (STS) to assign components IAM roles that provide short-term, limited-privilege security credentials. These credentials are associated with IAM roles that are specific to each component that makes AWS API calls. 
- -Requests for new and refreshed credentials are automated by using an appropriately configured AWS IAM OpenID Connect (OIDC) identity provider, combined with AWS IAM roles. {product-title} signs service account tokens that are trusted by AWS IAM, and can be projected into a pod and used for authentication. Tokens are refreshed after one hour. - -.STS authentication flow -image::347_OpenShift_credentials_with_STS_updates_0623_AWS.png[Detailed authentication flow between AWS and the cluster when using AWS STS] - -Using manual mode with STS changes the content of the AWS credentials that are provided to individual {product-title} components. - -.AWS secret format using long-lived credentials - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: <1> - name: <2> -data: - aws_access_key_id: - aws_secret_access_key: ----- -<1> The namespace for the component. -<2> The name of the component secret. - -.AWS secret format with STS - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: <1> - name: <2> -stringData: - credentials: |- - [default] - sts_regional_endpoints = regional - role_name: <3> - web_identity_token_file: <4> ----- -<1> The namespace for the component. -<2> The name of the component secret. -<3> The IAM role for the component. -<4> The path to the service account token inside the pod. By convention, this is `/var/run/secrets/openshift/serviceaccount/token` for {product-title} components. - -//Supertask: Installing an OCP cluster configured for manual mode with STS -[id="sts-mode-installing_{context}"] -== Installing an {product-title} cluster configured for manual mode with STS - -To install a cluster that is configured to use the Cloud Credential Operator (CCO) in manual mode with STS: - -//[pre-4.8]. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#sts-mode-installing-manual-config_cco-mode-sts[Create the required AWS resources] -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-ccoctl-configuring_cco-mode-sts[Configure the Cloud Credential Operator utility]. -. Create the required AWS resources xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-ccoctl-creating-individually_cco-mode-sts[individually], or xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-ccoctl-creating-at-once_cco-mode-sts[with a single command]. -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#sts-mode-installing-manual-run-installer_cco-mode-sts[Run the {product-title} installer]. -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#sts-mode-installing-verifying_cco-mode-sts[Verify that the cluster is using short-lived credentials]. - -[NOTE] -==== -Because the cluster is operating in manual mode when using STS, it is not able to create new credentials for components with the permissions that they require. When upgrading to a different minor version of {product-title}, there are often new AWS permission requirements. Before upgrading a cluster that is using STS, the cluster administrator must manually ensure that the AWS permissions are sufficient for existing components and available to any new components. 
-==== - -[role="_additional-resources"] -.Additional resources - -* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#cco-ccoctl-configuring_preparing-manual-creds-update[Configuring the Cloud Credential Operator utility for a cluster update] - -//[pre-4.8]Task part 1: Creating AWS resources manually -//include::modules/sts-mode-installing-manual-config.adoc[leveloffset=+2] - -//Task part 1: Configuring the Cloud Credential Operator utility -include::modules/cco-ccoctl-configuring.adoc[leveloffset=+2] - -[id="sts-mode-create-aws-resources-ccoctl_{context}"] -=== Creating AWS resources with the Cloud Credential Operator utility - -You can use the CCO utility (`ccoctl`) to create the required AWS resources xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-ccoctl-creating-individually_cco-mode-sts[individually], or xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-ccoctl-creating-at-once_cco-mode-sts[with a single command]. - -//Task part 2a: Creating the required AWS resources individually -include::modules/cco-ccoctl-creating-individually.adoc[leveloffset=+3] - -//Task part 2b: Creating the required AWS resources all at once -include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+3] - -//Task part 3: Run the OCP installer -include::modules/sts-mode-installing-manual-run-installer.adoc[leveloffset=+2] - -//Task part 4: Verify that the cluster is using short-lived credentials -include::modules/sts-mode-installing-verifying.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_{context}"] -== Additional resources - -* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] diff --git a/authentication/managing_cloud_provider_credentials/images b/authentication/managing_cloud_provider_credentials/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/authentication/managing_cloud_provider_credentials/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/authentication/managing_cloud_provider_credentials/modules b/authentication/managing_cloud_provider_credentials/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/authentication/managing_cloud_provider_credentials/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/authentication/managing_cloud_provider_credentials/snippets b/authentication/managing_cloud_provider_credentials/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/authentication/managing_cloud_provider_credentials/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/authentication/modules b/authentication/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/authentication/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/authentication/remove-kubeadmin.adoc b/authentication/remove-kubeadmin.adoc deleted file mode 100644 index 7557e91e4820..000000000000 --- a/authentication/remove-kubeadmin.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="removing-kubeadmin"] -= Removing the kubeadmin user -include::_attributes/common-attributes.adoc[] -:context: removing-kubeadmin - -toc::[] - -include::modules/authentication-kubeadmin.adoc[leveloffset=+1] - 
-include::modules/authentication-remove-kubeadmin.adoc[leveloffset=+1] diff --git a/authentication/snippets b/authentication/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/authentication/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/authentication/tokens-scoping.adoc b/authentication/tokens-scoping.adoc deleted file mode 100644 index 126481d224da..000000000000 --- a/authentication/tokens-scoping.adoc +++ /dev/null @@ -1,9 +0,0 @@ -:_content-type: ASSEMBLY -[id="tokens-scoping"] -= Scoping tokens -include::_attributes/common-attributes.adoc[] -:context: configuring-internal-oauth - -toc::[] - -include::modules/tokens-scoping-about.adoc[leveloffset=+1] diff --git a/authentication/understanding-and-creating-service-accounts.adoc b/authentication/understanding-and-creating-service-accounts.adoc deleted file mode 100644 index 86fb149c7c2e..000000000000 --- a/authentication/understanding-and-creating-service-accounts.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-and-creating-service-accounts"] -= Understanding and creating service accounts -include::_attributes/common-attributes.adoc[] -:context: understanding-service-accounts - -toc::[] - -include::modules/service-accounts-overview.adoc[leveloffset=+1] - -// include::modules/service-accounts-enabling-authentication.adoc[leveloffset=+1] - -include::modules/service-accounts-creating.adoc[leveloffset=+1] - -// include::modules/service-accounts-configuration-parameters.adoc[leveloffset=+1] - -include::modules/service-accounts-granting-roles.adoc[leveloffset=+1] diff --git a/authentication/understanding-and-managing-pod-security-admission.adoc b/authentication/understanding-and-managing-pod-security-admission.adoc deleted file mode 100644 index 4bf7dcd953c2..000000000000 --- a/authentication/understanding-and-managing-pod-security-admission.adoc +++ /dev/null @@ -1,28 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-and-managing-pod-security-admission"] -= Understanding and managing pod security admission -include::_attributes/common-attributes.adoc[] -:context: understanding-and-managing-pod-security-admission - -toc::[] - -Pod security admission is an implementation of the link:https://kubernetes.io/docs/concepts/security/pod-security-standards/[Kubernetes pod security standards]. Use pod security admission to restrict the behavior of pods. 
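To make the pod security admission description above concrete, the following minimal sketch applies the standard Kubernetes pod security labels to a namespace. The namespace name and the `restricted` level are illustrative choices, not values that {product-title} requires.

[source,yaml]
----
apiVersion: v1
kind: Namespace
metadata:
  name: my-app <1>
  labels:
    pod-security.kubernetes.io/enforce: restricted <2>
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/audit: restricted
----
<1> Example namespace name.
<2> Pods that violate the `restricted` profile are rejected; the `warn` and `audit` labels only report violations.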
- -// Security context constraint synchronization with pod security standards -include::modules/security-context-constraints-psa-synchronization.adoc[leveloffset=+1] - -// Controlling pod security admission synchronization -include::modules/security-context-constraints-psa-opting.adoc[leveloffset=+1] - -// About pod security admission alerts -include::modules/security-context-constraints-psa-rectifying.adoc[leveloffset=+1] - -// Identifying pod security violations -include::modules/security-context-constraints-psa-alert-eval.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_managing-pod-security-admission"] -== Additional resources - -* xref:../security/audit-log-view.adoc#nodes-nodes-audit-log-basic-viewing_audit-log-view[Viewing audit logs] -* xref:../authentication/managing-security-context-constraints.adoc#managing-pod-security-policies[Managing security context constraints] diff --git a/authentication/understanding-authentication.adoc b/authentication/understanding-authentication.adoc deleted file mode 100644 index 6f438338cc28..000000000000 --- a/authentication/understanding-authentication.adoc +++ /dev/null @@ -1,32 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-authentication"] -= Understanding authentication -include::_attributes/common-attributes.adoc[] -:context: understanding-authentication - -toc::[] - -For users to interact with {product-title}, they must first authenticate -to the cluster. The authentication layer identifies the user associated with requests to the -{product-title} API. The authorization layer then uses information about the -requesting user to determine if the request is allowed. - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -As an administrator, you can configure authentication for {product-title}. -endif::[] - -include::modules/rbac-users.adoc[leveloffset=+1] - -include::modules/rbac-groups.adoc[leveloffset=+1] - -include::modules/rbac-api-authentication.adoc[leveloffset=+1] - -include::modules/oauth-server-overview.adoc[leveloffset=+2] - -include::modules/oauth-token-requests.adoc[leveloffset=+2] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/authentication-api-impersonation.adoc[leveloffset=+3] - -include::modules/authentication-prometheus-system-metrics.adoc[leveloffset=+3] -endif::[] diff --git a/authentication/understanding-identity-provider.adoc b/authentication/understanding-identity-provider.adoc deleted file mode 100644 index 1c3ee54695a3..000000000000 --- a/authentication/understanding-identity-provider.adoc +++ /dev/null @@ -1,80 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-identity-provider"] -= Understanding identity provider configuration -include::_attributes/common-attributes.adoc[] -:context: understanding-identity-provider - -toc::[] - -The {product-title} master includes a built-in OAuth server. Developers and -administrators obtain OAuth access tokens to authenticate themselves to the API. - -As an administrator, you can configure OAuth to specify an identity provider -after you install your cluster. 
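As a hedged sketch of the identity provider configuration discussed above, the following example adds an `HTPasswd` provider to the cluster `OAuth` resource. The provider name and the referenced secret are placeholders; the secret is assumed to exist in the `openshift-config` namespace.

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
  - name: my_htpasswd_provider <1>
    mappingMethod: claim
    type: HTPasswd
    htpasswd:
      fileData:
        name: htpass-secret <2>
----
<1> Placeholder provider name that is shown on the login page.
<2> Assumed name of a secret in the `openshift-config` namespace that contains the `htpasswd` file.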
- -include::modules/identity-provider-overview.adoc[leveloffset=+1] - -[id="supported-identity-providers"] -== Supported identity providers - -You can configure the following types of identity providers: - -[cols="2a,8a",options="header"] -|=== - -|Identity provider -|Description - -|xref:../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#configuring-htpasswd-identity-provider[htpasswd] -|Configure the `htpasswd` identity provider to validate user names and passwords -against a flat file generated using -link:http://httpd.apache.org/docs/2.4/programs/htpasswd.html[`htpasswd`]. - -|xref:../authentication/identity_providers/configuring-keystone-identity-provider.adoc#configuring-keystone-identity-provider[Keystone] -|Configure the `keystone` identity provider to integrate -your {product-title} cluster with Keystone to enable shared authentication with -an OpenStack Keystone v3 server configured to store users in an internal -database. - -|xref:../authentication/identity_providers/configuring-ldap-identity-provider.adoc#configuring-ldap-identity-provider[LDAP] -|Configure the `ldap` identity provider to validate user names and passwords -against an LDAPv3 server, using simple bind authentication. - -|xref:../authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc#configuring-basic-authentication-identity-provider[Basic authentication] -|Configure a `basic-authentication` identity provider for users to log in to -{product-title} with credentials validated against a remote identity provider. -Basic authentication is a generic backend integration mechanism. - -|xref:../authentication/identity_providers/configuring-request-header-identity-provider.adoc#configuring-request-header-identity-provider[Request header] -|Configure a `request-header` identity provider to identify users from request -header values, such as `X-Remote-User`. It is typically used in combination with -an authenticating proxy, which sets the request header value. - -|xref:../authentication/identity_providers/configuring-github-identity-provider.adoc#configuring-github-identity-provider[GitHub or GitHub Enterprise] -|Configure a `github` identity provider to validate user names and passwords -against GitHub or GitHub Enterprise's OAuth authentication server. - -|xref:../authentication/identity_providers/configuring-gitlab-identity-provider.adoc#configuring-gitlab-identity-provider[GitLab] -|Configure a `gitlab` identity provider to use -link:https://gitlab.com/[GitLab.com] or any other GitLab instance as an identity -provider. - -|xref:../authentication/identity_providers/configuring-google-identity-provider.adoc#configuring-google-identity-provider[Google] -|Configure a `google` identity provider using -link:https://developers.google.com/identity/protocols/OpenIDConnect[Google's OpenID Connect integration]. - -|xref:../authentication/identity_providers/configuring-oidc-identity-provider.adoc#configuring-oidc-identity-provider[OpenID Connect] -|Configure an `oidc` identity provider to integrate with an OpenID Connect -identity provider using an -link:http://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth[Authorization Code Flow]. - -|=== - -Once an identity provider has been defined, you can -xref:../authentication/using-rbac.adoc#authorization-overview_using-rbac[use RBAC to define and apply permissions]. 
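To make the RBAC step named above concrete, granting a single user read-only access to one project can look like the following sketch; the user name `alice` and the project `my-project` are placeholders.

[source,terminal]
----
$ oc adm policy add-role-to-user view alice -n my-project
----

The command creates a role binding in the project that references the default `view` cluster role.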
- -include::modules/authentication-remove-kubeadmin.adoc[leveloffset=+1] - -include::modules/identity-provider-parameters.adoc[leveloffset=+1] - -include::modules/identity-provider-default-CR.adoc[leveloffset=+1] diff --git a/authentication/using-rbac.adoc b/authentication/using-rbac.adoc deleted file mode 100644 index 83d9ee01c65d..000000000000 --- a/authentication/using-rbac.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: ASSEMBLY -[id="using-rbac"] -= Using RBAC to define and apply permissions -include::_attributes/common-attributes.adoc[] -:context: using-rbac - -toc::[] - -include::modules/rbac-overview.adoc[leveloffset=+1] - -include::modules/rbac-projects-namespaces.adoc[leveloffset=+1] - -include::modules/rbac-default-projects.adoc[leveloffset=+1] - -include::modules/rbac-viewing-cluster-roles.adoc[leveloffset=+1] - -include::modules/rbac-viewing-local-roles.adoc[leveloffset=+1] - -include::modules/rbac-adding-roles.adoc[leveloffset=+1] - -include::modules/rbac-creating-local-role.adoc[leveloffset=+1] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/rbac-creating-cluster-role.adoc[leveloffset=+1] -endif::[] - -include::modules/rbac-local-role-binding-commands.adoc[leveloffset=+1] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/rbac-cluster-role-binding-commands.adoc[leveloffset=+1] - -include::modules/rbac-creating-cluster-admin.adoc[leveloffset=+1] -endif::[] diff --git a/authentication/using-service-accounts-as-oauth-client.adoc b/authentication/using-service-accounts-as-oauth-client.adoc deleted file mode 100644 index e6f0834fd958..000000000000 --- a/authentication/using-service-accounts-as-oauth-client.adoc +++ /dev/null @@ -1,9 +0,0 @@ -:_content-type: ASSEMBLY -[id="using-service-accounts-as-oauth-client"] -= Using a service account as an OAuth client -include::_attributes/common-attributes.adoc[] -:context: using-service-accounts-as-oauth-client - -toc::[] - -include::modules/service-accounts-as-oauth-clients.adoc[leveloffset=+1] diff --git a/authentication/using-service-accounts-in-applications.adoc b/authentication/using-service-accounts-in-applications.adoc deleted file mode 100644 index 90a236ddc3cb..000000000000 --- a/authentication/using-service-accounts-in-applications.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="using-service-accounts"] -= Using service accounts in applications -include::_attributes/common-attributes.adoc[] -:context: using-service-accounts - -toc::[] - -include::modules/service-accounts-overview.adoc[leveloffset=+1] - -include::modules/service-accounts-default.adoc[leveloffset=+1] - -// remove these links for 4.12+ - -.Additional resources - -* For information about requesting bound service account tokens, see xref:../authentication/bound-service-account-tokens.adoc#bound-sa-tokens-configuring_bound-service-account-tokens[Configuring bound service account tokens using volume projection] - -* For information about creating a service account token secret, see xref:../nodes/pods/nodes-pods-secrets.adoc#nodes-pods-secrets-creating-sa_nodes-pods-secrets[Creating a service account token secret]. 
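Relating to the bound service account token references above, the following pod definition is a minimal, assumed sketch of projecting a bound token into a container; the audience, expiry, and image are illustrative values.

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: token-demo <1>
spec:
  serviceAccountName: default
  containers:
  - name: app
    image: registry.example.com/app:latest <2>
    volumeMounts:
    - name: bound-token
      mountPath: /var/run/secrets/tokens
  volumes:
  - name: bound-token
    projected:
      sources:
      - serviceAccountToken:
          audience: openshift <3>
          expirationSeconds: 3600
          path: token
----
<1> Example pod name.
<2> Placeholder image reference.
<3> Example audience; the kubelet rotates the projected token before it expires.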
- -include::modules/service-accounts-creating.adoc[leveloffset=+1] - -// include::modules/service-accounts-using-credentials-inside-a-container.adoc[leveloffset=+1] diff --git a/backup_and_restore/_attributes b/backup_and_restore/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/backup_and_restore/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/_attributes b/backup_and_restore/application_backup_and_restore/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/backup_and_restore/application_backup_and_restore/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/_attributes b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc deleted file mode 100644 index 64f83e388436..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc +++ /dev/null @@ -1,129 +0,0 @@ -:_content-type: ASSEMBLY -[id="backing-up-applications"] -= Backing up applications -include::_attributes/common-attributes.adoc[] -:context: backing-up-applications - -toc::[] - -You back up applications by creating a `Backup` custom resource (CR). See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-creating-backup-cr_backing-up-applications[Creating a Backup CR]. - -The `Backup` CR creates backup files for Kubernetes resources and internal images, on S3 object storage, and snapshots for persistent volumes (PVs), if the cloud provider uses a native snapshot API or the Container Storage Interface (CSI) to create snapshots, such as {rh-storage} 4. - -For more information about CSI volume snapshots, see xref:../../../storage/container_storage_interface/persistent-storage-csi-snapshots.adoc#persistent-storage-csi-snapshots[CSI volume snapshots]. - -:FeatureName: The `CloudStorage` API for S3 storage -include::snippets/technology-preview.adoc[] - -* If your cloud provider has a native snapshot API or supports CSI snapshots, the `Backup` CR backs up persistent volumes (PVs) by creating snapshots. For more information about working with CSI snapshots, see xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-backing-up-pvs-csi_backing-up-applications[Backing up persistent volumes with CSI snapshots]. - -* If your cloud provider does not support snapshots or if your applications are on NFS data volumes, you can create backups by using Restic. See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-backing-up-applications-restic_backing-up-applications[Backing up applications with Restic]. - -[IMPORTANT] -==== -The {oadp-first} does not support backing up volume snapshots that were created by other software. 
-==== - -You can create backup hooks to run commands before or after the backup operation. See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-creating-backup-hooks_backing-up-applications[Creating backup hooks]. - -You can schedule backups by creating a `Schedule` CR instead of a `Backup` CR. See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-scheduling-backups_backing-up-applications[Scheduling backups]. - -include::modules/oadp-creating-backup-cr.adoc[leveloffset=+1] -include::modules/oadp-backing-up-pvs-csi.adoc[leveloffset=+1] -include::modules/oadp-backing-up-applications-restic.adoc[leveloffset=+1] -include::modules/oadp-using-data-mover-for-csi-snapshots.adoc[leveloffset=+1] - -[id="oadp-12-data-mover-ceph"] -== Using OADP 1.2 Data Mover with Ceph storage - -You can use OADP 1.2 Data Mover to backup and restore application data for clusters that use CephFS, CephRBD, or both. - -OADP 1.2 Data Mover leverages Ceph features that support large-scale environments. One of these is the shallow copy method, which is available for {product-title} 4.12 and later. This feature supports backing up and restoring `StorageClass` and `AccessMode` resources other than what is found on the source persistent volume claim (PVC). - -[IMPORTANT] -==== -The CephFS shallow copy feature is a back up feature. It is not part of restore operations. -==== - -include::modules/oadp-ceph-prerequisites.adoc[leveloffset=+2] - -[id="defining-crs-for-12-data-mover"] -=== Defining custom resources for use with OADP 1.2 Data Mover - -When you install {rh-storage-first}, it automatically creates default CephFS and a CephRBD `StorageClass` and `VolumeSnapshotClass` custom resources (CRs). You must define these CRs for use with OpenShift API for Data Protection (OADP) 1.2 Data Mover. - -After you define the CRs, you must make several other changes to your environment before you can perform your back up and restore operations. - -include::modules/oadp-ceph-preparing-cephfs-crs.adoc[leveloffset=+2] -include::modules/oadp-ceph-preparing-cephrbd-crs.adoc[leveloffset=+2] -include::modules/oadp-ceph-preparing-crs-additional.adoc[leveloffset=+2] - -[id="oadp-ceph-back-up-restore-cephfs"] -=== Backing up and restoring data using OADP 1.2 Data Mover and CephFS storage - -You can use OpenShift API for Data Protection (OADP) 1.2 Data Mover to back up and restore data using CephFS storage by enabling the shallow copy feature of CephFS. - -include::snippets/oadp-ceph-cr-prerequisites.adoc[] - -:context: !backing-up-applications - -:context: cephfs - -include::modules/oadp-ceph-cephfs-back-up-dba.adoc[leveloffset=+2] -include::modules/oadp-ceph-cephfs-back-up.adoc[leveloffset=+2] -include::modules/oadp-ceph-cephfs-restore.adoc[leveloffset=+2] - -[id="oadp-ceph-split"] -=== Backing up and restoring data using OADP 1.2 Data Mover and split volumes (CephFS and Ceph RBD) - -You can use OpenShift API for Data Protection (OADP) 1.2 Data Mover to back up and restore data in an environment that has _split volumes_, that is, an environment that uses both CephFS and CephRBD. 
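For orientation, a minimal `Backup` CR of the kind described earlier in this assembly might look like the following sketch. The backup name, the namespace to include, and the storage location name are placeholders.

[source,yaml]
----
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: example-backup <1>
  namespace: openshift-adp
spec:
  includedNamespaces:
  - my-app <2>
  storageLocation: dpa-sample-1 <3>
  ttl: 720h0m0s <4>
----
<1> Placeholder backup name.
<2> Example application namespace to back up.
<3> Assumed name of an existing `BackupStorageLocation`.
<4> Retention period for the backup.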
- -include::snippets/oadp-ceph-cr-prerequisites.adoc[] - -:context: !cephfs - -:context: split - -include::modules/oadp-ceph-split-back-up-dba.adoc[leveloffset=+2] -include::modules/oadp-ceph-cephfs-back-up.adoc[leveloffset=+2] -include::modules/oadp-ceph-cephfs-restore.adoc[leveloffset=+2] - -:context: !split - -:context: backing-up-applications - -[id="oadp-cleaning-up-after-data-mover-1-1-backup"] -== Cleaning up after a backup using OADP 1.1 Data Mover - -For OADP 1.1 Data Mover, you must perform a data cleanup after you perform a backup. - -The cleanup consists of deleting the following resources: - -* Snapshots in a bucket -* Cluster resources -* Volume snapshot backups (VSBs) after a backup procedure that is either run by a schedule or is run repetitively - -include::modules/oadp-cleaning-up-after-data-mover-snapshots.adoc[leveloffset=+2] - -[id="deleting-cluster-resources"] -=== Deleting cluster resources - -OADP 1.1 Data Mover might leave cluster resources whether or not it successfully backs up your container storage interface (CSI) volume snapshots to a remote object store. - -include::modules/oadp-deleting-cluster-resources-following-success.adoc[leveloffset=+3] -include::modules/oadp-deleting-cluster-resources-following-failure.adoc[leveloffset=+3] - -include::modules/oadp-vsb-cleanup-after-scheduler.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-operators-from-operatorhub_olm-adding-operators-to-a-cluster[Installing Operators on clusters for administrators] -* xref:../../../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-operators-in-namespace[Installing Operators in namespaces for non-administrators] - -include::modules/oadp-creating-backup-hooks.adoc[leveloffset=+1] -include::modules/oadp-scheduling-backups.adoc[leveloffset=+1] -include::modules/oadp-deleting-backups.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#velero-obtaining-by-downloading_oadp-troubleshooting[Downloading the Velero CLI tool] diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/images b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/modules b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc deleted file mode 100644 index ab3ec37d6077..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc +++ /dev/null @@ -1,14 +0,0 @@ -:_content-type: ASSEMBLY -[id="restoring-applications"] -= Restoring applications -include::_attributes/common-attributes.adoc[] 
-:context: restoring-applications - -toc::[] - -You restore application backups by creating a `Restore` custom resource (CR). See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#oadp-creating-restore-cr_restoring-applications[Creating a Restore CR]. - -You can create restore hooks to run commands in a container in a pod while restoring your application by editing the `Restore` (CR). See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#oadp-creating-restore-hooks_restoring-applications[Creating restore hooks] - -include::modules/oadp-creating-restore-cr.adoc[leveloffset=+1] -include::modules/oadp-creating-restore-hooks.adoc[leveloffset=+1] diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/snippets b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/snippets deleted file mode 120000 index ce62fd7c41e2..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/images b/backup_and_restore/application_backup_and_restore/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/backup_and_restore/application_backup_and_restore/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/installing/_attributes b/backup_and_restore/application_backup_and_restore/installing/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc b/backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc deleted file mode 100644 index 7969b21753ff..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc +++ /dev/null @@ -1,57 +0,0 @@ -:_content-type: ASSEMBLY -[id="about-installing-oadp"] -= About installing OADP -include::_attributes/common-attributes.adoc[] -:context: about-installing-oadp - -toc::[] - -As a cluster administrator, you install the OpenShift API for Data Protection (OADP) by installing the OADP Operator. The OADP Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}]. 
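As a companion sketch for the `Restore` CR workflow described above, the following minimal example restores from an existing backup; the restore and backup names are placeholders.

[source,yaml]
----
apiVersion: velero.io/v1
kind: Restore
metadata:
  name: example-restore <1>
  namespace: openshift-adp
spec:
  backupName: example-backup <2>
  restorePVs: true <3>
----
<1> Placeholder restore name.
<2> Name of an existing `Backup` CR to restore from.
<3> Also restore persistent volumes where the backup supports it.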
- -include::snippets/oadp-mtc-operator.adoc[] - -To back up Kubernetes resources and internal images, you must have object storage as a backup location, such as one of the following storage types: - -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc#installing-oadp-aws[Amazon Web Services] -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc#installing-oadp-azure[Microsoft Azure] -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc#installing-oadp-gcp[Google Cloud Platform] -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc#installing-oadp-mcg[Multicloud Object Gateway] -* AWS S3 compatible object storage, such as Noobaa or Minio - -:FeatureName: The `CloudStorage` API, which automates the creation of a bucket for object storage, -include::snippets/technology-preview.adoc[] - -You can back up persistent volumes (PVs) by using snapshots or Restic. - -To back up PVs with snapshots, you must have a cloud provider that supports either a native snapshot API or Container Storage Interface (CSI) snapshots, such as one of the following cloud providers: - -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc#installing-oadp-aws[Amazon Web Services] -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc#installing-oadp-azure[Microsoft Azure] -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc#installing-oadp-gcp[Google Cloud Platform] -* CSI snapshot-enabled cloud provider, such as xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc#installing-oadp-ocs[OpenShift Data Foundation] - -include::snippets/oadp-ocp-compat.adoc[] - -If your cloud provider does not support snapshots or if your storage is NFS, you can back up applications with xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-backing-up-applications-restic_backing-up-applications[Restic backups] on object storage. - -You create a default `Secret` and then you install the Data Protection Application. 
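The default `Secret` step mentioned above typically reduces to a single command similar to the following sketch. The `credentials-velero` file name is a placeholder, and `cloud-credentials` is the secret name assumed for AWS-style providers; other providers use different default names.

[source,terminal]
----
$ oc create secret generic cloud-credentials \
    --namespace openshift-adp \
    --from-file cloud=credentials-velero
----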
- -include::modules/oadp-s3-compatible-backup-storage-providers.adoc[leveloffset=+1] - -include::modules/oadp-configuring-noobaa-for-dr.adoc[leveloffset=+1] - -[discrete] -[role="_additional-resources"] -.Additional resources - -* link:https://{velero-domain}/docs/v{velero-version}/locations/[Overview of backup and snapshot locations in the Velero documentation] - -include::modules/about-oadp-update-channels.adoc[leveloffset=+1] -include::modules/about-installing-oadp-on-multiple-namespaces.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../../operators/understanding/olm/olm-understanding-olm.adoc#olm-csv_olm-understanding-olm[Cluster service version] - -include::modules/oadp-velero-cpu-memory-requirements.adoc[leveloffset=+1] diff --git a/backup_and_restore/application_backup_and_restore/installing/images b/backup_and_restore/application_backup_and_restore/installing/images deleted file mode 120000 index 4399cbb3c0f3..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/images +++ /dev/null @@ -1 +0,0 @@ -../../../images/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc deleted file mode 100644 index 64b8b58df482..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-oadp-aws"] -= Installing and configuring the OpenShift API for Data Protection with Amazon Web Services -include::_attributes/common-attributes.adoc[] -:context: installing-oadp-aws -:installing-oadp-aws: -:credentials: cloud-credentials -:provider: aws - -toc::[] - -You install the OpenShift API for Data Protection (OADP) with Amazon Web Services (AWS) by installing the OADP Operator. The Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}]. - -include::snippets/oadp-mtc-operator.adoc[] - -You configure AWS for Velero, create a default `Secret`, and then install the Data Protection Application. - -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] for details. - -include::modules/oadp-installing-operator.adoc[leveloffset=+1] -include::modules/migration-configuring-aws-s3.adoc[leveloffset=+1] -include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1] -include::modules/oadp-creating-default-secret.adoc[leveloffset=+2] -include::modules/oadp-secrets-for-different-credentials.adoc[leveloffset=+2] - -[id="configuring-dpa-aws"] -== Configuring the Data Protection Application - -You can configure the Data Protection Application by setting Velero resource allocations or enabling self-signed CA certificates. 
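To illustrate the Velero resource allocation option named above, the relevant fragment of the `DataProtectionApplication` spec might look like the following sketch; the CPU and memory values are examples, not recommendations.

[source,yaml]
----
spec:
  configuration:
    velero:
      podConfig:
        resourceAllocations: <1>
          requests:
            cpu: 500m
            memory: 256Mi
          limits:
            cpu: "1"
            memory: 1Gi
----
<1> Applies to the `Velero` pod; a similar block under `configuration.restic.podConfig` applies to the `Restic` pods.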
- -include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2] -include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2] - -include::modules/oadp-installing-dpa.adoc[leveloffset=+1] -include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2] - -:!installing-oadp-aws: diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc deleted file mode 100644 index 3077b98b3b71..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-oadp-azure"] -= Installing and configuring the OpenShift API for Data Protection with Microsoft Azure -include::_attributes/common-attributes.adoc[] -:context: installing-oadp-azure -:installing-oadp-azure: -:credentials: cloud-credentials-azure -:provider: azure - -toc::[] - -You install the OpenShift API for Data Protection (OADP) with Microsoft Azure by installing the OADP Operator. The Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}]. - -include::snippets/oadp-mtc-operator.adoc[] - -You configure Azure for Velero, create a default `Secret`, and then install the Data Protection Application. - -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] for details. - -include::modules/oadp-installing-operator.adoc[leveloffset=+1] -include::modules/migration-configuring-azure.adoc[leveloffset=+1] -include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1] -include::modules/oadp-creating-default-secret.adoc[leveloffset=+2] -include::modules/oadp-secrets-for-different-credentials.adoc[leveloffset=+2] - -[id="configuring-dpa-azure"] -== Configuring the Data Protection Application - -You can configure the Data Protection Application by setting Velero resource allocations or enabling self-signed CA certificates. - -include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2] -include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2] - -include::modules/oadp-installing-dpa.adoc[leveloffset=+1] -include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2] - -:installing-oadp-azure!: diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc deleted file mode 100644 index 6b688c72a17a..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-oadp-gcp"] -= Installing and configuring the OpenShift API for Data Protection with Google Cloud Platform -include::_attributes/common-attributes.adoc[] -:context: installing-oadp-gcp -:installing-oadp-gcp: -:credentials: cloud-credentials-gcp -:provider: gcp - -toc::[] - -You install the OpenShift API for Data Protection (OADP) with Google Cloud Platform (GCP) by installing the OADP Operator. The Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}]. 
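Pulling the preceding provider steps together, an assumed end-to-end `DataProtectionApplication` CR for an AWS-style backup location might look like the following sketch; the bucket, prefix, and region are placeholders.

[source,yaml]
----
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: dpa-sample
  namespace: openshift-adp
spec:
  configuration:
    velero:
      defaultPlugins:
      - openshift
      - aws
    restic:
      enable: true <1>
  backupLocations:
  - velero:
      provider: aws
      default: true
      objectStorage:
        bucket: <bucket_name>
        prefix: velero
      config:
        region: us-east-1
        profile: "default"
      credential:
        name: cloud-credentials <2>
        key: cloud
----
<1> Enables Restic for volumes that cannot be backed up with snapshots.
<2> The `Secret` created in the earlier step.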
- -include::snippets/oadp-mtc-operator.adoc[] - -You configure GCP for Velero, create a default `Secret`, and then install the Data Protection Application. - -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] for details. - -include::modules/oadp-installing-operator.adoc[leveloffset=+1] -include::modules/migration-configuring-gcp.adoc[leveloffset=+1] -include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1] -include::modules/oadp-creating-default-secret.adoc[leveloffset=+2] -include::modules/oadp-secrets-for-different-credentials.adoc[leveloffset=+2] - -[id="configuring-dpa-gcp"] -== Configuring the Data Protection Application - -You can configure the Data Protection Application by setting Velero resource allocations or enabling self-signed CA certificates. - -include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2] -include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2] - -include::modules/oadp-installing-dpa.adoc[leveloffset=+1] -include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2] - -:installing-oadp-gcp!: diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc deleted file mode 100644 index ff7180e2410e..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc +++ /dev/null @@ -1,43 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-oadp-mcg"] -= Installing and configuring the OpenShift API for Data Protection with Multicloud Object Gateway -include::_attributes/common-attributes.adoc[] -:context: installing-oadp-mcg -:installing-oadp-mcg: -:credentials: cloud-credentials -:provider: aws - -toc::[] - -You install the OpenShift API for Data Protection (OADP) with Multicloud Object Gateway (MCG) by installing the OADP Operator. The Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}]. - -include::snippets/oadp-mtc-operator.adoc[] - -You configure xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc#installing-oadp-mcg[Multicloud Object Gateway] as a backup location. -MCG is a component of {rh-storage}. You configure MCG as a backup location in the `DataProtectionApplication` custom resource (CR). - -:FeatureName: The `CloudStorage` API, which automates the creation of a bucket for object storage, -include::snippets/technology-preview.adoc[] - -You create a `Secret` for the backup location and then you install the Data Protection Application. - -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. For details, see xref:../../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks]. 
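For an S3-compatible endpoint such as MCG, the backup location portion of the `DataProtectionApplication` spec generally adds endpoint-specific keys; the following fragment is a sketch with a placeholder URL and bucket.

[source,yaml]
----
spec:
  backupLocations:
  - velero:
      provider: aws <1>
      default: true
      config:
        profile: "default"
        region: minio
        s3Url: <mcg_s3_endpoint_url>
        s3ForcePathStyle: "true"
        insecureSkipTLSVerify: "true" <2>
      credential:
        name: cloud-credentials
        key: cloud
      objectStorage:
        bucket: <bucket_name>
        prefix: velero
----
<1> S3-compatible storage is addressed through the AWS S3 API.
<2> Only appropriate for test environments without a trusted certificate.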
- -include::modules/oadp-installing-operator.adoc[leveloffset=+1] -include::modules/migration-configuring-mcg.adoc[leveloffset=+1] -include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1] -include::modules/oadp-creating-default-secret.adoc[leveloffset=+2] -include::modules/oadp-secrets-for-different-credentials.adoc[leveloffset=+2] - -[id="configuring-dpa-mcg"] -== Configuring the Data Protection Application - -You can configure the Data Protection Application by setting Velero resource allocations or enabling self-signed CA certificates. - -include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2] -include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2] - -include::modules/oadp-installing-dpa.adoc[leveloffset=+1] -include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2] - -:installing-oadp-mcg!: diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc deleted file mode 100644 index 651717695045..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc +++ /dev/null @@ -1,40 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-oadp-ocs"] -= Installing and configuring the OpenShift API for Data Protection with OpenShift Data Foundation -include::_attributes/common-attributes.adoc[] -:context: installing-oadp-ocs -:credentials: cloud-credentials -:provider: gcp - -toc::[] - -You install the OpenShift API for Data Protection (OADP) with {rh-storage} by installing the OADP Operator and configuring a backup location and a snapshot location. Then, you install the Data Protection Application. - -include::snippets/oadp-mtc-operator.adoc[] - -You can configure xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc#installing-oadp-mcg[Multicloud Object Gateway] or any S3-compatible object storage as a backup location. - -:FeatureName: The `CloudStorage` API, which automates the creation of a bucket for object storage, -include::snippets/technology-preview.adoc[] - -You create a `Secret` for the backup location and then you install the Data Protection Application. - -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. For details, see xref:../../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks]. - -include::modules/oadp-installing-operator.adoc[leveloffset=+1] -include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1] -include::modules/oadp-creating-default-secret.adoc[leveloffset=+2] -include::modules/oadp-secrets-for-different-credentials.adoc[leveloffset=+2] - -[id="configuring-dpa-ocs"] -== Configuring the Data Protection Application - -You can configure the Data Protection Application by setting Velero resource allocations or enabling self-signed CA certificates. 
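For the self-signed CA certificate option mentioned above, the certificate is generally supplied on the backup location object storage definition; the following `DataProtectionApplication` fragment is a hedged sketch with the Base64-encoded certificate as a placeholder.

[source,yaml]
----
spec:
  backupLocations:
  - velero:
      provider: aws
      default: true
      objectStorage:
        bucket: <bucket_name>
        prefix: velero
        caCert: <base64_encoded_ca_certificate>
----

The `caCert` value stands in for the Base64-encoded custom CA bundle that the object storage endpoint presents.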
- -include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2] -include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2] - -include::modules/oadp-installing-dpa.adoc[leveloffset=+1] -include::modules/oadp-configuring-noobaa-for-dr.adoc[leveloffset=+2] -include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2] - diff --git a/backup_and_restore/application_backup_and_restore/installing/modules b/backup_and_restore/application_backup_and_restore/installing/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/installing/snippets b/backup_and_restore/application_backup_and_restore/installing/snippets deleted file mode 120000 index ce62fd7c41e2..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/installing/uninstalling-oadp.adoc b/backup_and_restore/application_backup_and_restore/installing/uninstalling-oadp.adoc deleted file mode 100644 index 49f3c9b02f2e..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/uninstalling-oadp.adoc +++ /dev/null @@ -1,9 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-oadp"] -= Uninstalling the OpenShift API for Data Protection -include::_attributes/common-attributes.adoc[] -:context: uninstalling-oadp - -toc::[] - -You uninstall the OpenShift API for Data Protection (OADP) by deleting the OADP Operator. See xref:../../../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-cluster[Deleting Operators from a cluster] for details. diff --git a/backup_and_restore/application_backup_and_restore/modules b/backup_and_restore/application_backup_and_restore/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/backup_and_restore/application_backup_and_restore/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-advanced-topics.adoc b/backup_and_restore/application_backup_and_restore/oadp-advanced-topics.adoc deleted file mode 100644 index 26d17d149167..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-advanced-topics.adoc +++ /dev/null @@ -1,34 +0,0 @@ -:_content-type: ASSEMBLY -[id="oadp-advanced-topics"] -= Advanced OADP features and functionalities -include::_attributes/common-attributes.adoc[] -:context: oadp-advanced-topics - -toc::[] - -This document provides information about advanced features and functionalities of OpenShift API for Data Protection (OADP). 
- -[id="oadp-different-kubernetes-api-versions"] -== Working with different Kubernetes API versions on the same cluster - -include::modules/oadp-checking-api-group-versions.adoc[leveloffset=+2] -include::modules/oadp-about-enable-api-group-versions.adoc[leveloffset=+2] -include::modules/oadp-using-enable-api-group-versions.adoc[leveloffset=+2] - -[id="backing-up-data-one-cluster-restoring-another-cluster"] -== Backing up data from one cluster and restoring it to another cluster - -include::modules/oadp-about-backing-and-restoring-from-cluster-to-cluster.adoc[leveloffset=+2] -include::modules/oadp-backing-and-restoring-from-cluster-to-cluster.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_oadp-advanced-topics"] -== Additional resources - -For more information about API group versions, see xref:../../backup_and_restore/application_backup_and_restore/oadp-advanced-topics.adoc#oadp-different-kubernetes-api-versions[Working with different Kubernetes API versions on the same cluster]. - -For more information about OADP Data Mover, see xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-using-data-mover-for-csi-snapshots_backing-up-applications[Using Data Mover for CSI snapshots]. - -For more information about using Restic with OADP, see xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-backing-up-applications-restic_backing-up-applications[Backing up applications with Restic]. - -:!oadp-advanced-topics: diff --git a/backup_and_restore/application_backup_and_restore/oadp-api.adoc b/backup_and_restore/application_backup_and_restore/oadp-api.adoc deleted file mode 100644 index 6ac2bd278c3f..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-api.adoc +++ /dev/null @@ -1,251 +0,0 @@ -:_content-type: ASSEMBLY -[id="oadp-api"] -= APIs used with OADP -include::_attributes/common-attributes.adoc[] -:context: oadp-api -:namespace: openshift-adp -:local-product: OADP -:velero-domain: velero.io - -toc::[] - -The document provides information about the following APIs that you can use with OADP: - -* Velero API -* OADP API - -[id="velero-api"] -== Velero API - -Velero API documentation is maintained by Velero, not by Red Hat. It can be found at link:https://velero.io/docs/main/api-types/[Velero API types]. - -[id="oadp-api-tables"] -== OADP API - -The following tables provide the structure of the OADP API: - -.DataProtectionApplicationSpec -[options="header"] -|=== -|Property|Type|Description - -|`backupLocations` -|[] link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#BackupLocation[`BackupLocation`] -|Defines the list of configurations to use for `BackupStorageLocations`. - -|`snapshotLocations` -|[] link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#SnapshotLocation[`SnapshotLocation`] -|Defines the list of configurations to use for `VolumeSnapshotLocations`. - -|`unsupportedOverrides` -|map [ link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#UnsupportedImageKey[UnsupportedImageKey] ] link:https://pkg.go.dev/builtin#string[string] -|Can be used to override the deployed dependent images for development. Options are `veleroImageFqin`, `awsPluginImageFqin`, `openshiftPluginImageFqin`, `azurePluginImageFqin`, `gcpPluginImageFqin`, `csiPluginImageFqin`, `dataMoverImageFqin`, `resticRestoreImageFqin`, `kubevirtPluginImageFqin`, and `operator-type`. 
- -|`podAnnotations` -|map [ link:https://pkg.go.dev/builtin#string[string] ] link:https://pkg.go.dev/builtin#string[string] -|Used to add annotations to pods deployed by Operators. - -|`podDnsPolicy` -|link:https://pkg.go.dev/k8s.io/api/core/v1#DNSPolicy[`DNSPolicy`] -|Defines the configuration of the DNS of a pod. - -|`podDnsConfig` -|link:https://pkg.go.dev/k8s.io/api/core/v1#PodDNSConfig[`PodDNSConfig`] -|Defines the DNS parameters of a pod in addition to those generated from `DNSPolicy`. - -|`backupImages` -|*link:https://pkg.go.dev/builtin#bool[bool] -|Used to specify whether or not you want to deploy a registry for enabling backup and restore of images. - -|`configuration` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#ApplicationConfig[`ApplicationConfig`] -|Used to define the data protection application's server configuration. - -|`features` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#Features[`Features`] -|Defines the configuration for the DPA to enable the Technology Preview features. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#DataProtectionApplicationSpec[Complete schema definitions for the OADP API]. - -.BackupLocation -[options="header"] -|=== -|Property|Type|Description - -|`velero` -|*link:https://pkg.go.dev/github.com/vmware-tanzu/velero/pkg/apis/velero/v1#BackupStorageLocationSpec[velero.BackupStorageLocationSpec] -|Location to store volume snapshots, as described in link:https://pkg.go.dev/github.com/vmware-tanzu/velero/pkg/apis/velero/v1#BackupStorageLocation[Backup Storage Location]. - -|`bucket` -| *link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#CloudStorageLocation[CloudStorageLocation] -| [Technology Preview] Automates creation of a bucket at some cloud storage providers for use as a backup storage location. -|=== - -:FeatureName: The `bucket` parameter -include::snippets/technology-preview.adoc[leveloffset=+1] - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#BackupLocation[Complete schema definitions for the type `BackupLocation`]. - -.SnapshotLocation -[options="header"] -|=== -|Property|Type|Description - -|`velero` -|*link:https://pkg.go.dev/github.com/vmware-tanzu/velero/pkg/apis/velero/v1#VolumeSnapshotLocationSpec[VolumeSnapshotLocationSpec] -|Location to store volume snapshots, as described in link:https://pkg.go.dev/github.com/vmware-tanzu/velero/pkg/apis/velero/v1#VolumeSnapshotLocation[Volume Snapshot Location]. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#SnapshotLocation[Complete schema definitions for the type `SnapshotLocation`]. - -.ApplicationConfig -[options="header"] -|=== -|Property|Type|Description - -|`velero` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#VeleroConfig[VeleroConfig] -|Defines the configuration for the Velero server. - -|`restic` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#ResticConfig[ResticConfig] -|Defines the configuration for the Restic server. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#ApplicationConfig[Complete schema definitions for the type `ApplicationConfig`]. - -.VeleroConfig -[options="header"] -|=== -|Property|Type|Description - -|`featureFlags` -|[] link:https://pkg.go.dev/builtin#string[string] -|Defines the list of features to enable for the Velero instance. 
- -|`defaultPlugins` -|[] link:https://pkg.go.dev/builtin#string[string] -|The following types of default Velero plugins can be installed: `aws`,`azure`, `csi`, `gcp`, `kubevirt`, and `openshift`. - -|`customPlugins` -|[]link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#CustomPlugin[CustomPlugin] -|Used for installation of custom Velero plugins. - -Default and custom plugins are described in xref:../../backup_and_restore/application_backup_and_restore/oadp-features-plugins#oadp-features-plugins[OADP plugins] - -|`restoreResourcesVersionPriority` -|link:https://pkg.go.dev/builtin#string[string] -|Represents a config map that is created if defined for use in conjunction with the `EnableAPIGroupVersions` feature flag. Defining this field automatically adds `EnableAPIGroupVersions` to the Velero server feature flag. - -|`noDefaultBackupLocation` -|link:https://pkg.go.dev/builtin#bool[bool] -|To install Velero without a default backup storage location, you must set the `noDefaultBackupLocation` flag in order to confirm installation. - -|`podConfig` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#PodConfig[`PodConfig`] -|Defines the configuration of the `Velero` pod. - -|`logLevel` -|link:https://pkg.go.dev/builtin#string[string] -|Velero server’s log level (use `debug` for the most granular logging, leave unset for Velero default). Valid options are `trace`, `debug`, `info`, `warning`, `error`, `fatal`, and `panic`. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#VeleroConfig[Complete schema definitions for the type `VeleroConfig`]. - -.CustomPlugin -[options="header"] -|=== -|Property|Type|Description - -|`name` -|link:https://pkg.go.dev/builtin#string[string] -|Name of custom plugin. - -|`image` -|link:https://pkg.go.dev/builtin#string[string] -|Image of custom plugin. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#CustomPlugin[Complete schema definitions for the type `CustomPlugin`]. - -.ResticConfig -[options="header"] -|=== -|Property|Type|Description - -|`enable` -|*link:https://pkg.go.dev/builtin#bool[bool] -|If set to `true`, enables backup and restore using Restic. If set to `false`, snapshots are needed. - -|`supplementalGroups` -|[]link:https://pkg.go.dev/builtin#int64[int64] -|Defines the Linux groups to be applied to the `Restic` pod. - -|`timeout` -|link:https://pkg.go.dev/builtin#string[string] -|A user-supplied duration string that defines the Restic timeout. Default value is `1hr` (1 hour). A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as `300ms`, -1.5h` or `2h45m`. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, and `h`. - -|`podConfig` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#PodConfig[`PodConfig`] -|Defines the configuration of the `Restic` pod. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#ResticConfig[Complete schema definitions for the type `ResticConfig`]. - -.PodConfig -[options="header"] -|=== -|Property|Type|Description - -|`nodeSelector` -|map [ link:https://pkg.go.dev/builtin#string[string] ] link:https://pkg.go.dev/builtin#string[string] -|Defines the `nodeSelector` to be supplied to a `Velero` `podSpec` or a `Restic` `podSpec`. 
- -|`tolerations` -|[]link:https://pkg.go.dev/k8s.io/api/core/v1#Toleration[Toleration] -|Defines the list of tolerations to be applied to a Velero deployment or a Restic `daemonset`. - -|`resourceAllocations` -|link:https://pkg.go.dev/k8s.io/api/core/v1#ResourceRequirements[ResourceRequirements] -|Set specific resource `limits` and `requests` for a `Velero` pod or a `Restic` pod as described in xref:../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc#oadp-setting-resource-limits-and-requests_installing-oadp-aws[Setting Velero CPU and memory resource allocations]. - -|`labels` -|map [ link:https://pkg.go.dev/builtin#string[string] ] link:https://pkg.go.dev/builtin#string[string] -|Labels to add to pods. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#PodConfig[Complete schema definitions for the type `PodConfig`]. - -.Features -[options="header"] -|=== -|Property|Type|Description - -|`dataMover` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#DataMover[`DataMover`] -|Defines the configuration of the Data Mover. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#Features[Complete schema definitions for the type `Features`]. - -.DataMover -[options="header"] -|=== -|Property|Type|Description - -|`enable` -|link:https://pkg.go.dev/builtin#bool[bool] -|If set to `true`, deploys the volume snapshot mover controller and a modified CSI Data Mover plugin. If set to `false`, these are not deployed. - -|`credentialName` -|link:https://pkg.go.dev/builtin#string[string] -|User-supplied Restic `Secret` name for Data Mover. - -|`timeout` -|link:https://pkg.go.dev/builtin#string[string] -|A user-supplied duration string for `VolumeSnapshotBackup` and `VolumeSnapshotRestore` to complete. Default is `10m` (10 minutes). A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as `300ms`, -1.5h` or `2h45m`. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, and `h`. -|=== - -The OADP API is more fully detailed in link:https://pkg.go.dev/github.com/openshift/oadp-operator[OADP Operator]. - diff --git a/backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc b/backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc deleted file mode 100644 index 417275375f02..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -[id="oadp-features-plugins"] -= OADP features and plugins -include::_attributes/common-attributes.adoc[] -:context: oadp-features-plugins - -toc::[] - -OpenShift API for Data Protection (OADP) features provide options for backing up and restoring applications. - -The default plugins enable Velero to integrate with certain cloud providers and to back up and restore {product-title} resources. - -include::modules/oadp-features.adoc[leveloffset=+1] -include::modules/oadp-plugins.adoc[leveloffset=+1] -include::modules/oadp-configuring-velero-plugins.adoc[leveloffset=+1] - -[id="oadp-support-for-ibm-power-and-ibm-z"] -== OADP support for IBM Power and {ibmzProductName} - -OpenShift API for Data Protection (OADP) is platform neutral. The information that follows relates only to IBM Power and to {ibmzProductName}. - -OADP 1.1.0 was tested successfully against {product-title} 4.11 for both IBM Power and {ibmzProductName}. 
The sections that follow give testing and support information for OADP 1.1.0 in terms of backup locations for these systems. - -include::modules/oadp-ibm-power-test-support.adoc[leveloffset=+2] -include::modules/oadp-ibm-z-test-support.adoc[leveloffset=+2] - -:!oadp-features-plugins: diff --git a/backup_and_restore/application_backup_and_restore/oadp-release-notes.adoc b/backup_and_restore/application_backup_and_restore/oadp-release-notes.adoc deleted file mode 100644 index 50fff6ad65a5..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-release-notes.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="oadp-release-notes"] -= OADP release notes -include::_attributes/common-attributes.adoc[] -:context: oadp-release-notes - -toc::[] - -The release notes for OpenShift API for Data Protection (OADP) describe new features and enhancements, deprecated features, product recommendations, known issues, and resolved issues. - - -include::modules/oadp-release-notes-1-2-0.adoc[leveloffset=+1] - -include::modules/oadp-release-notes-1-1-4.adoc[leveloffset=+1] - -include::modules/oadp-release-notes-1-1-2.adoc[leveloffset=+1] - -include::modules/oadp-release-notes-1-1-1.adoc[leveloffset=+1] - -:!oadp-release-notes: diff --git a/backup_and_restore/application_backup_and_restore/snippets b/backup_and_restore/application_backup_and_restore/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/backup_and_restore/application_backup_and_restore/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/troubleshooting.adoc b/backup_and_restore/application_backup_and_restore/troubleshooting.adoc deleted file mode 100644 index 7d14d9451680..000000000000 --- a/backup_and_restore/application_backup_and_restore/troubleshooting.adoc +++ /dev/null @@ -1,85 +0,0 @@ -:_content-type: ASSEMBLY -[id="troubleshooting"] -= Troubleshooting -include::_attributes/common-attributes.adoc[] -:context: oadp-troubleshooting -:namespace: openshift-adp -:local-product: OADP -:must-gather: registry.redhat.io/oadp/oadp-mustgather-rhel8:v1.1 - -toc::[] - -You can debug Velero custom resources (CRs) by using the xref:../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#oadp-debugging-oc-cli_oadp-troubleshooting[OpenShift CLI tool] or the xref:../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#migration-debugging-velero-resources_oadp-troubleshooting[Velero CLI tool]. The Velero CLI tool provides more detailed logs and information. - -You can check xref:../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#oadp-installation-issues_oadp-troubleshooting[installation issues], xref:../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#oadp-backup-restore-cr-issues_oadp-troubleshooting[backup and restore CR issues], and xref:../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#oadp-restic-issues_oadp-troubleshooting[Restic issues]. - -You can collect logs, CR information, and Prometheus metric data by using the xref:../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#migration-using-must-gather_oadp-troubleshooting[`must-gather` tool]. 
- -You can obtain the Velero CLI tool by: - -* Downloading the Velero CLI tool -* Accessing the Velero binary in the Velero deployment in the cluster - -include::modules/velero-obtaining-by-downloading.adoc[leveloffset=+1] -include::modules/velero-obtaining-by-accessing-binary.adoc[leveloffset=+1] - -include::modules/oadp-debugging-oc-cli.adoc[leveloffset=+1] -include::modules/migration-debugging-velero-resources.adoc[leveloffset=+1] - - - -[id="oadp-pod-crash-resource-request"] -== Pods crash or restart due to lack of memory or CPU - -If a Velero or Restic pod crashes due to a lack of memory or CPU, you can set specific resource requests for either of those pods. -[role="_additional-resources"] -.Additional resources -* xref:../../backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc#oadp-velero-cpu-memory-requirements_about-installing-oadp[CPU and memory requirements] - -include::modules/oadp-pod-crash-set-resource-request-velero.adoc[leveloffset=+2] -include::modules/oadp-pod-crash-set-resource-request-restic.adoc[leveloffset=+2] - -[IMPORTANT] -==== -The values for the resource request fields must follow the same format as Kubernetes resource requirements. -Also, if you do not specify `configuration.velero.podConfig.resourceAllocations` or `configuration.restic.podConfig.resourceAllocations`, the default `resources` specification for a Velero pod or a Restic pod is as follows: - -[source,yaml] ---- -requests: - cpu: 500m - memory: 128Mi ---- -==== - -[id="issues-with-velero-and-admission-webhooks"] -== Issues with Velero and admission webhooks - -Velero has limited ability to resolve admission webhook issues during a restore. If you have workloads with admission webhooks, you might need to use an additional Velero plugin or make changes to how you restore the workload. - -Typically, workloads with admission webhooks require you to create a resource of a specific kind first. This is especially true if your workload has child resources, because admission webhooks typically block child resources. - -For example, creating or restoring a top-level object such as `service.serving.knative.dev` typically creates child resources automatically. If you do this first, you will not need to use Velero to create and restore these resources. This avoids the problem of child resources being blocked by an admission webhook. - -[id="velero-restore-workarounds-for-workloads-with-admission-webhooks"] -=== Workarounds for restoring Velero backups that use admission webhooks - -This section describes the additional steps required to restore resources for several types of Velero backups that use admission webhooks.
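As a hedged illustration of the workaround described above, the following sketch shows a Velero `Restore` CR that restores only the top-level `service.serving.knative.dev` resources first, so that the controller recreates the child resources instead of Velero restoring them; the backup name and namespace are assumptions.

[source,yaml]
----
apiVersion: velero.io/v1
kind: Restore
metadata:
  name: restore-knative-top-level   # assumed name
  namespace: openshift-adp          # assumed OADP namespace
spec:
  backupName: example-backup        # name of an existing Backup CR (assumed)
  includedResources:
    - service.serving.knative.dev   # restore the top-level kind first
  restorePVs: true
----

A second `Restore` CR without the `includedResources` filter can then restore any remaining resources.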
- -include::modules/migration-debugging-velero-admission-webhooks-knative.adoc[leveloffset=+3] -include::modules/migration-debugging-velero-admission-webhooks-ibm-appconnect.adoc[leveloffset=+3] - -[role="_additional-resources"] -.Additional resources - -* xref:../../architecture/admission-plug-ins.adoc[Admission plugins] -* xref:../../architecture/admission-plug-ins.adoc#admission-webhooks-about_admission-plug-ins[Webhook admission plugins] -* xref:../../architecture/admission-plug-ins.adoc#admission-webhook-types_admission-plug-ins[Types of webhook admission plugins] - -include::modules/oadp-installation-issues.adoc[leveloffset=+1] -include::modules/oadp-backup-restore-cr-issues.adoc[leveloffset=+1] -include::modules/oadp-restic-issues.adoc[leveloffset=+1] - -include::modules/migration-using-must-gather.adoc[leveloffset=+1] - -:!oadp-troubleshooting: diff --git a/backup_and_restore/control_plane_backup_and_restore/_attributes b/backup_and_restore/control_plane_backup_and_restore/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc b/backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc deleted file mode 100644 index a8bc6819d7a5..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc +++ /dev/null @@ -1,28 +0,0 @@ -:_content-type: ASSEMBLY -[id="backup-etcd"] -= Backing up etcd -include::_attributes/common-attributes.adoc[] -:context: backup-etcd - -toc::[] - -etcd is the key-value store for {product-title}, which persists the state of all resource objects. - -Back up your cluster's etcd data regularly and store it in a secure location, ideally outside the {product-title} environment. Do not take an etcd backup before the first certificate rotation completes, which occurs 24 hours after installation; otherwise, the backup will contain expired certificates. It is also recommended to take etcd backups during non-peak usage hours because the etcd snapshot has a high I/O cost. - -Be sure to take an etcd backup after you upgrade your cluster. This is important because when you restore your cluster, you must use an etcd backup that was taken from the same z-stream release. For example, an {product-title} 4.y.z cluster must use an etcd backup that was taken from 4.y.z. - -[IMPORTANT] -==== -Back up your cluster's etcd data by performing a single invocation of the backup script on a control plane host. Do not take a backup for each control plane host. -==== - -After you have an etcd backup, you can xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore to a previous cluster state].
- -// Backing up etcd data -include::modules/backup-etcd.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_backup-etcd"] -== Additional resources -* xref:../../hosted_control_planes/hcp-backup-restore-dr.adoc#hcp-backup-restore[Backing up and restoring etcd on a hosted cluster] diff --git a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/_attributes b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/about-disaster-recovery.adoc b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/about-disaster-recovery.adoc deleted file mode 100644 index 38baebe6e0c6..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/about-disaster-recovery.adoc +++ /dev/null @@ -1,44 +0,0 @@ -:_content-type: ASSEMBLY -[id="about-dr"] -= About disaster recovery -include::_attributes/common-attributes.adoc[] -:context: about-dr - -toc::[] - -The disaster recovery documentation provides information for administrators on -how to recover from several disaster situations that might occur with their -{product-title} cluster. As an administrator, you might need to follow one or -more of the following procedures to return your cluster to a working -state. - -[IMPORTANT] -==== -Disaster recovery requires you to have at least one healthy control plane host. -==== - -xref:../../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[Restoring to a previous cluster state]:: -This solution handles situations where you want to restore your cluster to -a previous state, for example, if an administrator deletes something critical. -This also includes situations where you have lost the majority of your control plane hosts, leading to etcd quorum loss and the cluster going offline. As long as you have taken an etcd backup, you can follow this procedure to restore your cluster to a previous state. -+ -If applicable, you might also need to xref:../../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[recover from expired control plane certificates]. -+ -[WARNING] -==== -Restoring to a previous cluster state is a destructive and destabilizing action to take on a running cluster. This procedure should only be used as a last resort. - -Prior to performing a restore, see xref:../../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-scenario-2-restoring-cluster-state-about_dr-restoring-cluster-state[About restoring cluster state] for more information on the impact to the cluster. -==== -+ -[NOTE] -==== -If you still have a majority of your control plane hosts available and have an etcd quorum, then follow the procedure to xref:../../../backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc#replacing-unhealthy-etcd-member[replace a single unhealthy etcd member].
-==== - -xref:../../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[Recovering from expired control plane certificates]:: -This solution handles situations where your control plane certificates have -expired. For example, if you shut down your cluster before the first certificate -rotation, which occurs 24 hours after installation, your certificates will not -be rotated and will expire. You can follow this procedure to recover from -expired control plane certificates. diff --git a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/images b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/modules b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc deleted file mode 100644 index 4400fc6492a2..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -[id="dr-restoring-cluster-state"] -= Restoring to a previous cluster state -include::_attributes/common-attributes.adoc[] -:context: dr-restoring-cluster-state - -toc::[] - -To restore the cluster to a previous state, you must have previously xref:../../../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[backed up etcd data] by creating a snapshot. You will use this snapshot to restore the cluster state. 
- -// About restoring to a previous cluster state -include::modules/dr-restoring-cluster-state-about.adoc[leveloffset=+1] - -// Restoring to a previous cluster state -include::modules/dr-restoring-cluster-state.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_dr-restoring-cluster-state"] -== Additional resources - -* xref:../../../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[Installing a user-provisioned cluster on bare metal] -* xref:../../../networking/accessing-hosts.adoc#accessing-hosts[Creating a bastion host to access {product-title} instances and the control plane nodes with SSH] -* xref:../../../installing/installing_bare_metal_ipi/ipi-install-expanding-the-cluster.adoc#replacing-a-bare-metal-control-plane-node_ipi-install-expanding[Replacing a bare-metal control plane node] - -include::modules/dr-scenario-cluster-state-issues.adoc[leveloffset=+1] - - diff --git a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc deleted file mode 100644 index a15d6765d198..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc +++ /dev/null @@ -1,10 +0,0 @@ -:_content-type: ASSEMBLY -[id="dr-recovering-expired-certs"] -= Recovering from expired control plane certificates -include::_attributes/common-attributes.adoc[] -:context: dr-recovering-expired-certs - -toc::[] - -// Recovering from expired control plane certificates -include::modules/dr-recover-expired-control-plane-certs.adoc[leveloffset=+1] diff --git a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/snippets b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/snippets deleted file mode 120000 index ce62fd7c41e2..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/backup_and_restore/control_plane_backup_and_restore/images b/backup_and_restore/control_plane_backup_and_restore/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/backup_and_restore/control_plane_backup_and_restore/modules b/backup_and_restore/control_plane_backup_and_restore/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc b/backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc deleted file mode 100644 index 5ad22219dc1b..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc +++ /dev/null @@ -1,55 +0,0 @@ -:_content-type: ASSEMBLY -[id="replacing-unhealthy-etcd-member"] -= Replacing an unhealthy etcd member -include::_attributes/common-attributes.adoc[] -:context: replacing-unhealthy-etcd-member - -toc::[] - -This document describes the process to replace a single unhealthy etcd member. 
- -This process depends on whether the etcd member is unhealthy because the machine is not running or the node is not ready, or whether it is unhealthy because the etcd pod is crashlooping. - -[NOTE] -==== -If you have lost the majority of your control plane hosts, follow the disaster recovery procedure to xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore to a previous cluster state] instead of this procedure. - -If the control plane certificates are not valid on the member being replaced, then you must follow the procedure to xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[recover from expired control plane certificates] instead of this procedure. - -If a control plane node is lost and a new one is created, the etcd cluster Operator handles generating the new TLS certificates and adding the node as an etcd member. -==== - -== Prerequisites - -* Take an xref:../../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[etcd backup] prior to replacing an unhealthy etcd member. - -// Identifying an unhealthy etcd member -include::modules/restore-identify-unhealthy-etcd-member.adoc[leveloffset=+1] - -// Determining the state of the unhealthy etcd member -include::modules/restore-determine-state-etcd-member.adoc[leveloffset=+1] - -== Replacing the unhealthy etcd member - -Depending on the state of your unhealthy etcd member, use one of the following procedures: - -* xref:../../backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc#restore-replace-stopped-etcd-member_replacing-unhealthy-etcd-member[Replacing an unhealthy etcd member whose machine is not running or whose node is not ready] -* xref:../../backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc#restore-replace-crashlooping-etcd-member_replacing-unhealthy-etcd-member[Replacing an unhealthy etcd member whose etcd pod is crashlooping] -* xref:../../backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc#restore-replace-stopped-baremetal-etcd-member_replacing-unhealthy-etcd-member[Replacing an unhealthy stopped baremetal etcd member] - -// Replacing an unhealthy etcd member whose machine is not running or whose node is not ready -include::modules/restore-replace-stopped-etcd-member.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-troubleshooting.adoc#cpmso-ts-etcd-degraded_cpmso-troubleshooting[Recovering a degraded etcd Operator] - -// Replacing an unhealthy etcd member whose etcd pod is crashlooping -include::modules/restore-replace-crashlooping-etcd-member.adoc[leveloffset=+2] - -// Replacing an unhealthy baremetal stopped etcd member -include::modules/restore-replace-stopped-baremetal-etcd-member.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_replacing-unhealthy-etcd-member"] -== Additional resources -* xref:../../machine_management/deleting-machine.adoc#machine-lifecycle-hook-deletion-etcd_deleting-machine[Quorum protection with machine lifecycle hooks] \ No newline at end of file diff --git a/backup_and_restore/control_plane_backup_and_restore/snippets b/backup_and_restore/control_plane_backup_and_restore/snippets deleted file mode 120000 index 
5a3f5add140e..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/backup_and_restore/graceful-cluster-restart.adoc b/backup_and_restore/graceful-cluster-restart.adoc deleted file mode 100644 index da115c5bf6e8..000000000000 --- a/backup_and_restore/graceful-cluster-restart.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -[id="graceful-restart-cluster"] -= Restarting the cluster gracefully -include::_attributes/common-attributes.adoc[] -:context: graceful-restart-cluster - -toc::[] - -This document describes the process to restart your cluster after a graceful shutdown. - -Even though the cluster is expected to be functional after the restart, the cluster might not recover due to unexpected conditions, for example: - -* etcd data corruption during shutdown -* Node failure due to hardware -* Network connectivity issues - -If your cluster fails to recover, follow the steps to xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore to a previous cluster state]. - -== Prerequisites - -* You have xref:../backup_and_restore/graceful-cluster-shutdown.adoc#graceful-shutdown-cluster[gracefully shut down your cluster]. - -// Restarting the cluster -include::modules/graceful-restart.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[Restoring to a previous cluster state] for how to use an etcd backup to restore if your cluster failed to recover after restarting. diff --git a/backup_and_restore/graceful-cluster-shutdown.adoc b/backup_and_restore/graceful-cluster-shutdown.adoc deleted file mode 100644 index d5fc8860f78d..000000000000 --- a/backup_and_restore/graceful-cluster-shutdown.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: ASSEMBLY -[id="graceful-shutdown-cluster"] -= Shutting down the cluster gracefully -include::_attributes/common-attributes.adoc[] -:context: graceful-shutdown-cluster - -toc::[] - -This document describes the process to gracefully shut down your cluster. You might need to temporarily shut down your cluster for maintenance reasons, or to save on resource costs. - -== Prerequisites - -* Take an xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[etcd backup] prior to shutting down the cluster. 
- -// Shutting down the cluster -include::modules/graceful-shutdown.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_restarting-restoring-cluster"] -== Additional resources - -* xref:../backup_and_restore/graceful-cluster-restart.adoc#graceful-restart-cluster[Restarting the cluster gracefully] - -* xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[Restoring to a previous cluster state] \ No newline at end of file diff --git a/backup_and_restore/images b/backup_and_restore/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/backup_and_restore/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/backup_and_restore/index.adoc b/backup_and_restore/index.adoc deleted file mode 100644 index 4784c633da6d..000000000000 --- a/backup_and_restore/index.adoc +++ /dev/null @@ -1,85 +0,0 @@ -:_content-type: ASSEMBLY -[id="backup-restore-overview"] -= Backup and restore -include::_attributes/common-attributes.adoc[] -:context: backup-restore-overview -:backup-restore-overview: - -toc::[] - -[id="control-plane-backup-restore-operations-overview"] -== Control plane backup and restore operations - -As a cluster administrator, you might need to stop an {product-title} cluster for a period and restart it later. Some reasons to restart a cluster include performing maintenance or reducing resource costs. In {product-title}, you can perform a xref:../backup_and_restore/graceful-cluster-shutdown.adoc#graceful-shutdown-cluster[graceful shutdown of a cluster] so that you can easily restart the cluster later. - -You must xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backup-etcd[back up etcd data] before shutting down a cluster; etcd is the key-value store for {product-title}, which persists the state of all resource objects. An etcd backup plays a crucial role in disaster recovery. In {product-title}, you can also xref:../backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc#replacing-unhealthy-etcd-member[replace an unhealthy etcd member]. - -When you want to get your cluster running again, xref:../backup_and_restore/graceful-cluster-restart.adoc#graceful-restart-cluster[restart the cluster gracefully]. - -[NOTE] -==== -A cluster's certificates expire one year after the installation date. You can shut down a cluster and expect it to restart gracefully while the certificates are still valid. Although the cluster automatically retrieves the expired control plane certificates, you must still xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[approve the certificate signing requests (CSRs)]. -==== - -You might run into several situations where {product-title} does not work as expected, such as: - -* You have a cluster that is not functional after the restart because of unexpected conditions, such as node failure or network connectivity issues. -* You have deleted something critical in the cluster by mistake. -* You have lost the majority of your control plane hosts, leading to etcd quorum loss.
- -You can always recover from a disaster situation by xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restoring your cluster to its previous state] using the saved etcd snapshots. - -[role="_additional-resources"] -.Additional resources -* xref:../machine_management/deleting-machine.adoc#machine-lifecycle-hook-deletion-etcd_deleting-machine[Quorum protection with machine lifecycle hooks] - -[id="application-backup-restore-operations-overview"] -== Application backup and restore operations - -As a cluster administrator, you can back up and restore applications running on {product-title} by using the OpenShift API for Data Protection (OADP). - -OADP backs up and restores Kubernetes resources and internal images, at the granularity of a namespace, by using the version of Velero that is appropriate for the version of OADP you install, according to the table in xref:../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#velero-obtaining-by-downloading_oadp-troubleshooting[Downloading the Velero CLI tool]. OADP backs up and restores persistent volumes (PVs) by using snapshots or Restic. For details, see xref:../backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc#oadp-features_oadp-features-plugins[OADP features]. - -[id="oadp-requirements"] -=== OADP requirements - -OADP has the following requirements: - -* You must be logged in as a user with the `cluster-admin` role. -* You must have object storage for storing backups, such as one of the following storage types: - -** OpenShift Data Foundation -** Amazon Web Services -** Microsoft Azure -** Google Cloud Platform -** S3-compatible object storage - -include::snippets/oadp-ocp-compat.adoc[] - -:FeatureName: The `CloudStorage` API for S3 storage -include::snippets/technology-preview.adoc[] - -* To back up PVs with snapshots, you must have cloud storage that has a native snapshot API or supports Container Storage Interface (CSI) snapshots, such as the following providers: - -** Amazon Web Services -** Microsoft Azure -** Google Cloud Platform -** CSI snapshot-enabled cloud storage, such as Ceph RBD or Ceph FS - -[NOTE] -==== -If you do not want to back up PVs by using snapshots, you can use link:https://restic.net/[Restic], which is installed by the OADP Operator by default. -==== - -[id="backing-up-and-restoring-applications"] -=== Backing up and restoring applications - -You back up applications by creating a `Backup` custom resource (CR). See xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-creating-backup-cr_backing-up-applications[Creating a Backup CR]. You can configure the following backup options: - -* xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-creating-backup-hooks_backing-up-applications[Backup hooks] to run commands before or after the backup operation -* xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-scheduling-backups_backing-up-applications[Scheduled backups] -* xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-backing-up-applications-restic_backing-up-applications[Restic backups] - -You restore application backups by creating a `Restore` custom resource (CR).
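As a minimal sketch of the `Backup` CR creation described above, and not a definitive example, the CR might look like the following; the CR name, the OADP namespace, and the application namespace are assumptions.

[source,yaml]
----
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: example-backup       # assumed name
  namespace: openshift-adp   # assumed namespace where OADP is installed
spec:
  includedNamespaces:
    - my-app                 # assumed application namespace to back up
  ttl: 720h0m0s              # retention period for the backup
----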
See xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#oadp-creating-restore-cr_restoring-applications[Creating a Restore CR]. You can configure xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#oadp-creating-restore-hooks_restoring-applications[restore hooks] to run commands in init containers or in the application container during the restore operation. - -:backup-restore-overview!: diff --git a/backup_and_restore/modules b/backup_and_restore/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/backup_and_restore/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/backup_and_restore/snippets b/backup_and_restore/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/backup_and_restore/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/adding_service_cluster/_attributes b/builds/_attributes similarity index 100% rename from adding_service_cluster/_attributes rename to builds/_attributes diff --git a/applications/connecting_applications_to_services/_attributes b/builds/folder/_attributes similarity index 100% rename from applications/connecting_applications_to_services/_attributes rename to builds/folder/_attributes diff --git a/applications/connecting_applications_to_services/images b/builds/folder/images similarity index 100% rename from applications/connecting_applications_to_services/images rename to builds/folder/images diff --git a/applications/connecting_applications_to_services/modules b/builds/folder/modules similarity index 100% rename from applications/connecting_applications_to_services/modules rename to builds/folder/modules diff --git a/applications/connecting_applications_to_services/snippets b/builds/folder/snippets similarity index 100% rename from applications/connecting_applications_to_services/snippets rename to builds/folder/snippets diff --git a/cicd/images b/builds/images similarity index 100% rename from cicd/images rename to builds/images diff --git a/cicd/modules b/builds/modules similarity index 100% rename from cicd/modules rename to builds/modules diff --git a/applications/creating_applications/_attributes b/builds/sample/_attributes similarity index 100% rename from applications/creating_applications/_attributes rename to builds/sample/_attributes diff --git a/applications/creating_applications/images b/builds/sample/images similarity index 100% rename from applications/creating_applications/images rename to builds/sample/images diff --git a/applications/creating_applications/modules b/builds/sample/modules similarity index 100% rename from applications/creating_applications/modules rename to builds/sample/modules diff --git a/applications/deployments/snippets b/builds/sample/snippets similarity index 100% rename from applications/deployments/snippets rename to builds/sample/snippets diff --git a/adding_service_cluster/snippets b/builds/snippets similarity index 100% rename from adding_service_cluster/snippets rename to builds/snippets diff --git a/cicd/builds/understanding-image-builds.adoc b/builds/understanding-image-builds.adoc similarity index 59% rename from cicd/builds/understanding-image-builds.adoc rename to builds/understanding-image-builds.adoc index 6483bac9c0b7..bd67db2f05e0 100644 --- a/cicd/builds/understanding-image-builds.adoc +++ b/builds/understanding-image-builds.adoc @@ -9,9 +9,3 @@ toc::[] 
include::modules/builds-about.adoc[leveloffset=+1] include::modules/builds-strategy-docker-build.adoc[leveloffset=+2] - -include::modules/builds-strategy-s2i-build.adoc[leveloffset=+2] - -include::modules/builds-strategy-custom-build.adoc[leveloffset=+2] - -include::modules/builds-strategy-pipeline-build.adoc[leveloffset=+2] diff --git a/cicd/_attributes b/cicd/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/cicd/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/cicd/builds/_attributes b/cicd/builds/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/cicd/builds/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/cicd/builds/advanced-build-operations.adoc b/cicd/builds/advanced-build-operations.adoc deleted file mode 100644 index c8a9279320d0..000000000000 --- a/cicd/builds/advanced-build-operations.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="advanced-build-operations"] -= Performing advanced builds -include::_attributes/common-attributes.adoc[] -:context: advanced-build-operations - -toc::[] - -The following sections provide instructions for advanced build operations including -setting build resources and maximum duration, assigning builds to nodes, chaining -builds, build pruning, and build run policies. - -include::modules/builds-setting-build-resources.adoc[leveloffset=+1] - -include::modules/builds-setting-maximum-duration.adoc[leveloffset=+1] - -include::modules/builds-assigning-builds-to-nodes.adoc[leveloffset=+1] - -include::modules/builds-chaining-builds.adoc[leveloffset=+1] - -include::modules/builds-build-pruning.adoc[leveloffset=+1] - -include::modules/builds-build-run-policy.adoc[leveloffset=+1] diff --git a/cicd/builds/basic-build-operations.adoc b/cicd/builds/basic-build-operations.adoc deleted file mode 100644 index 5e63cd49638b..000000000000 --- a/cicd/builds/basic-build-operations.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_content-type: ASSEMBLY -[id="basic-build-operations"] -= Performing and configuring basic builds -include::_attributes/common-attributes.adoc[] -:context: basic-build-operations - -toc::[] - -The following sections provide instructions for basic build operations, including starting and canceling builds, editing `BuildConfigs`, deleting `BuildConfigs`, viewing build details, and accessing build logs. 
- -include::modules/builds-basic-start-build.adoc[leveloffset=+1] -include::modules/builds-basic-start-re-run.adoc[leveloffset=+2] -include::modules/builds-basic-start-logs.adoc[leveloffset=+2] -include::modules/builds-basic-start-environment-variable.adoc[leveloffset=+2] -include::modules/builds-basic-start-source.adoc[leveloffset=+2] -include::modules/builds-basic-cancel-build.adoc[leveloffset=+1] -include::modules/builds-basic-cancel-multiple.adoc[leveloffset=+2] -include::modules/builds-basic-cancel-all.adoc[leveloffset=+2] -include::modules/builds-basic-cancel-all-state.adoc[leveloffset=+2] -include::modules/builds-basic-edit-buildconfig.adoc[leveloffset=+1] -include::modules/builds-basic-delete-buildconfig.adoc[leveloffset=+1] -include::modules/builds-basic-view-build-details.adoc[leveloffset=+1] -include::modules/builds-basic-access-build-logs.adoc[leveloffset=+1] -include::modules/builds-basic-access-buildconfig-logs.adoc[leveloffset=+2] -include::modules/builds-basic-access-buildconfig-version-logs.adoc[leveloffset=+2] -include::modules/builds-basic-access-build-verbosity.adoc[leveloffset=+2] diff --git a/cicd/builds/build-configuration.adoc b/cicd/builds/build-configuration.adoc deleted file mode 100644 index a73773137d95..000000000000 --- a/cicd/builds/build-configuration.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="build-configuration"] -= Build configuration resources -include::_attributes/common-attributes.adoc[] -:context: build-configuration - -toc::[] - -Use the following procedure to configure build settings. - -include::modules/builds-configuration-parameters.adoc[leveloffset=+1] - -include::modules/builds-configuration-file.adoc[leveloffset=+1] diff --git a/cicd/builds/build-strategies.adoc b/cicd/builds/build-strategies.adoc deleted file mode 100644 index 3bc1fd41cb51..000000000000 --- a/cicd/builds/build-strategies.adoc +++ /dev/null @@ -1,91 +0,0 @@ -:_content-type: ASSEMBLY -[id="build-strategies"] -= Using build strategies -include::_attributes/common-attributes.adoc[] -:context: build-strategies - -toc::[] - -The following sections define the primary supported build strategies, and how to -use them. 
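As a hedged illustration of where a strategy is declared, the following `BuildConfig` sketch uses the docker build strategy; the repository URL, resource names, and output image stream tag are assumptions for illustration only.

[source,yaml]
----
apiVersion: build.openshift.io/v1
kind: BuildConfig
metadata:
  name: example-docker-build       # assumed name
spec:
  source:
    git:
      uri: https://github.com/example/app.git   # assumed repository
  strategy:
    type: Docker
    dockerStrategy:
      dockerfilePath: Dockerfile   # path to the Dockerfile within the source
  output:
    to:
      kind: ImageStreamTag
      name: example-app:latest     # assumed output image stream tag
----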
- -// Docker build strategy - -include::modules/builds-strategy-docker-build.adoc[leveloffset=+1] - -include::modules/builds-strategy-docker-from-image.adoc[leveloffset=+2] - -include::modules/builds-strategy-dockerfile-path.adoc[leveloffset=+2] - -include::modules/builds-strategy-docker-environment-variables.adoc[leveloffset=+2] - -include::modules/builds-strategy-docker-build-arguments.adoc[leveloffset=+2] - -include::modules/builds-strategy-docker-squash-layers.adoc[leveloffset=+2] - -:context: build-strategies-docker - -include::modules/builds-using-build-volumes.adoc[leveloffset=+2] - - -// S2I build strategy - -include::modules/builds-strategy-s2i-build.adoc[leveloffset=+1] - -include::modules/builds-strategy-s2i-incremental-builds.adoc[leveloffset=+2] - -include::modules/builds-strategy-s2i-override-builder-image-scripts.adoc[leveloffset=+2] - -include::modules/builds-strategy-s2i-environment-variables.adoc[leveloffset=+2] - -include::modules/builds-strategy-s2i-environment-files.adoc[leveloffset=+3] - -include::modules/builds-strategy-s2i-buildconfig-environment.adoc[leveloffset=+3] - -include::modules/builds-strategy-s2i-ignore-source-files.adoc[leveloffset=+2] - -include::modules/images-create-s2i.adoc[leveloffset=+2] - -include::modules/images-create-s2i-build.adoc[leveloffset=+3] - -include::modules/images-create-s2i-scripts.adoc[leveloffset=+3] - -:context: build-strategies-s2i - -include::modules/builds-using-build-volumes.adoc[leveloffset=+2] - -// Custom build strategy - -include::modules/builds-strategy-custom-build.adoc[leveloffset=+1] - -include::modules/builds-strategy-custom-from-image.adoc[leveloffset=+2] - -include::modules/builds-strategy-custom-secrets.adoc[leveloffset=+2] - -include::modules/builds-strategy-custom-environment-variables.adoc[leveloffset=+2] - -include::modules/images-custom.adoc[leveloffset=+2] - -// Pipeline build strategy - -include::modules/builds-strategy-pipeline-build.adoc[leveloffset=+1] - -include::modules/builds-understanding-openshift-pipeline.adoc[leveloffset=+2] - -include::modules/builds-strategy-pipeline-providing-jenkinsfile.adoc[leveloffset=+2] - -include::modules/builds-strategy-pipeline-environment-variables.adoc[leveloffset=+2] - -include::modules/builds-strategy-pipeline-mapping-buildconfig-jenkins.adoc[leveloffset=+3] - -include::modules/builds-tutorial-pipeline.adoc[leveloffset=+2] - -//include::modules/builds-strategy-force-pull-procedure.adoc[leveloffset=+1] - -//include::modules/builds-strategy-docker-force-pull-example.adoc[leveloffset=+2] - -//include::modules/builds-strategy-s2i-force-pull-example.adoc[leveloffset=+2] - -include::modules/builds-strategy-secrets-web-console.adoc[leveloffset=+1] - -include::modules/builds-strategy-enable-pulling-pushing.adoc[leveloffset=+1] diff --git a/cicd/builds/creating-build-inputs.adoc b/cicd/builds/creating-build-inputs.adoc deleted file mode 100644 index 32354cd232c9..000000000000 --- a/cicd/builds/creating-build-inputs.adoc +++ /dev/null @@ -1,89 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-build-inputs"] -= Creating build inputs -include::_attributes/common-attributes.adoc[] -:context: creating-build-inputs - -toc::[] - -Use the following sections for an overview of build inputs, instructions on how -to use inputs to provide source content for builds to operate on, and how to use -build environments and create secrets. 
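As a hedged sketch of the build inputs covered in this assembly, the following `BuildConfig` fragment combines a Git source, a context directory, a source clone secret, and an input config map; the repository URL and all names are assumptions.

[source,yaml]
----
apiVersion: build.openshift.io/v1
kind: BuildConfig
metadata:
  name: example-input-build              # assumed name
spec:
  source:
    type: Git
    git:
      uri: https://github.com/example/app.git   # assumed repository
      ref: main
    contextDir: services/frontend        # build from a subdirectory of the repository
    sourceSecret:
      name: example-basic-auth-secret    # assumed source clone secret
    configMaps:
      - configMap:
          name: example-settings         # assumed input config map
        destinationDir: config           # copied into this directory during the build
  strategy:
    type: Source
    sourceStrategy:
      from:
        kind: ImageStreamTag
        name: nodejs:latest              # assumed builder image
        namespace: openshift
----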
- -include::modules/builds-define-build-inputs.adoc[leveloffset=+1] - -include::modules/builds-dockerfile-source.adoc[leveloffset=+1] - -include::modules/builds-image-source.adoc[leveloffset=+1] - -include::modules/builds-source-code.adoc[leveloffset=+1] - -include::modules/builds-using-proxy-git-cloning.adoc[leveloffset=+2] - -include::modules/builds-adding-source-clone-secrets.adoc[leveloffset=+2] - -include::modules/builds-automatically-add-source-clone-secrets.adoc[leveloffset=+3] - -include::modules/builds-manually-add-source-clone-secrets.adoc[leveloffset=+3] - -include::modules/builds-gitconfig-file.adoc[leveloffset=+3] - -include::modules/builds-gitconfig-file-secured-git.adoc[leveloffset=+3] - -include::modules/builds-source-secret-basic-auth.adoc[leveloffset=+3] - -include::modules/builds-source-secret-ssh-key-auth.adoc[leveloffset=+3] - -include::modules/builds-source-secret-trusted-ca.adoc[leveloffset=+3] - -include::modules/builds-source-secret-combinations.adoc[leveloffset=+3] - -include::modules/builds-source-secret-combinations-ssh-gitconfig.adoc[leveloffset=+4] - -include::modules/builds-source-secret-combinations-gitconfig-ca.adoc[leveloffset=+4] - -include::modules/builds-source-secret-combinations-basic-auth-ca.adoc[leveloffset=+4] - -include::modules/builds-source-secret-combinations-basic-auth-gitconfig.adoc[leveloffset=+4] - -include::modules/builds-source-secret-combinations-basic-auth-gitconfig-ca.adoc[leveloffset=+4] - -include::modules/builds-binary-source.adoc[leveloffset=+1] - -include::modules/builds-input-secrets-configmaps.adoc[leveloffset=+1] - -include::modules/builds-secrets-overview.adoc[leveloffset=+2] - -include::modules/builds-creating-secrets.adoc[leveloffset=+2] - -include::modules/builds-using-secrets.adoc[leveloffset=+2] - -include::modules/builds-adding-input-secrets-configmaps.adoc[leveloffset=+2] - -include::modules/builds-source-to-image.adoc[leveloffset=+2] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] - -include::modules/builds-docker-strategy.adoc[leveloffset=+2] - -include::modules/builds-custom-strategy.adoc[leveloffset=+2] -endif::[] - -include::modules/builds-using-external-artifacts.adoc[leveloffset=+1] - -include::modules/builds-docker-credentials-private-registries.adoc[leveloffset=+1] - -include::modules/builds-build-environment.adoc[leveloffset=+1] - -include::modules/builds-using-build-fields-as-environment-variables.adoc[leveloffset=+2] - -include::modules/builds-using-secrets-as-environment-variables.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../cicd/builds/creating-build-inputs.adoc#builds-input-secrets-configmaps_creating-build-inputs[Input secrets and config maps] - -include::modules/builds-service-serving-certificate-secrets.adoc[leveloffset=+1] - -include::modules/builds-secrets-restrictions.adoc[leveloffset=+1] diff --git a/cicd/builds/custom-builds-buildah.adoc b/cicd/builds/custom-builds-buildah.adoc deleted file mode 100644 index 9ea928151cb0..000000000000 --- a/cicd/builds/custom-builds-buildah.adoc +++ /dev/null @@ -1,36 +0,0 @@ -:_content-type: ASSEMBLY -[id="custom-builds-buildah"] -= Custom image builds with Buildah -include::_attributes/common-attributes.adoc[] -:context: custom-builds-buildah - -toc::[] - - -With {product-title} {product-version}, a docker socket will not be present on the host -nodes. 
This means the _mount docker socket_ option of a custom build is not -guaranteed to provide an accessible docker socket for use within a custom build -image. - -If you require this capability to build and push images, add the Buildah -tool to your custom build image and use it to build and push the image within your -custom build logic. The following is an example of how to run custom builds with -Buildah. - -[NOTE] -==== -Using the custom build strategy requires permissions that normal users do -not have by default, because it allows the user to execute arbitrary code inside -a privileged container running on the cluster. This level of access can be used -to compromise the cluster and therefore should be granted only to users who are -trusted with administrative privileges on the cluster. -==== - -== Prerequisites - -* Review how to xref:../../cicd/builds/securing-builds-by-strategy.adoc#securing-builds-by-strategy[grant custom build permissions]. - - -include::modules/builds-create-custom-build-artifacts.adoc[leveloffset=+1] -include::modules/builds-build-custom-builder-image.adoc[leveloffset=+1] -include::modules/builds-use-custom-builder-image.adoc[leveloffset=+1] diff --git a/cicd/builds/images b/cicd/builds/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/cicd/builds/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/cicd/builds/managing-build-output.adoc b/cicd/builds/managing-build-output.adoc deleted file mode 100644 index 1378cd27f6e5..000000000000 --- a/cicd/builds/managing-build-output.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="managing-build-output"] -= Managing build output -include::_attributes/common-attributes.adoc[] -:context: managing-build-output - -toc::[] - - -Use the following sections for an overview of, and instructions for, managing -build output. - -include::modules/builds-docker-source-build-output.adoc[leveloffset=+1] - -include::modules/builds-output-image-environment-variables.adoc[leveloffset=+1] - -include::modules/builds-output-image-labels.adoc[leveloffset=+1] diff --git a/cicd/builds/modules b/cicd/builds/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/cicd/builds/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/cicd/builds/running-entitled-builds.adoc b/cicd/builds/running-entitled-builds.adoc deleted file mode 100644 index 4eef8f5985dc..000000000000 --- a/cicd/builds/running-entitled-builds.adoc +++ /dev/null @@ -1,43 +0,0 @@ -:_content-type: ASSEMBLY -[id="running-entitled-builds"] -= Using Red Hat subscriptions in builds -include::_attributes/common-attributes.adoc[] -:context: running-entitled-builds - -toc::[] - -[role="_abstract"] -Use the following sections to run entitled builds on {product-title}.
- -include::modules/builds-create-imagestreamtag.adoc[leveloffset=+1] - -include::modules/builds-source-secrets-entitlements.adoc[leveloffset=+1] - -== Running builds with Subscription Manager - -include::modules/builds-strategy-docker-entitled-subman.adoc[leveloffset=+2] - -== Running builds with Red Hat Satellite subscriptions - -include::modules/builds-source-input-satellite-config.adoc[leveloffset=+2] - -include::modules/builds-strategy-docker-entitled-satellite.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/solutions/5847331[How to use builds with Red Hat Satellite subscriptions and which certificate to use] - -// Beginning of "Running entitled builds with SharedSecret objects" section - -include::modules/builds-running-entitled-builds-with-sharedsecret-objects.adoc[leveloffset=+1] - -// End of "Running entitled builds with SharedSecret objects" section - -[role="_additional-resources"] -== Additional resources - -* xref:../../support/remote_health_monitoring/insights-operator-simple-access.adoc#insights-operator-simple-access[Importing simple content access certificates with Insights Operator] -* xref:../../nodes/clusters/nodes-cluster-enabling-features.adoc#nodes-cluster-enabling[Enabling features using feature gates] -* xref:../../openshift_images/image-streams-manage.adoc#image-streams-managing[Managing image streams] -* xref:../../cicd/builds/build-strategies.adoc#build-strategies[Using build strategies] diff --git a/cicd/builds/securing-builds-by-strategy.adoc b/cicd/builds/securing-builds-by-strategy.adoc deleted file mode 100644 index 9809d2327602..000000000000 --- a/cicd/builds/securing-builds-by-strategy.adoc +++ /dev/null @@ -1,41 +0,0 @@ -:_content-type: ASSEMBLY -[id="securing-builds-by-strategy"] -= Securing builds by strategy -include::_attributes/common-attributes.adoc[] -:context: securing-builds-by-strategy - -toc::[] - -Builds in {product-title} are run in privileged containers. Depending on the build strategy used, a user with the required privileges can run builds to escalate their permissions on the cluster and host nodes. As a security measure, limit who can run builds and which build strategies those builds can use. Custom builds are inherently less safe than source builds, because they can execute any code within a privileged container, and are disabled by default. Grant docker build permissions with caution, because a vulnerability in the Dockerfile processing logic could result in privileges being granted on the host node. - -By default, all users who can create builds are granted permission to use the docker and Source-to-Image (S2I) build strategies. Users with cluster administrator privileges can enable the custom build strategy, as described in the section on restricting build strategies to a user globally. - -You can control who can build and which build strategies they can use by using an authorization policy. Each build strategy has a corresponding build subresource. A user must have permission to create a build and permission to create on the build strategy subresource to create builds using that strategy. Default roles are provided that grant the create permission on the build strategy subresource.
- -.Build Strategy Subresources and Roles -[options="header"] -|=== - -|Strategy |Subresource |Role - -|Docker -|builds/docker -|system:build-strategy-docker - -|Source-to-Image -|builds/source -|system:build-strategy-source - -|Custom -|builds/custom -|system:build-strategy-custom - -|JenkinsPipeline -|builds/jenkinspipeline -|system:build-strategy-jenkinspipeline - -|=== - -include::modules/builds-disabling-build-strategy-globally.adoc[leveloffset=+1] -include::modules/builds-restricting-build-strategy-globally.adoc[leveloffset=+1] -include::modules/builds-restricting-build-strategy-to-user.adoc[leveloffset=+1] diff --git a/cicd/builds/setting-up-trusted-ca.adoc b/cicd/builds/setting-up-trusted-ca.adoc deleted file mode 100644 index 6adc4e59cb6e..000000000000 --- a/cicd/builds/setting-up-trusted-ca.adoc +++ /dev/null @@ -1,35 +0,0 @@ -:_content-type: ASSEMBLY -[id="setting-up-trusted-ca"] -= Setting up additional trusted certificate authorities for builds -ifndef::openshift-dedicated,openshift-rosa[] -include::_attributes/common-attributes.adoc[] -endif::[] -ifdef::openshift-dedicated,openshift-rosa[] -include::_attributes/attributes-openshift-dedicated.adoc[] -endif::[] -:context: setting-up-trusted-ca - -toc::[] - -ifdef::openshift-enterprise,openshift-rosa,openshift-dedicated,openshift-webscale,openshift-origin[] -Use the following sections to set up additional certificate authorities (CA) to be trusted by builds when pulling images from an image registry. - -The procedure requires a cluster administrator to create a `ConfigMap` and add additional CAs as keys in the `ConfigMap`. - -* The `ConfigMap` must be created in the `openshift-config` namespace. -* `domain` is the key in the `ConfigMap` and `value` is the PEM-encoded certificate. -** Each CA must be associated with a domain. The domain format is `hostname[..port]`. -* The `ConfigMap` name must be set in the `image.config.openshift.io/cluster` cluster scoped configuration resource's `spec.additionalTrustedCA` field. -//* No longer needs single PEM bundle - -include::modules/configmap-adding-ca.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* link:https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#create-a-configmap[Create a `ConfigMap`] -* link:https://kubectl.docs.kubernetes.io/guides/config_management/secrets_configmaps/[Secrets and `ConfigMaps`] -ifndef::openshift-rosa,openshift-dedicated[] -* xref:../../networking/configuring-a-custom-pki.adoc#configuring-a-custom-pki[Configuring a custom PKI] -endif::[] -endif::[] diff --git a/cicd/builds/snippets b/cicd/builds/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/cicd/builds/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/cicd/builds/triggering-builds-build-hooks.adoc b/cicd/builds/triggering-builds-build-hooks.adoc deleted file mode 100644 index adca76892240..000000000000 --- a/cicd/builds/triggering-builds-build-hooks.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="triggering-builds-build-hooks"] -= Triggering and modifying builds -include::_attributes/common-attributes.adoc[] -:context: triggering-builds-build-hooks - -toc::[] - -The following sections outline how to trigger builds and modify builds using build hooks. 
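As a hedged sketch of the webhook, image change, and configuration change triggers described in the modules that follow, a `BuildConfig` can declare its triggers as shown below; the webhook secret name is an assumption, and the source, strategy, and output sections are omitted for brevity.

[source,yaml]
----
apiVersion: build.openshift.io/v1
kind: BuildConfig
metadata:
  name: example-triggered-build          # assumed name
spec:
  triggers:
    - type: GitHub
      github:
        secretReference:
          name: example-webhook-secret   # assumed webhook secret
    - type: ImageChange
      imageChange: {}                    # rebuild when the strategy base image changes
    - type: ConfigChange                 # run a build when the BuildConfig is created
----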
- -include::modules/builds-triggers.adoc[leveloffset=+1] - -include::modules/builds-webhook-triggers.adoc[leveloffset=+2] - -include::modules/builds-using-github-webhooks.adoc[leveloffset=+3] - -include::modules/builds-using-gitlab-webhooks.adoc[leveloffset=+3] - -include::modules/builds-using-bitbucket-webhooks.adoc[leveloffset=+3] - -include::modules/builds-using-generic-webhooks.adoc[leveloffset=+3] - -include::modules/builds-displaying-webhook-urls.adoc[leveloffset=+3] - -include::modules/builds-using-image-change-triggers.adoc[leveloffset=+2] - -include::modules/builds-identifying-image-change-triggers.adoc[leveloffset=+2] - -include::modules/builds-configuration-change-triggers.adoc[leveloffset=+2] - -include::modules/builds-setting-triggers-manually.adoc[leveloffset=+3] - -include::modules/builds-build-hooks.adoc[leveloffset=+1] - -include::modules/builds-configuring-post-commit-build-hooks.adoc[leveloffset=+2] - -include::modules/builds-using-cli-post-commit-build-hooks.adoc[leveloffset=+2] diff --git a/cicd/builds/troubleshooting-builds.adoc b/cicd/builds/troubleshooting-builds.adoc deleted file mode 100644 index 92cd14bfd15d..000000000000 --- a/cicd/builds/troubleshooting-builds.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="troubleshooting-builds_{context}"] -= Troubleshooting builds -include::_attributes/common-attributes.adoc[] -:context: troubleshooting-builds - -toc::[] - -Use the following to troubleshoot build issues. - -include::modules/builds-troubleshooting-access-resources.adoc[leveloffset=+1] - -include::modules/builds-troubleshooting-service-certificate-generation.adoc[leveloffset=+1] diff --git a/cicd/builds/understanding-buildconfigs.adoc b/cicd/builds/understanding-buildconfigs.adoc deleted file mode 100644 index bf87540e52ac..000000000000 --- a/cicd/builds/understanding-buildconfigs.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-buildconfigs"] -= Understanding build configurations -include::_attributes/common-attributes.adoc[] -:context: understanding-builds - -toc::[] - -The following sections define the concept of a build, build configuration, and outline the primary build strategies available. - -include::modules/builds-buildconfig.adoc[leveloffset=+1] diff --git a/cicd/gitops/_attributes b/cicd/gitops/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/cicd/gitops/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/cicd/gitops/about-sizing-requirements-gitops.adoc b/cicd/gitops/about-sizing-requirements-gitops.adoc deleted file mode 100644 index 1907bf67f9b4..000000000000 --- a/cicd/gitops/about-sizing-requirements-gitops.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_content-type: ASSEMBLY -[id="about-sizing-requirements-gitops"] -= Sizing requirements for GitOps Operator -include::_attributes/common-attributes.adoc[] -:context: about-sizing-requirements-gitops - -toc::[] - -[role="_abstract"] -The sizing requirements page displays the sizing requirements for installing {gitops-title} on {product-title}. It also provides the sizing details for the default ArgoCD instance that is instantiated by the GitOps Operator. 
- -include::modules/sizing-requirements-for-gitops.adoc[leveloffset=+1] diff --git a/cicd/gitops/argo-cd-custom-resource-properties.adoc b/cicd/gitops/argo-cd-custom-resource-properties.adoc deleted file mode 100644 index ad6e49faa176..000000000000 --- a/cicd/gitops/argo-cd-custom-resource-properties.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_content-type: ASSEMBLY -[id="argo-cd-custom-resource-properties"] -= Argo CD Operator -include::_attributes/common-attributes.adoc[] -:context: argo-cd-custom-resource-properties - -toc::[] - -[role="_abstract"] -The `ArgoCD` custom resource is a Kubernetes custom resource that describes the desired state of an Argo CD cluster and allows you to configure the components that make up that cluster. - -include::modules/argo-cd-command-line.adoc[leveloffset=+1] -include::modules/gitops-argo-cd-properties.adoc[leveloffset=+1] -include::modules/gitops-repo-server-properties.adoc[leveloffset=+1] -include::modules/gitops-argo-cd-notification.adoc[leveloffset=+1] diff --git a/cicd/gitops/collecting-debugging-data-for-support.adoc b/cicd/gitops/collecting-debugging-data-for-support.adoc deleted file mode 100644 index c2db643ed117..000000000000 --- a/cicd/gitops/collecting-debugging-data-for-support.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="collecting-debugging-data-for-support"] -= Collecting debugging data for a support case -include::_attributes/common-attributes.adoc[] -:context: collecting-debugging-data-for-support - -toc::[] - -When you open a support case, you must provide debugging information about your cluster to the Red Hat Support team. You can use the `must-gather` tool to collect diagnostic information for project-level resources, cluster-level resources, and {gitops-title} components. - -[NOTE] -==== -For prompt support, provide diagnostic information for both {product-title} and {gitops-title}. -==== - -include::modules/about-must-gather.adoc[leveloffset=+1] -include::modules/collecting-gitops-debugging-data.adoc[leveloffset=+1] diff --git a/cicd/gitops/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc b/cicd/gitops/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc deleted file mode 100644 index 05240fc23661..000000000000 --- a/cicd/gitops/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc +++ /dev/null @@ -1,38 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations"] -= Configuring an OpenShift cluster by deploying an application with cluster configurations -include::_attributes/common-attributes.adoc[] -:context: configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations - -toc::[] - -With {gitops-title}, you can configure Argo CD to recursively sync the content of a Git directory with an application that contains custom configurations for your cluster. - -.Prerequisites - -* You have logged in to the {product-title} cluster as an administrator. -* You have installed the {gitops-title} Operator in your cluster. -* You have logged in to the Argo CD instance.
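The modules that follow contain the supported procedures. Purely as an illustrative sketch, an Argo CD `Application` that recursively syncs a directory of cluster configuration might look like the following, where the repository URL, path, and branch are placeholders that you replace with your own values:

[source,yaml]
----
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: cluster-configs
  namespace: openshift-gitops
spec:
  project: default
  source:
    repoURL: https://github.com/<your_org>/<your_config_repo>.git
    path: cluster
    targetRevision: main
    directory:
      recurse: true          # sync the directory contents recursively
  destination:
    server: https://kubernetes.default.svc
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
----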
- -include::modules/gitops-using-argo-cd-instance-to-manage-cluster-scoped-resources.adoc[leveloffset=+1] - -include::modules/gitops-default-permissions-of-an-argocd-instance.adoc[leveloffset=+1] - -include::modules/go-run-argo-cd-instance-on-infrastructure-nodes.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* To learn more about taints and tolerations, see xref:../../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations[Controlling pod placement using node taints]. -* For more information on infrastructure machine sets, see xref:../../machine_management/creating-infrastructure-machinesets.adoc#creating-infrastructure-machinesets[Creating infrastructure machine sets]. - -include::modules/gitops-creating-an-application-by-using-the-argo-cd-dashboard.adoc[leveloffset=+1] - -include::modules/gitops-creating-an-application-by-using-the-oc-tool.adoc[leveloffset=+1] - -include::modules/gitops-synchronizing-your-application-application-with-your-git-repository.adoc[leveloffset=+1] - -include::modules/gitops-inbuilt-permissions-for-cluster-config.adoc[leveloffset=+1] - -include::modules/gitops-additional-permissions-for-cluster-config.adoc[leveloffset=+1] - -include::modules/gitops-installing-olm-operators-using-gitops.adoc[leveloffset=+1] \ No newline at end of file diff --git a/cicd/gitops/configuring-argo-cd-rbac.adoc b/cicd/gitops/configuring-argo-cd-rbac.adoc deleted file mode 100644 index 00b6c827d7b5..000000000000 --- a/cicd/gitops/configuring-argo-cd-rbac.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-argo-cd-rbac"] -= Configuring Argo CD RBAC -include::_attributes/common-attributes.adoc[] -:context: configuring-argo-cd-rbac - -toc::[] - -[role="_abstract"] -By default, if you are logged into Argo CD using RHSSO, you are a read-only user. You can change and manage the user level access. - -include::modules/configuring-user-level-access.adoc[leveloffset=+1] -include::modules/modify-rhsso-requests-limits.adoc[leveloffset=+1] diff --git a/cicd/gitops/configuring-resource-quota.adoc b/cicd/gitops/configuring-resource-quota.adoc deleted file mode 100644 index 937f1fb3c44a..000000000000 --- a/cicd/gitops/configuring-resource-quota.adoc +++ /dev/null @@ -1,14 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-resource-quota"] -= Configuring resource quota or requests -include::_attributes/common-attributes.adoc[] -:context: configuring-resource-quota - -toc::[] - -[role="_abstract"] -With the Argo CD Custom Resource, you can create, update, and delete resource requests and limits for Argo CD workloads. - -include::modules/configure-workloads.adoc[leveloffset=+1] -include::modules/patch-argocd-instance.adoc[leveloffset=+1] -include::modules/remove-resource-requirements.adoc[leveloffset=+1] diff --git a/cicd/gitops/configuring-sso-for-argo-cd-on-openshift.adoc b/cicd/gitops/configuring-sso-for-argo-cd-on-openshift.adoc deleted file mode 100644 index f293b6f82222..000000000000 --- a/cicd/gitops/configuring-sso-for-argo-cd-on-openshift.adoc +++ /dev/null @@ -1,35 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-sso-for-argo-cd-on-openshift"] -= Configuring SSO for Argo CD on OpenShift -include::_attributes/common-attributes.adoc[] -:context: configuring-sso-for-argo-cd-on-openshift - -toc::[] - -After the {gitops-title} Operator is installed, Argo CD automatically creates a user with `admin` permissions. To manage multiple users, Argo CD allows cluster administrators to configure SSO. 
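As a brief, hedged sketch of where that configuration lives, the SSO provider is selected through the `.spec.sso` field of the `ArgoCD` custom resource; the supported provider values and any provider-specific settings are described in the modules that follow:

[source,yaml]
----
apiVersion: argoproj.io/v1alpha1
kind: ArgoCD
metadata:
  name: openshift-gitops
  namespace: openshift-gitops
spec:
  sso:
    provider: keycloak   # delegate login to the bundled Keycloak (RH SSO) instance
----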
- -.Prerequisites -* Red Hat SSO is installed on the cluster. - -include::modules/gitops-creating-a-new-client-in-keycloak.adoc[leveloffset=+1] - -include::modules/gitops-configuring-the-groups-claim.adoc[leveloffset=+1] - -include::modules/gitops-configuring-argo-cd-oidc.adoc[leveloffset=+1] - -include::modules/gitops-keycloak-identity-brokering-with-openshift-oauthclient.adoc[leveloffset=+1] - -include::modules/gitops-registering-an-additional-oauth-client.adoc[leveloffset=+1] - -include::modules/gitops-configuring-groups-and-argocd-rbac.adoc[leveloffset=+1] - -//include::modules/gitops-enabling-dex.adoc[leveloffset=+1] - -include::modules/gitops-in-built-permissions.adoc[leveloffset=+1] - -//// -[role="_additional-resources"] -.Additional resources -* link:https://stedolan.github.io/jq/[`jq` command-line JSON processor documentation.] -* link:https://argoproj.github.io/argo-cd/operator-manual/rbac/[Argo CD upstream documentation, RBAC Configuration section]. -//// diff --git a/cicd/gitops/configuring-sso-for-argo-cd-using-keycloak.adoc b/cicd/gitops/configuring-sso-for-argo-cd-using-keycloak.adoc deleted file mode 100644 index dbd0d8683a97..000000000000 --- a/cicd/gitops/configuring-sso-for-argo-cd-using-keycloak.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-sso-for-argo-cd-using-keycloak"] -= Configuring SSO for Argo CD using Keycloak -include::_attributes/common-attributes.adoc[] -:context: configuring-sso-for-argo-cd-using-keycloak - -toc::[] - -After the {gitops-title} Operator is installed, Argo CD automatically creates a user with `admin` permissions. To manage multiple users, cluster administrators can use Argo CD to configure Single Sign-On (SSO). - -.Prerequisites -* Red Hat SSO is installed on the cluster. -* {gitops-title} Operator is installed on the cluster. -* Argo CD is installed on the cluster. - -include::modules/gitops-creating-a-new-client-using-keycloak.adoc[leveloffset=+1] - -include::modules/gitops-logging-into-keycloak.adoc[leveloffset=+1] - -include::modules/gitops-uninstall-keycloak.adoc[leveloffset=+1] - -//// -[role="_additional-resources"] -.Additional resources -* link:https://stedolan.github.io/jq/[`jq` command-line JSON processor documentation.] -* link:https://argoproj.github.io/argo-cd/operator-manual/rbac/[Argo CD upstream documentation, RBAC Configuration section]. -//// diff --git a/cicd/gitops/configuring-sso-on-argo-cd-using-dex.adoc b/cicd/gitops/configuring-sso-on-argo-cd-using-dex.adoc deleted file mode 100644 index 28fbb2790c70..000000000000 --- a/cicd/gitops/configuring-sso-on-argo-cd-using-dex.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-sso-for-argo-cd-using-dex"] -= Configuring SSO for Argo CD using Dex -include::_attributes/common-attributes.adoc[] -:context: configuring-sso-for-argo-cd-using-dex - -toc::[] - -After the {gitops-title} Operator is installed, Argo CD automatically creates a user with `admin` permissions. To manage multiple users, cluster administrators can use Argo CD to configure Single Sign-On (SSO). - -[IMPORTANT] -==== -The `spec.dex` parameter in the ArgoCD CR is deprecated. In a future release of {gitops-title} v1.10.0, configuring Dex using the `spec.dex` parameter in the ArgoCD CR is planned to be removed. Consider using the `.spec.sso` parameter instead. 
-==== - -include::modules/gitops-creating-a-new-client-in-dex.adoc[leveloffset=+1] - -include::modules/gitops-dex-role-mappings.adoc[leveloffset=+2] - -//include::modules/gitops-configuring-argo-cd-using-dex-github-conector.adoc[leveloffset=+1] - -include::modules/gitops-disable-dex.adoc[leveloffset=+1] - -include::modules/gitops-disable-dex-using-spec-sso.adoc[leveloffset=+1] - -//// -[role="_additional-resources"] -.Additional resources -* link:https://stedolan.github.io/jq/[`jq` command-line JSON processor documentation.] -* link:https://argoproj.github.io/argo-cd/operator-manual/rbac/[Argo CD upstream documentation, RBAC Configuration section]. -//// diff --git a/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/_attributes b/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/images b/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/images deleted file mode 120000 index 4dd3347de19a..000000000000 --- a/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/images +++ /dev/null @@ -1 +0,0 @@ -../../../images \ No newline at end of file diff --git a/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/modules b/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/modules deleted file mode 120000 index 5be29a99c161..000000000000 --- a/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/modules +++ /dev/null @@ -1 +0,0 @@ -../../../modules \ No newline at end of file diff --git a/cicd/gitops/deploying-a-spring-boot-application-with-argo-cd.adoc b/cicd/gitops/deploying-a-spring-boot-application-with-argo-cd.adoc deleted file mode 100644 index 53a9fc1dacbd..000000000000 --- a/cicd/gitops/deploying-a-spring-boot-application-with-argo-cd.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="deploying-a-spring-boot-application-with-argo-cd"] -= Deploying a Spring Boot application with Argo CD -include::_attributes/common-attributes.adoc[] -:context: deploying-a-spring-boot-application-with-argo-cd - -toc::[] - -With Argo CD, you can deploy your applications to the OpenShift cluster either by using the Argo CD dashboard or by using the `oc` tool. - -.Prerequisites - -* Red Hat OpenShift GitOps is installed in your cluster. -* Logged into Argo CD instance. 
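If you still need to satisfy the last prerequisite above, the following commands are a common way to look up the default Argo CD admin password and server route created by the Operator; the secret and route names assume the default `openshift-gitops` instance:

[source,terminal]
----
$ oc get secret openshift-gitops-cluster -n openshift-gitops -o jsonpath='{.data.admin\.password}' | base64 -d
$ oc get route openshift-gitops-server -n openshift-gitops -o jsonpath='{.spec.host}'
----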
- -include::modules/gitops-creating-an-application-by-using-the-argo-cd-dashboard.adoc[leveloffset=+1] - -include::modules/gitops-creating-an-application-by-using-the-oc-tool.adoc[leveloffset=+1] - -include::modules/gitops-verifying-argo-cd-self-healing-behavior.adoc[leveloffset=+1] diff --git a/cicd/gitops/gitops-release-notes.adoc b/cicd/gitops/gitops-release-notes.adoc deleted file mode 100644 index 1ac246a31ff8..000000000000 --- a/cicd/gitops/gitops-release-notes.adoc +++ /dev/null @@ -1,111 +0,0 @@ -//OpenShift GitOps Release Notes -:_content-type: ASSEMBLY -[id="gitops-release-notes"] -= {gitops-title} release notes -:context: gitops-release-notes -include::_attributes/common-attributes.adoc[] - -toc::[] - -[role="_abstract"] -{gitops-title} is a declarative way to implement continuous deployment for cloud native applications. {gitops-title} ensures consistency in applications when you deploy them to different clusters in different environments, such as: development, staging, and production. {gitops-title} helps you automate the following tasks: - -* Ensure that the clusters have similar states for configuration, monitoring, and storage -* Recover or recreate clusters from a known state -* Apply or revert configuration changes to multiple {product-title} clusters -* Associate templated configuration with different environments -* Promote applications across clusters, from staging to production - -For an overview of {gitops-title}, see xref:../../cicd/gitops/understanding-openshift-gitops.adoc#understanding-openshift-gitops[Understanding OpenShift GitOps]. - -include::modules/go-compatibility-and-support-matrix.adoc[leveloffset=+1] - -include::modules/making-open-source-more-inclusive.adoc[leveloffset=+1] - -// Modules included, most to least recent -include::modules/gitops-release-notes-1-9-1.adoc[leveloffset=+1] -include::modules/gitops-release-notes-1-9-0.adoc[leveloffset=+1] -// 1.25.0 additional resources, OCP docs -ifdef::openshift-enterprise[] -[role="_additional-resources"] -.Additional resources -* xref:../../operators/admin/olm-configuring-proxy-support.adoc#olm-inject-custom-ca_olm-configuring-proxy-support[Injecting a custom CA certificate] -endif::[] - -include::modules/gitops-release-notes-1-8-3.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-8-2.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-8-1.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-8-0.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-7-4.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-7-3.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-7-1.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-7-0.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-6-4.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-6-2.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-6-1.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-6-0.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-5-9.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-5-7.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-5-6.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-5-5.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-5-4.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-5-3.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-5-2.adoc[leveloffset=+1] - 
-include::modules/gitops-release-notes-1-5-1.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-5-0.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-4-13.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-4-12.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-4-11.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-4-6.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-4-5.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-4-3.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-4-2.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-4-1.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-4-0.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-3-7.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-3-6.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-3-2.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-3-1.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-3-0.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-2-2.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-2-1.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-2.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-1.adoc[leveloffset=+1] diff --git a/cicd/gitops/health-information-for-resources-deployment.adoc b/cicd/gitops/health-information-for-resources-deployment.adoc deleted file mode 100644 index 0eb5a6be00b2..000000000000 --- a/cicd/gitops/health-information-for-resources-deployment.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_content-type: ASSEMBLY -[id="health-information-for-resources-deployment"] -= Monitoring health information for application resources and deployments -:context: health-information-for-resources-deployment -include::_attributes/common-attributes.adoc[] - -toc::[] - -The {gitops-title} *Environments* page in the *Developer* perspective of the {product-title} web console shows a list of the successful deployments of the application environments, along with links to the revision for each deployment. - -The *Application environments* page in the *Developer* perspective of the {product-title} web console displays the health status of the application resources, such as routes, synchronization status, deployment configuration, and deployment history. - -The environments pages in the *Developer* perspective of the {product-title} web console are decoupled from the {gitops-title} Application Manager command-line interface (CLI), `kam`. You do not have to use `kam` to generate Application Environment manifests for the environments to show up in the *Developer* perspective of the {product-title} web console. You can use your own manifests, but the environments must still be represented by namespaces. In addition, specific labels and annotations are still needed. 
- -include::modules/go-health-monitoring.adoc[leveloffset=+1] diff --git a/cicd/gitops/images b/cicd/gitops/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/cicd/gitops/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/cicd/gitops/installing-openshift-gitops.adoc b/cicd/gitops/installing-openshift-gitops.adoc deleted file mode 100644 index 2435189f3b21..000000000000 --- a/cicd/gitops/installing-openshift-gitops.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -[id="getting-started-with-openshift-gitops"] -= Installing {gitops-title} -include::_attributes/common-attributes.adoc[] -:context: installing-openshift-gitops - -toc::[] - -[role="_abstract"] -{gitops-title} uses Argo CD to manage specific cluster-scoped resources, including cluster Operators, optional Operator Lifecycle Manager (OLM) Operators, and user management. - -[discrete] -== Prerequisites - -* You have access to the {product-title} web console. -* You are logged in as a user with the `cluster-admin` role. -* You are logged in to the {product-title} cluster as an administrator. -* Your cluster has the xref:../../installing/cluster-capabilities.adoc#marketplace-operator_cluster-capabilities[Marketplace capability] enabled or the Red Hat Operator catalog source configured manually. - -[WARNING] -==== -If you have already installed the Community version of the Argo CD Operator, remove the Argo CD Community Operator before you install the {gitops-title} Operator. -==== - -This guide explains how to install the {gitops-title} Operator to an {product-title} cluster and log in to the Argo CD instance. - -include::modules/installing-gitops-operator-in-web-console.adoc[leveloffset=+1] - -include::modules/installing-gitops-operator-using-cli.adoc[leveloffset=+1] - -include::modules/logging-in-to-the-argo-cd-instance-by-using-the-argo-cd-admin-account.adoc[leveloffset=+1] diff --git a/cicd/gitops/modules b/cicd/gitops/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/cicd/gitops/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/cicd/gitops/monitoring-argo-cd-custom-resource-workloads.adoc b/cicd/gitops/monitoring-argo-cd-custom-resource-workloads.adoc deleted file mode 100644 index ad89c27ea1ef..000000000000 --- a/cicd/gitops/monitoring-argo-cd-custom-resource-workloads.adoc +++ /dev/null @@ -1,38 +0,0 @@ -:_content-type: ASSEMBLY -[id="monitoring-argo-cd-custom-resource-workloads"] -= Monitoring Argo CD custom resource workloads -include::_attributes/common-attributes.adoc[] -:context: monitoring-argo-cd-custom-resource-workloads - -toc::[] - -[role="_abstract"] -With {gitops-title}, you can monitor the availability of Argo CD custom resource workloads for specific Argo CD instances. By monitoring Argo CD custom resource workloads, you have the latest information about the state of your Argo CD instances by enabling alerts for them. When the component workload pods such as application-controller, repo-server, or server of the corresponding Argo CD instance are unable to come up for certain reasons and there is a drift between the number of ready replicas and the number of desired replicas for a certain period of time, the Operator then triggers the alerts. - -You can enable and disable the setting for monitoring Argo CD custom resource workloads. 
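As an assumption-labeled sketch of how this setting is commonly toggled, the monitoring flag lives on the `ArgoCD` custom resource; follow the modules below for the supported procedure, and treat the instance and namespace names here as placeholders:

[source,yaml]
----
apiVersion: argoproj.io/v1alpha1
kind: ArgoCD
metadata:
  name: example-argocd
  namespace: example-namespace
spec:
  monitoring:
    enabled: true   # alert when ready replicas drift from desired replicas
----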
- -// Prerequisites for monitoring Argo CD custom resource workloads -[discrete] -== Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* {gitops-title} is installed in your cluster. -* The monitoring stack is configured in your cluster in the `openshift-monitoring` project. In addition, the Argo CD instance is in a namespace that you can monitor through Prometheus. -* The `kube-state-metrics` service is running in your cluster. -* Optional: If you are enabling monitoring for an Argo CD instance already present in a user-defined project, ensure that the monitoring is xref:../../monitoring/enabling-monitoring-for-user-defined-projects.adoc#enabling-monitoring-for-user-defined-projects_enabling-monitoring-for-user-defined-projects[enabled for user-defined projects] in your cluster. -+ -[NOTE] -==== -If you want to enable monitoring for an Argo CD instance in a namespace that is not watched by the default `openshift-monitoring` stack, for example, any namespace that does not start with `openshift-*`, then you must enable user workload monitoring in your cluster. This action enables the monitoring stack to pick up the created PrometheusRule. -==== - -//Enabling Monitoring for Argo CD custom resource workloads -include::modules/gitops-enabling-monitoring-for-argo-cd-custom-resource-workloads.adoc[leveloffset=+1] - -//Disabling Monitoring for Argo CD custom resource workloads -include::modules/gitops-disabling-monitoring-for-argo-cd-custom-resource-workloads.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_monitoring-argo-cd-custom-resource-workloads"] -== Additional resources -* xref:../../monitoring/enabling-monitoring-for-user-defined-projects.adoc#enabling-monitoring-for-user-defined-projects[Enabling monitoring for user-defined projects] diff --git a/cicd/gitops/run-gitops-control-plane-workload-on-infra-nodes.adoc b/cicd/gitops/run-gitops-control-plane-workload-on-infra-nodes.adoc deleted file mode 100644 index 21122fe29469..000000000000 --- a/cicd/gitops/run-gitops-control-plane-workload-on-infra-nodes.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: ASSEMBLY -[id="run-gitops-control-plane-workload-on-infra-nodes"] -= Running {gitops-shortname} control plane workloads on infrastructure nodes -:context: run-gitops-control-plane-workload-on-infra-nodes -include::_attributes/common-attributes.adoc[] - -toc::[] - -You can use infrastructure nodes to prevent additional billing cost against subscription counts. - -You can use the {product-title} to run certain workloads on infrastructure nodes installed by the {gitops-title} Operator. This comprises the workloads that are installed by the {gitops-title} Operator by default in the `openshift-gitops` namespace, including the default Argo CD instance in that namespace. - -[NOTE] -==== -Any other Argo CD instances installed to user namespaces are not eligible to run on infrastructure nodes. -==== - -include::modules/go-add-infra-nodes.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_run-gitops-control-plane-workload-on-infra-nodes"] -== Additional resources -* To learn more about taints and tolerations, see xref:../../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations[Controlling pod placement using node taints]. 
-* For more information on infrastructure machine sets, see xref:../../machine_management/creating-infrastructure-machinesets.adoc#creating-infrastructure-machinesets[Creating infrastructure machine sets]. diff --git a/cicd/gitops/setting-up-argocd-instance.adoc b/cicd/gitops/setting-up-argocd-instance.adoc deleted file mode 100644 index 55756aa831fe..000000000000 --- a/cicd/gitops/setting-up-argocd-instance.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="setting-up-argocd-instance"] -= Setting up an Argo CD instance -include::_attributes/common-attributes.adoc[] -:context: setting-up-argocd-instance - -toc::[] - -By default, the {gitops-title} installs an instance of Argo CD in the `openshift-gitops` namespace with additional permissions for managing certain cluster-scoped resources. To manage cluster configurations or deploy applications, you can install and deploy a new Argo CD instance. By default, any new instance has permissions to manage resources only in the namespace where it is deployed. - -include::modules/gitops-argo-cd-installation.adoc[leveloffset=+1] - -include::modules/gitops-enable-replicas-for-argo-cd-server.adoc[leveloffset=+1] - -include::modules/gitops-deploy-resources-different-namespaces.adoc[leveloffset=+1] - -include::modules/gitops-customize-argo-cd-consolelink.adoc[leveloffset=+1] diff --git a/cicd/gitops/snippets b/cicd/gitops/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/cicd/gitops/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/cicd/gitops/troubleshooting-issues-in-GitOps.adoc b/cicd/gitops/troubleshooting-issues-in-GitOps.adoc deleted file mode 100644 index 6eb4cefc9d4a..000000000000 --- a/cicd/gitops/troubleshooting-issues-in-GitOps.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -[id="troubleshooting-issues-in-GitOps"] - -= Troubleshooting issues in {gitops-title} -include::_attributes/common-attributes.adoc[] -:context: troubleshooting-issues-in-GitOps - -toc::[] - -When working with {gitops-title}, you might face issues related to performance, monitoring, configuration, and other aspects. This section helps you to understand those issues and provide solutions to resolve them. 
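One mitigation that is often suggested for the machine configuration sync issue covered below is enabling server-side apply on the affected application, which avoids storing large `last-applied-configuration` annotations; this sketch is an assumption for illustration rather than the documented procedure, and the application name is a placeholder:

[source,yaml]
----
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: machine-configs
  namespace: openshift-gitops
spec:
  # ...source and destination omitted for brevity...
  syncPolicy:
    syncOptions:
    - ServerSideApply=true   # use Kubernetes server-side apply for large resources
----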
- -include::modules/con_auto-reboot-during-argo-cd-sync-with-machine-configurations.adoc[leveloffset=+1] -include::modules/performance-challenges-in-machine-configurations-and-argo-cd.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_troubleshooting-issues-in-GitOps"] -== Additional resources -* link:https://developers.redhat.com/articles/2021/12/20/prevent-auto-reboot-during-argo-cd-sync-machine-configs#[Preventing nodes from auto-rebooting during Argo CD sync with machine configs] diff --git a/cicd/gitops/understanding-openshift-gitops.adoc b/cicd/gitops/understanding-openshift-gitops.adoc deleted file mode 100644 index 7ab0f91ad813..000000000000 --- a/cicd/gitops/understanding-openshift-gitops.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-openshift-gitops"] -= Understanding OpenShift GitOps -include::_attributes/common-attributes.adoc[] -:context: understanding-openshift-gitops - -toc::[] - -//Concept Module -include::modules/about-gitops.adoc[leveloffset=+1] - -//Concept Module -include::modules/about-redhat-openshift-gitops.adoc[leveloffset=+1] diff --git a/cicd/gitops/uninstalling-openshift-gitops.adoc b/cicd/gitops/uninstalling-openshift-gitops.adoc deleted file mode 100644 index 17e3ecadfa01..000000000000 --- a/cicd/gitops/uninstalling-openshift-gitops.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-openshift-gitops"] -= Uninstalling OpenShift GitOps -include::_attributes/common-attributes.adoc[] -:context: uninstalling-openshift-gitops - -toc::[] - -Uninstalling the {gitops-title} Operator is a two-step process: - -. Delete the Argo CD instances that were added under the default namespace of the {gitops-title} Operator. -. Uninstall the {gitops-title} Operator. - -Uninstalling only the Operator will not remove the Argo CD instances created. - -include::modules/go-deleting-argocd-instance.adoc[leveloffset=+1] - -include::modules/go-uninstalling-gitops-operator.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* You can learn more about uninstalling Operators on {product-title} in the xref:../../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster[Deleting Operators from a cluster] section. diff --git a/cicd/index.adoc b/cicd/index.adoc deleted file mode 100644 index e9ed1f0b7af2..000000000000 --- a/cicd/index.adoc +++ /dev/null @@ -1,42 +0,0 @@ -:_content-type: ASSEMBLY -[id="ci-cd-overview"] -= {product-title} CI/CD overview -include::_attributes/common-attributes.adoc[] -:context: ci-cd-overview - -toc::[] - - -{product-title} is an enterprise-ready Kubernetes platform for developers, which enables organizations to automate the application delivery process through DevOps practices, such as continuous integration (CI) and continuous delivery (CD). To meet your organizational needs, the {product-title} provides the following CI/CD solutions: - -* OpenShift Builds -* {pipelines-shortname} -* OpenShift GitOps - -[id="openshift-builds"] -== OpenShift Builds -With OpenShift Builds, you can create cloud-native apps by using a declarative build process. You can define the build process in a YAML file that you use to create a BuildConfig object. This definition includes attributes such as build triggers, input parameters, and source code. When deployed, the BuildConfig object typically builds a runnable image and pushes it to a container image registry. 
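As a rough, non-normative sketch of such a definition, a source-to-image `BuildConfig` might look like the following; the Git URL, builder image, and output names are placeholders:

[source,yaml]
----
apiVersion: build.openshift.io/v1
kind: BuildConfig
metadata:
  name: example-app
spec:
  source:
    type: Git
    git:
      uri: https://github.com/<your_org>/<your_app>.git
  strategy:
    type: Source
    sourceStrategy:
      from:
        kind: ImageStreamTag
        name: nodejs:latest      # builder image stream tag
        namespace: openshift
  output:
    to:
      kind: ImageStreamTag
      name: example-app:latest   # image pushed to the internal registry
  triggers:
  - type: ConfigChange
----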
- -OpenShift Builds provides the following extensible support for build strategies: - -* Docker build -* Source-to-image (S2I) build -* Custom build - -For more information, see xref:../cicd/builds/understanding-image-builds.adoc#understanding-image-builds[Understanding image builds] - -[id="openshift-pipelines"] -== {pipelines-shortname} -{pipelines-shortname} provides a Kubernetes-native CI/CD framework to design and run each step of the CI/CD pipeline in its own container. It can scale independently to meet the on-demand pipelines with predictable outcomes. - -For more information, see xref:../cicd/pipelines/understanding-openshift-pipelines.adoc#understanding-openshift-pipelines[Understanding {pipelines-shortname}] - -[id="openshift-gitops"] -== OpenShift GitOps -OpenShift GitOps is an Operator that uses Argo CD as the declarative GitOps engine. It enables GitOps workflows across multicluster OpenShift and Kubernetes infrastructure. Using OpenShift GitOps, administrators can consistently configure and deploy Kubernetes-based infrastructure and applications across clusters and development lifecycles. - -For more information, see xref:../cicd/gitops/understanding-openshift-gitops.adoc#understanding-openshift-gitops[Understanding OpenShift GitOps] - -[id="jenkins-ci-cd"] -== Jenkins -Jenkins automates the process of building, testing, and deploying applications and projects. OpenShift Developer Tools provides a Jenkins image that integrates directly with the {product-title}. Jenkins can be deployed on OpenShift by using the Samples Operator templates or certified Helm chart. diff --git a/cicd/jenkins/_attributes b/cicd/jenkins/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/cicd/jenkins/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/cicd/jenkins/images b/cicd/jenkins/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/cicd/jenkins/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/cicd/jenkins/images-other-jenkins-agent.adoc b/cicd/jenkins/images-other-jenkins-agent.adoc deleted file mode 100644 index 68f0628b5bf8..000000000000 --- a/cicd/jenkins/images-other-jenkins-agent.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="images-other-jenkins-agent"] -= Jenkins agent -include::_attributes/common-attributes.adoc[] -:context: images-other-jenkins-agent - -toc::[] - -{product-title} provides a base image for use as a Jenkins agent. - -The Base image for Jenkins agents does the following: - -* Pulls in both the required tools, headless Java, the Jenkins JNLP client, and the useful ones, including `git`, `tar`, `zip`, and `nss`, among others. -* Establishes the JNLP agent as the entry point. -* Includes the `oc` client tool for invoking command line operations from within Jenkins jobs. -* Provides Dockerfiles for both Red Hat Enterprise Linux (RHEL) and `localdev` images. - -[IMPORTANT] -==== -Use a version of the agent image that is appropriate for your {product-title} release version. Embedding an `oc` client version that is not compatible with the {product-title} version can cause unexpected behavior. -==== - -The {product-title} Jenkins image also defines the following sample `java-builder` pod template to illustrate how you can use the agent image with the Jenkins Kubernetes plugin. 
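The template itself ships with the image. The following is only a rough sketch, with assumed image references, of the two-container shape that such a Kubernetes plugin pod definition takes:

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  labels:
    role: java-builder
spec:
  containers:
  - name: jnlp                     # handles the JNLP contract with the Jenkins controller
    image: image-registry.openshift-image-registry.svc:5000/openshift/jenkins-agent-base:latest
    args: ['$(JENKINS_SECRET)', '$(JENKINS_NAME)']
  - name: java                     # build container used by pipeline steps
    image: image-registry.openshift-image-registry.svc:5000/openshift/java:latest
    command: ['cat']
    tty: true
----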
- -The `java-builder` pod template employs two containers: -* A `jnlp` container that uses the {product-title} Base agent image and handles the JNLP contract for starting and stopping Jenkins agents. -* A `java` container that uses the `java` {product-title} Sample ImageStream, which contains the various Java binaries, including the Maven binary `mvn`, for building code. - -include::modules/images-other-jenkins-agent-images.adoc[leveloffset=+1] - -include::modules/images-other-jenkins-agent-env-var.adoc[leveloffset=+1] - -include::modules/images-other-jenkins-agent-memory.adoc[leveloffset=+1] - -include::modules/images-other-jenkins-agent-gradle.adoc[leveloffset=+1] - -include::modules/images-other-jenkins-agent-pod-retention.adoc[leveloffset=+1] diff --git a/cicd/jenkins/images-other-jenkins.adoc b/cicd/jenkins/images-other-jenkins.adoc deleted file mode 100644 index 87aeeabeb003..000000000000 --- a/cicd/jenkins/images-other-jenkins.adoc +++ /dev/null @@ -1,75 +0,0 @@ -:_content-type: ASSEMBLY -[id="images-other-jenkins"] -= Configuring Jenkins images -include::_attributes/common-attributes.adoc[] -:context: images-other-jenkins - -toc::[] - -{product-title} provides a container image for running Jenkins. This image provides a Jenkins server instance, which can be used to set up a basic flow for continuous testing, integration, and delivery. - -The image is based on the Red Hat Universal Base Images (UBI). - -{product-title} follows the link:https://jenkins.io/changelog-stable/[LTS] release of Jenkins. {product-title} provides an image that contains Jenkins 2.x. - -The {product-title} Jenkins images are available on link:https://quay.io[Quay.io] or link:https://registry.redhat.io[registry.redhat.io]. - -For example: - -[source,terminal] ----- -$ podman pull registry.redhat.io/ocp-tools-4/jenkins-rhel8: ----- - -To use these images, you can either access them directly from these registries or push them into your {product-title} container image registry. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the image stream. - -But for convenience, {product-title} provides image streams in the `openshift` namespace for the core Jenkins image as well as the example Agent images provided for {product-title} integration with Jenkins. - -[id="images-other-jenkins-config-customization_{context}"] -== Configuration and customization - -You can manage Jenkins authentication in two ways: - -* {product-title} OAuth authentication provided by the {product-title} Login plugin. -* Standard authentication provided by Jenkins. - -include::modules/images-other-jenkins-oauth-auth.adoc[leveloffset=+2] - -include::modules/images-other-jenkins-auth.adoc[leveloffset=+2] - -include::modules/images-other-jenkins-env-var.adoc[leveloffset=+1] - -include::modules/images-other-jenkins-cross-project.adoc[leveloffset=+1] - -[id="images-other-jenkins-cross-volume-mount_{context}"] -== Jenkins cross volume mount points - -The Jenkins image can be run with mounted volumes to enable persistent storage for the configuration: - -* `/var/lib/jenkins` is the data directory where Jenkins stores configuration files, including job definitions. 
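For example, one way to attach persistent storage at that path on an existing Jenkins deployment is with `oc set volume`; the deployment, volume, and claim names below are assumptions for illustration:

[source,terminal]
----
$ oc set volume deployment/jenkins --add --name=jenkins-data \
    --type=persistentVolumeClaim --claim-name=jenkins-pvc \
    --mount-path=/var/lib/jenkins --overwrite
----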
- -include::modules/images-other-jenkins-customize-s2i.adoc[leveloffset=+1] - -include::modules/images-other-jenkins-config-kubernetes.adoc[leveloffset=+1] - -.Additional resources - -* xref:../../cicd/jenkins/important-changes-to-openshift-jenkins-images.adoc#important-changes-to-openshift-jenkins-images[Important changes to OpenShift Jenkins images] - -include::modules/images-other-jenkins-permissions.adoc[leveloffset=+1] - -include::modules/images-other-jenkins-create-service.adoc[leveloffset=+1] - -include::modules/images-other-jenkins-kubernetes-plugin.adoc[leveloffset=+1] - -.Additional resources - -* xref:../../cicd/jenkins/important-changes-to-openshift-jenkins-images.adoc#important-changes-to-openshift-jenkins-images[Important changes to OpenShift Jenkins images] - -include::modules/images-other-jenkins-memory.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* See xref:../../architecture/understanding-development.adoc#base-image-options[Base image options] for more information about the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/getting_started_with_containers/index#using_red_hat_base_container_images_standard_and_minimal[Red Hat Universal Base Images] (UBI). -* xref:../../cicd/jenkins/important-changes-to-openshift-jenkins-images.adoc#important-changes-to-openshift-jenkins-images[Important changes to OpenShift Jenkins images] diff --git a/cicd/jenkins/important-changes-to-openshift-jenkins-images.adoc b/cicd/jenkins/important-changes-to-openshift-jenkins-images.adoc deleted file mode 100644 index 6590c40201fd..000000000000 --- a/cicd/jenkins/important-changes-to-openshift-jenkins-images.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: ASSEMBLY -[id="important-changes-to-openshift-jenkins-images"] -= Important changes to OpenShift Jenkins images -include::_attributes/common-attributes.adoc[] -:context: important-changes-to-openshift-jenkins-images - -toc::[] - -{product-title} 4.11 moves the OpenShift Jenkins and OpenShift Agent Base images to the `ocp-tools-4` repository at `registry.redhat.io`. It also removes the OpenShift Jenkins Maven and NodeJS Agent images from its payload: - -* {product-title} 4.11 moves the OpenShift Jenkins and OpenShift Agent Base images to the `ocp-tools-4` repository at `registry.redhat.io` so that Red Hat can produce and update the images outside the {product-title} lifecycle. Previously, these images were in the {product-title} install payload and the `openshift4` repository at `registry.redhat.io`. - -* {product-title} 4.10 deprecated the OpenShift Jenkins Maven and NodeJS Agent images. {product-title} 4.11 removes these images from its payload. Red Hat no longer produces these images, and they are not available from the `ocp-tools-4` repository at `registry.redhat.io`. Red Hat maintains the 4.10 and earlier versions of these images for any significant bug fixes or security CVEs, following the link:https://access.redhat.com/support/policy/updates/openshift[{product-title} lifecycle policy]. - -These changes support the {product-title} 4.10 recommendation to use xref:../../cicd/jenkins/images-other-jenkins.adoc#images-other-jenkins-config-kubernetes_images-other-jenkins[multiple container Pod Templates with the Jenkins Kubernetes Plugin]. 
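For instance, if you maintain your own `jenkins` image stream tag, you might point it at the new repository with something like the following; the tag value is a placeholder and the scheduled re-import is optional:

[source,terminal]
----
$ oc tag --source=docker registry.redhat.io/ocp-tools-4/jenkins-rhel8:<image_tag> \
    openshift/jenkins:<image_tag> --scheduled
----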
- -include::modules/relocation-of-openshift-jenkins-images.adoc[leveloffset=+1] - -include::modules/customizing-the-jenkins-image-stream-tag.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_important-changes-to-openshift-jenkins-images_{context}"] -== Additional resources - -* xref:../../openshift_images/managing_images/tagging-images.adoc#images-add-tags-to-imagestreams_tagging-images[Adding tags to image streams] -* xref:../../openshift_images/image-streams-manage.adoc#images-imagestream-import_image-streams-managing[Configuring periodic importing of image stream tags] -* xref:../../cicd/jenkins/images-other-jenkins-agent.adoc#images-other-jenkins-agent[Jenkins agent] -* link:https://catalog.redhat.com/software/containers/search?q=Jenkins%202&p=1[Certified `jenkins` images] -* link:https://catalog.redhat.com/software/containers/search?q=Jenkins%20Agent%20Base&p=1[Certified `jenkins-agent-base` images] -* link:https://catalog.redhat.com/software/containers/search?q=jenkins-agent-maven&p=1[Certified `jenkins-agent-maven` images] -// Writer, remove this line in 4.12 -* link:https://catalog.redhat.com/software/containers/search?q=jenkins-agent-nodejs&p=1[Certified `jenkins-agent-nodejs` images] -// Writer, remove this line in 4.12 diff --git a/cicd/jenkins/migrating-from-jenkins-to-openshift-pipelines.adoc b/cicd/jenkins/migrating-from-jenkins-to-openshift-pipelines.adoc deleted file mode 100644 index 7f8242f2ddbc..000000000000 --- a/cicd/jenkins/migrating-from-jenkins-to-openshift-pipelines.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -//Jenkins-Tekton-Migration -[id="migrating-from-jenkins-to-openshift-pipelines_{context}"] -= Migrating from Jenkins to {pipelines-shortname} or Tekton -include::_attributes/common-attributes.adoc[] -:context: migrating-from-jenkins-to-openshift-pipelines - -toc::[] - -You can migrate your CI/CD workflows from Jenkins to xref:../../cicd/pipelines/understanding-openshift-pipelines.adoc#understanding-openshift-pipelines[{pipelines-title}], a cloud-native CI/CD experience based on the Tekton project. 
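To make the comparison concrete before the concept-mapping module, a Jenkins stage that runs `mvn package` roughly corresponds to a Tekton task such as this sketch; the builder image is an assumption:

[source,yaml]
----
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: maven-build
spec:
  workspaces:
  - name: source              # checked-out repository shared between tasks
  steps:
  - name: package
    image: registry.access.redhat.com/ubi8/openjdk-17
    workingDir: $(workspaces.source.path)
    script: |
      mvn -B -DskipTests package
----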
- -include::modules/jt-comparison-of-jenkins-and-openshift-pipelines-concepts.adoc[leveloffset=+1] - -include::modules/jt-migrating-a-sample-pipeline-from-jenkins-to-openshift-pipelines.adoc[leveloffset=+1] - -include::modules/jt-migrating-from-jenkins-plugins-to-openshift-pipelines-hub-tasks.adoc[leveloffset=+1] - -include::modules/jt-extending-openshift-pipelines-capabilities-using-custom-tasks-and-scripts.adoc[leveloffset=+1] - -include::modules/jt-comparison-of-jenkins-openshift-pipelines-execution-models.adoc[leveloffset=+1] - -include::modules/jt-examples-of-common-use-cases.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources -* xref:../../cicd/pipelines/understanding-openshift-pipelines.adoc#understanding-openshift-pipelines[Understanding {pipelines-shortname}] -* xref:../../authentication/using-rbac.adoc#using-rbac[Role-based Access Control] diff --git a/cicd/jenkins/modules b/cicd/jenkins/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/cicd/jenkins/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/cicd/jenkins/snippets b/cicd/jenkins/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/cicd/jenkins/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/cicd/pipelines/_attributes b/cicd/pipelines/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/cicd/pipelines/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/cicd/pipelines/authenticating-pipelines-using-git-secret.adoc b/cicd/pipelines/authenticating-pipelines-using-git-secret.adoc deleted file mode 100644 index 4f575e125149..000000000000 --- a/cicd/pipelines/authenticating-pipelines-using-git-secret.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="authenticating-pipelines-using-git-secret"] -= Authenticating pipelines using git secret -include::_attributes/common-attributes.adoc[] -:context: authenticating-pipelines-using-git-secret - -toc::[] - -A Git secret consists of credentials to securely interact with a Git repository, and is often used to automate authentication. In {pipelines-title}, you can use Git secrets to authenticate pipeline runs and task runs that interact with a Git repository during execution. - -A pipeline run or a task run gains access to the secrets through the associated service account. {pipelines-shortname} support the use of Git secrets as annotations (key-value pairs) for basic authentication and SSH-based authentication. 
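For example, a basic-authentication Git secret is typically annotated with the Git server URL and then linked to the `pipeline` service account; the host and credential values below are placeholders:

[source,yaml]
----
apiVersion: v1
kind: Secret
metadata:
  name: git-basic-auth
  annotations:
    tekton.dev/git-0: https://github.com   # server this credential applies to
type: kubernetes.io/basic-auth
stringData:
  username: <username>
  password: <personal_access_token>
----

After you create the secret, it is commonly attached to the service account that runs the pipeline, for example with `oc secrets link pipeline git-basic-auth`.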
- -include::modules/op-understanding-credential-selection.adoc[leveloffset=+1] - -include::modules/op-configuring-basic-authentication-for-git.adoc[leveloffset=+1] - -include::modules/op-configuring-ssh-authentication-for-git.adoc[leveloffset=+1] - -include::modules/op-using-ssh-authentication-in-git-type-tasks.adoc[leveloffset=+1] - -include::modules/op-using-secrets-as-a-nonroot-user.adoc[leveloffset=+1] - -include::modules/op-limiting-secret-access-to-specific-steps.adoc[leveloffset=+1] diff --git a/cicd/pipelines/creating-applications-with-cicd-pipelines.adoc b/cicd/pipelines/creating-applications-with-cicd-pipelines.adoc deleted file mode 100644 index 732c493b8662..000000000000 --- a/cicd/pipelines/creating-applications-with-cicd-pipelines.adoc +++ /dev/null @@ -1,89 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-applications-with-cicd-pipelines"] -= Creating CI/CD solutions for applications using {pipelines-shortname} -include::_attributes/common-attributes.adoc[] -:context: creating-applications-with-cicd-pipelines - -toc::[] - -With {pipelines-title}, you can create a customized CI/CD solution to build, test, and deploy your application. - -To create a full-fledged, self-serving CI/CD pipeline for an application, perform the following tasks: - -* Create custom tasks, or install existing reusable tasks. -* Create and define the delivery pipeline for your application. -* Provide a storage volume or filesystem that is attached to a workspace for the pipeline execution, using one of the following approaches: -** Specify a volume claim template that creates a persistent volume claim -** Specify a persistent volume claim -* Create a `PipelineRun` object to instantiate and invoke the pipeline. -* Add triggers to capture events in the source repository. - -This section uses the `pipelines-tutorial` example to demonstrate the preceding tasks. The example uses a simple application which consists of: - -* A front-end interface, `pipelines-vote-ui`, with the source code in the link:https://github.com/openshift/pipelines-vote-ui/tree/{pipelines-ver}[`pipelines-vote-ui`] Git repository. -* A back-end interface, `pipelines-vote-api`, with the source code in the link:https://github.com/openshift/pipelines-vote-api/tree/{pipelines-ver}[`pipelines-vote-api`] Git repository. -* The `apply-manifests` and `update-deployment` tasks in the link:https://github.com/openshift/pipelines-tutorial/tree/{pipelines-ver}[`pipelines-tutorial`] Git repository. - -== Prerequisites - -* You have access to an {product-title} cluster. -* You have installed xref:../../cicd/pipelines/installing-pipelines.adoc#installing-pipelines[{pipelines-shortname}] using the {pipelines-title} Operator listed in the OpenShift OperatorHub. After it is installed, it is applicable to the entire cluster. -* You have installed xref:../../cli_reference/tkn_cli/installing-tkn.adoc#installing-tkn[{pipelines-shortname} CLI]. -* You have forked the front-end link:https://github.com/openshift/pipelines-vote-ui/tree/{pipelines-ver}[`pipelines-vote-ui`] and back-end link:https://github.com/openshift/pipelines-vote-api/tree/{pipelines-ver}[`pipelines-vote-api`] Git repositories using your GitHub ID, and have administrator access to these repositories. -* Optional: You have cloned the link:https://github.com/openshift/pipelines-tutorial/tree/{pipelines-ver}[`pipelines-tutorial`] Git repository. 
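Before working through the modules, it can help to see the shape of a pipeline run that supplies a workspace through a volume claim template, as mentioned in the task list above; the pipeline name, workspace name, and storage size are illustrative:

[source,yaml]
----
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
  generateName: build-and-deploy-run-
spec:
  pipelineRef:
    name: build-and-deploy
  workspaces:
  - name: shared-workspace
    volumeClaimTemplate:        # creates a persistent volume claim per run
      spec:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 500Mi
----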
- - -include::modules/op-creating-project-and-checking-pipeline-service-account.adoc[leveloffset=+1] - -include::modules/op-creating-pipeline-tasks.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../cicd/pipelines/managing-nonversioned-and-versioned-cluster-tasks.adoc#managing-nonversioned-and-versioned-cluster-tasks[Managing non-versioned and versioned cluster tasks] - -include::modules/op-assembling-a-pipeline.adoc[leveloffset=+1] - -include::modules/op-mirroring-images-to-run-pipelines-in-restricted-environment.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../openshift_images/configuring-samples-operator.adoc#samples-operator-restricted-network-install[Configuring Samples Operator for a restricted cluster] - -* xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installation-about-mirror-registry_installing-mirroring-installation-images[Creating a cluster with a mirrored registry] - -include::modules/op-running-a-pipeline.adoc[leveloffset=+1] - -include::modules/op-adding-triggers.adoc[leveloffset=+1] - -include::modules/op-configuring-eventlisteners-to-serve-multiple-namespaces.adoc[leveloffset=+1] - -include::modules/op-creating-webhooks.adoc[leveloffset=+1] - -include::modules/op-triggering-a-pipelinerun.adoc[leveloffset=+1] - -include::modules/op-enabling-monitoring-of-event-listeners-for-triggers-for-user-defined-projects.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../monitoring/enabling-monitoring-for-user-defined-projects.adoc#enabling-monitoring-for-user-defined-projects[Enabling monitoring for user-defined projects] - -include::modules/op-configuring-pull-request-capabilities-in-GitHub-interceptor.adoc[leveloffset=+1] - -include::modules/op-filtering-pull-requests-using-GitHub-interceptor.adoc[leveloffset=+2] - -include::modules/op-validating-pull-requests-using-GitHub-interceptors.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="pipeline-addtl-resources"] -== Additional resources - -* To include {pac} along with the application source code in the same repository, see xref:../../cicd/pipelines/using-pipelines-as-code.adoc#using-pipelines-as-code[Using {pac}]. -* For more details on pipelines in the *Developer* perspective, see the xref:../../cicd/pipelines/working-with-pipelines-using-the-developer-perspective.adoc#working-with-pipelines-using-the-developer-perspective[working with pipelines in the *Developer* perspective] section. -* To learn more about Security Context Constraints (SCCs), see the xref:../../authentication/managing-security-context-constraints.adoc#managing-pod-security-policies[Managing Security Context Constraints] section. -* For more examples of reusable tasks, see the link:https://github.com/openshift/pipelines-catalog[OpenShift Catalog] repository. Additionally, you can also see the Tekton Catalog in the Tekton project. -* To install and deploy a custom instance of Tekton Hub for reusable tasks and pipelines, see xref:../../cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc#using-tekton-hub-with-openshift-pipelines[Using {tekton-hub} with {pipelines-title}]. -* For more details on re-encrypt TLS termination, see link:https://docs.openshift.com/container-platform/3.11/architecture/networking/routes.html#re-encryption-termination[Re-encryption Termination]. 
-* For more details on secured routes, see the xref:../../networking/routes/secured-routes.adoc#secured-routes[Secured routes] section. diff --git a/cicd/pipelines/customizing-configurations-in-the-tektonconfig-cr.adoc b/cicd/pipelines/customizing-configurations-in-the-tektonconfig-cr.adoc deleted file mode 100644 index ed728694b569..000000000000 --- a/cicd/pipelines/customizing-configurations-in-the-tektonconfig-cr.adoc +++ /dev/null @@ -1,56 +0,0 @@ -:_content-type: ASSEMBLY -[id="customizing-configurations-in-the-tektonconfig-cr"] -= Customizing configurations in the TektonConfig custom resource -include::_attributes/common-attributes.adoc[] -:context: customizing-configurations-in-the-tektonconfig-cr - -toc::[] - -In {pipelines-title}, you can customize the following configurations by using the `TektonConfig` custom resource (CR): - -* Configuring the {pipelines-title} control plane -* Changing the default service account -* Disabling the service monitor -* Configuring pipeline resolvers -* Disabling cluster tasks and pipeline templates -* Disabling the integration of {tekton-hub} -* Disabling the automatic creation of RBAC resources -* Pruning of task runs and pipeline runs - -[id="prerequisites_customizing-configurations-in-the-tektonconfig-cr"] -== Prerequisites - -* You have installed the {pipelines-title} Operator. - -include::modules/op-configuring-pipelines-control-plane.adoc[leveloffset=+1] - -include::modules/op-modifiable-fields-with-default-values.adoc[leveloffset=+2] - -include::modules/op-optional-configuration-fields.adoc[leveloffset=+2] - -include::modules/op-changing-default-service-account.adoc[leveloffset=+1] - -include::modules/op-disabling-the-service-monitor.adoc[leveloffset=+1] - -include::modules/op-configuring-pipeline-resolvers.adoc[leveloffset=+1] - -include::modules/op-disabling-cluster-tasks-and-pipeline-templates.adoc[leveloffset=+1] - -include::modules/op-disabling-the-integretion-of-tekton-hub.adoc[leveloffset=+1] - -include::modules/op-disabling-automatic-creation-of-rbac-resources.adoc[leveloffset=+1] - -include::modules/op-automatic-pruning-taskrun-pipelinerun.adoc[leveloffset=+1] - -include::modules/op-default-pruner-configuration.adoc[leveloffset=+2] - -include::modules/op-annotations-for-automatic-pruning-taskruns-pipelineruns.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_customizing-configurations-in-the-tektonconfig-cr"] -== Additional resources - -* xref:../../cicd/pipelines/authenticating-pipelines-using-git-secret.adoc#op-configuring-ssh-authentication-for-git_authenticating-pipelines-using-git-secret[Configuring SSH authentication for Git] -* xref:../../cicd/pipelines/managing-nonversioned-and-versioned-cluster-tasks.adoc#managing-nonversioned-and-versioned-cluster-tasks[Managing non-versioned and versioned cluster tasks] -* xref:../../cicd/pipelines/working-with-pipelines-using-the-developer-perspective.adoc#using-custom-pipeline-template-for-git-import_working-with-pipelines-using-the-developer-perspective[Using a custom pipeline template for creating and deploying an application from a Git repository] -* xref:../../applications/pruning-objects.adoc#pruning-objects[Pruning objects to reclaim resources] diff --git a/cicd/pipelines/images b/cicd/pipelines/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/cicd/pipelines/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/cicd/pipelines/installing-pipelines.adoc 
b/cicd/pipelines/installing-pipelines.adoc deleted file mode 100644 index 250cd085df55..000000000000 --- a/cicd/pipelines/installing-pipelines.adoc +++ /dev/null @@ -1,58 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-pipelines"] -= Installing {pipelines-shortname} -include::_attributes/common-attributes.adoc[] -:context: installing-pipelines - -toc::[] - -[role="_abstract"] -This guide walks cluster administrators through the process of installing the {pipelines-title} Operator to an {product-title} cluster. - -// Prerequisites for installing OpenShift Operator -[discrete] -== Prerequisites - -* You have access to an {product-title} cluster using an account with `cluster-admin` permissions. -* You have installed `oc` CLI. -* You have installed xref:../../cli_reference/tkn_cli/installing-tkn.adoc#installing-tkn[{pipelines-shortname} (`tkn`) CLI] on your local system. -* Your cluster has the xref:../../installing/cluster-capabilities.adoc#marketplace-operator_cluster-capabilities[Marketplace capability] enabled or the Red Hat Operator catalog source configured manually. - -ifdef::openshift-origin[] -* Ensure that you have downloaded the {cluster-manager-url-pull} as shown in the xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installation-obtaining-installer_installing-gcp-customizations[Obtaining the installation program] to install this Operator. -+ -If you have the pull secret, add the `redhat-operators` catalog to the OperatorHub custom resource (CR) as shown in xref:../../post_installation_configuration/preparing-for-users.adoc#olm-installing-operators-from-operatorhub-configure_post-install-preparing-for-users[Configuring {product-title} to use Red Hat Operators]. -endif::[] - - -//Installing pipelines Operator using web console - -include::modules/op-installing-pipelines-operator-in-web-console.adoc[leveloffset=+1] - -// Installing pipelines Operator using CLI - -include::modules/op-installing-pipelines-operator-using-the-cli.adoc[leveloffset=+1] - -// {pipelines-title} Operator in a restricted environment - -include::modules/op-pipelines-operator-in-restricted-environment.adoc[leveloffset=+1] - -include::modules/op-performance-tuning-using-tektonconfig-cr.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* You can learn more about installing Operators on {product-title} in the xref:../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-adding-operators-to-a-cluster[adding Operators to a cluster] section. - -* To install {tekton-chains} using the {pipelines-title} Operator, see xref:../../cicd/pipelines/using-tekton-chains-for-openshift-pipelines-supply-chain-security.adoc#using-tekton-chains-for-openshift-pipelines-supply-chain-security[Using {tekton-chains} for {pipelines-title} supply chain security]. - -* To install and deploy in-cluster {tekton-hub}, see xref:../../cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc#using-tekton-hub-with-openshift-pipelines[Using {tekton-hub} with {pipelines-title}]. 
- -* For more information on using pipelines in a restricted environment, see: - -** xref:../../cicd/pipelines/creating-applications-with-cicd-pipelines.adoc#op-mirroring-images-to-run-pipelines-in-restricted-environment_creating-applications-with-cicd-pipelines[Mirroring images to run pipelines in a restricted environment] - -** xref:../../openshift_images/configuring-samples-operator.adoc#samples-operator-restricted-network-install[Configuring Samples Operator for a restricted cluster] - -** xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installation-about-mirror-registry_installing-mirroring-installation-images[Creating a cluster with a mirrored registry] - diff --git a/cicd/pipelines/managing-nonversioned-and-versioned-cluster-tasks.adoc b/cicd/pipelines/managing-nonversioned-and-versioned-cluster-tasks.adoc deleted file mode 100644 index ed9a3f96c984..000000000000 --- a/cicd/pipelines/managing-nonversioned-and-versioned-cluster-tasks.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="managing-nonversioned-and-versioned-cluster-tasks"] -= Managing non-versioned and versioned cluster tasks -include::_attributes/common-attributes.adoc[] -:context: managing-nonversioned-and-versioned-cluster-tasks - -toc::[] - -As a cluster administrator, installing the {pipelines-title} Operator creates variants of each default cluster task known as _versioned cluster tasks_ (VCT) and _non-versioned cluster tasks_ (NVCT). For example, installing the {pipelines-title} Operator v1.7 creates a `buildah-1-7-0` VCT and a `buildah` NVCT. - -Both NVCT and VCT have the same metadata, behavior, and specifications, including `params`, `workspaces`, and `steps`. However, they behave differently when you disable them or upgrade the Operator. - -[IMPORTANT] -==== -In {pipelines-title} 1.10, cluster task functionality is deprecated and is planned to be removed in a future release. -==== - -include::modules/op-differences-between-non-versioned-and-versioned-cluster-tasks.adoc[leveloffset=+1] - -include::modules/op-advantages-and-disadvantages-of-non-versioned-and-versioned-cluster-tasks.adoc[leveloffset=+1] - -include::modules/op-disabling-non-versioned-and-versioned-cluster-tasks.adoc[leveloffset=+1] diff --git a/cicd/pipelines/modules b/cicd/pipelines/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/cicd/pipelines/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/cicd/pipelines/op-release-notes.adoc b/cicd/pipelines/op-release-notes.adoc deleted file mode 100644 index 3784aff18fc5..000000000000 --- a/cicd/pipelines/op-release-notes.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: ASSEMBLY -//OpenShift Pipelines Release Notes -include::_attributes/common-attributes.adoc[] -[id="op-release-notes"] -= {pipelines-title} release notes -:context: op-release-notes - -toc::[] - -{pipelines-title} is a cloud-native CI/CD experience based on the Tekton project which provides: - -* Standard Kubernetes-native pipeline definitions (CRDs). -* Serverless pipelines with no CI server management overhead. -* Extensibility to build images using any Kubernetes tool, such as S2I, Buildah, JIB, and Kaniko. -* Portability across any Kubernetes distribution. -* Powerful CLI for interacting with pipelines. -* Integrated user experience with the *Developer* perspective of the {product-title} web console. 
- -For an overview of {pipelines-title}, see xref:../../cicd/pipelines/understanding-openshift-pipelines.adoc#understanding-openshift-pipelines[Understanding {pipelines-shortname}]. - -include::modules/op-tkn-pipelines-compatibility-support-matrix.adoc[leveloffset=+1] - -include::modules/making-open-source-more-inclusive.adoc[leveloffset=+1] - -// Modules included, most to least recent -include::modules/op-release-notes-1-11.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-10.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-9.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-8.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-7.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-6.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-5.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-4.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-3.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-2.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-1.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-0.adoc[leveloffset=+1] diff --git a/cicd/pipelines/reducing-pipelines-resource-consumption.adoc b/cicd/pipelines/reducing-pipelines-resource-consumption.adoc deleted file mode 100644 index 32da0b15039d..000000000000 --- a/cicd/pipelines/reducing-pipelines-resource-consumption.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -[id="reducing-pipelines-resource-consumption"] -= Reducing resource consumption of {pipelines-shortname} -include::_attributes/common-attributes.adoc[] -:context: reducing-pipelines-resource-consumption - -toc::[] - -If you use clusters in multi-tenant environments, you must control the consumption of CPU, memory, and storage resources for each project and Kubernetes object. This helps prevent any one application from consuming too many resources and affecting other applications. - -To define the final resource limits that are set on the resulting pods, {pipelines-title} uses the resource quota limits and limit ranges of the project in which the pods are executed. - -To restrict resource consumption in your project, you can: - -* xref:../../applications/quotas/quotas-setting-per-project.adoc[Set and manage resource quotas] to limit the aggregate resource consumption. -* Use xref:../../nodes/clusters/nodes-cluster-limit-ranges.adoc[limit ranges to restrict resource consumption] for specific objects, such as pods, images, image streams, and persistent volume claims.
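For example, a limit range similar to the following applies default CPU and memory requests and limits to every container that a pipeline run or task run creates in the project. This is a minimal sketch: the name, namespace, and resource values are illustrative and not taken from this documentation.

.Example limit range with illustrative values
[source,yaml]
----
apiVersion: v1
kind: LimitRange
metadata:
  name: pipelines-limit-range
  namespace: my-pipelines-project  # replace with your project name
spec:
  limits:
  - type: Container
    defaultRequest:   # requests applied to containers that do not set their own
      cpu: 100m
      memory: 256Mi
    default:          # limits applied to containers that do not set their own
      cpu: 500m
      memory: 512Mi
----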
- -include::modules/op-understanding-pipelines-resource-consumption.adoc[leveloffset=+1] - -include::modules/op-mitigating-extra-pipeline-resource-consumption.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_reducing-pipelines-resource-consumption"] -== Additional resources - -* xref:../../cicd/pipelines/setting-compute-resource-quota-for-openshift-pipelines.adoc#setting-compute-resource-quota-for-openshift-pipelines[Setting compute resource quota for {pipelines-shortname}] -* xref:../../applications/quotas/quotas-setting-per-project.adoc#quotas-setting-per-project[Resource quotas per project] -* xref:../../nodes/clusters/nodes-cluster-limit-ranges.adoc#nodes-cluster-limit-ranges[Restricting resource consumption using limit ranges] -* link:https://kubernetes.io/docs/concepts/workloads/pods/init-containers/#resources[Resource requests and limits in Kubernetes] diff --git a/cicd/pipelines/remote-pipelines-tasks-resolvers.adoc b/cicd/pipelines/remote-pipelines-tasks-resolvers.adoc deleted file mode 100644 index 80ca525603ed..000000000000 --- a/cicd/pipelines/remote-pipelines-tasks-resolvers.adoc +++ /dev/null @@ -1,52 +0,0 @@ -:_content-type: ASSEMBLY -[id="remote-pipelines-tasks-resolvers"] -= Specifying remote pipelines and tasks using resolvers -include::_attributes/common-attributes.adoc[] -:context: remote-pipelines-tasks-resolvers - -toc::[] - -Pipelines and tasks are reusable blocks for your CI/CD processes. You can reuse pipelines or tasks that you previously developed, or that were developed by others, without having to copy and paste their definitions. These pipelines or tasks can be available from several types of sources, from other namespaces on your cluster to public catalogs. - -In a pipeline run resource, you can specify a pipeline from an existing source. In a pipeline resource or a task run resource, you can specify a task from an existing source. - -In these cases, the _resolvers_ in {pipelines-title} retrieve the pipeline or task definition from the specified source at run time. - -The following resolvers are available in a default installation of {pipelines-title}: - -Hub resolver:: Retrieves a task or pipeline from the Pipelines Catalog available on {artifact-hub} or {tekton-hub}. -Bundles resolver:: Retrieves a task or pipeline from a Tekton bundle, which is an OCI image available from any OCI repository, such as an OpenShift container repository. -Cluster resolver:: Retrieves a task or pipeline that is already created on the same {product-title} cluster in a specific namespace. -Git resolver:: Retrieves a task or pipeline from a Git repository. You must specify the repository, the branch, and the path. - -[id="resolver-hub_{context}"] -== Specifying a remote pipeline or task from a Tekton catalog -You can specify a remote pipeline or task that is defined in a public Tekton catalog, either link:https://artifacthub.io/[{artifact-hub}] or link:https://hub.tekton.dev/[{tekton-hub}], by using the hub resolver. - -include::modules/op-resolver-hub-config.adoc[leveloffset=+2] -include::modules/op-resolver-hub.adoc[leveloffset=+2] - -[id="resolver-bundles_{context}"] -== Specifying a remote pipeline or task from a Tekton bundle - -You can specify a remote pipeline or task from a Tekton bundle by using the bundles resolver. A Tekton bundle is an OCI image available from any OCI repository, such as an OpenShift container repository.
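As an illustration only, a pipeline run that pulls its pipeline from a Tekton bundle might reference the bundles resolver as follows. The registry path, pipeline name, and run name are placeholders, not values from this documentation.

.Example pipeline run referencing a Tekton bundle (placeholder values)
[source,yaml]
----
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
  name: bundles-resolver-example
spec:
  pipelineRef:
    resolver: bundles
    params:
    - name: bundle
      value: registry.example.com/my-org/my-pipelines:latest  # OCI image that contains the pipeline
    - name: name
      value: hello-pipeline  # name of the pipeline stored in the bundle
    - name: kind
      value: pipeline
----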
- -include::modules/op-resolver-bundle-config.adoc[leveloffset=+2] -include::modules/op-resolver-bundle.adoc[leveloffset=+2] - -[id="resolver-cluster_{context}"] -== Specifying a remote pipeline or task from the same cluster - -You can specify a remote pipeline or task that is defined in a namespace on the {product-title} cluster where {pipelines-title} is running by using the cluster resolver. - -include::modules/op-resolver-cluster-config.adoc[leveloffset=+2] -include::modules/op-resolver-cluster.adoc[leveloffset=+2] - -[id="resolver-git_{context}"] -== Specifying a remote pipeline or task from a Git repository - -You can specify a remote pipeline or task from a Git repository by using the Git resolver. The repository must contain a YAML file that defines the pipeline or task. The Git resolver can access a repository either by cloning it anonymously or by using the authenticated SCM API. - -include::modules/op-resolver-git-config-anon.adoc[leveloffset=+2] -include::modules/op-resolver-git-config-scm.adoc[leveloffset=+2] -include::modules/op-resolver-git.adoc[leveloffset=+2] diff --git a/cicd/pipelines/securing-webhooks-with-event-listeners.adoc b/cicd/pipelines/securing-webhooks-with-event-listeners.adoc deleted file mode 100644 index 338c5d82c53f..000000000000 --- a/cicd/pipelines/securing-webhooks-with-event-listeners.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: ASSEMBLY -[id="securing-webhooks-with-event-listeners"] -= Securing webhooks with event listeners -include::_attributes/common-attributes.adoc[] -:context: securing-webhooks-with-event-listeners - -toc::[] - -As an administrator, you can secure webhooks with event listeners. After creating a namespace, you enable HTTPS for the `EventListener` resource by adding the `operator.tekton.dev/enable-annotation=enabled` label to the namespace. Then, you create a `Trigger` resource and a secured route using re-encrypt TLS termination. - -Triggers in {pipelines-title} support insecure HTTP and secure HTTPS connections to the `EventListener` resource. HTTPS secures connections within and outside the cluster. - -{pipelines-title} runs a `tekton-operator-proxy-webhook` pod that watches for the labels in the namespace. When you add the label to the namespace, the webhook sets the `service.beta.openshift.io/serving-cert-secret-name=` annotation on the `EventListener` object. This, in turn, creates secrets and the required certificates. - -[source,terminal,subs="attributes+"] ----- -service.beta.openshift.io/serving-cert-secret-name= ----- - -In addition, you can mount the created secret into the `EventListener` pod to secure the request. - -include::modules/op-providing-secure-connection.adoc[leveloffset=+1] - -include::modules/op-sample-eventlistener-resource.adoc[leveloffset=+1] diff --git a/cicd/pipelines/setting-compute-resource-quota-for-openshift-pipelines.adoc b/cicd/pipelines/setting-compute-resource-quota-for-openshift-pipelines.adoc deleted file mode 100644 index 293db99e61cf..000000000000 --- a/cicd/pipelines/setting-compute-resource-quota-for-openshift-pipelines.adoc +++ /dev/null @@ -1,38 +0,0 @@ -:_content-type: ASSEMBLY -[id="setting-compute-resource-quota-for-openshift-pipelines"] -= Setting compute resource quota for {pipelines-shortname} -include::_attributes/common-attributes.adoc[] -:context: setting-compute-resource-quota-for-openshift-pipelines - -toc::[] - -A `ResourceQuota` object in {pipelines-title} controls the total resource consumption per namespace.
You can use it to limit the quantity of objects created in a namespace, based on the type of the object. In addition, you can specify a compute resource quota to restrict the total amount of compute resources consumed in a namespace. - -However, you might want to limit the amount of compute resources consumed by pods resulting from a pipeline run, rather than setting quotas for the entire namespace. Currently, {pipelines-title} does not enable you to directly specify the compute resource quota for a pipeline. - -include::modules/op-alternative-approaches-compute-resource-quota-pipelines.adoc[leveloffset=+1] - -[NOTE] -==== -When using {pipelines-title} in a namespace configured with a `ResourceQuota` object, the pods resulting from task runs and pipeline runs might fail with an error, such as: `failed quota: must specify cpu, memory`. - -To avoid this error, do any one of the following: - -* (Recommended) Specify a limit range for the namespace. -* Explicitly define requests and limits for all containers. - -For more information, refer to the link:https://issues.redhat.com/browse/SRVKP-1801[issue] and the link:https://access.redhat.com/solutions/2841971[resolution]. -==== - -If your use case is not addressed by these approaches, you can implement a workaround by using a resource quota for a priority class. - -include::modules/op-specifying-pipelines-resource-quota-using-priority-class.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_setting-compute-resource-quota-for-pipelines"] -== Additional resources - -* xref:../../nodes/clusters/nodes-cluster-limit-ranges.adoc#nodes-cluster-limit-ranges[Restrict resource consumption with limit ranges] -* link:https://kubernetes.io/docs/concepts/policy/resource-quotas/[Resource quotas in Kubernetes] -* link:https://kubernetes.io/docs/concepts/policy/limit-range/[Limit ranges in Kubernetes] -* link:https://kubernetes.io/docs/concepts/workloads/pods/init-containers/#resources[Resource requests and limits in Kubernetes] diff --git a/cicd/pipelines/snippets b/cicd/pipelines/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/cicd/pipelines/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/cicd/pipelines/understanding-openshift-pipelines.adoc b/cicd/pipelines/understanding-openshift-pipelines.adoc deleted file mode 100644 index 7aa1be6a3f91..000000000000 --- a/cicd/pipelines/understanding-openshift-pipelines.adoc +++ /dev/null @@ -1,50 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-openshift-pipelines"] -= Understanding {pipelines-shortname} -include::_attributes/common-attributes.adoc[] -:context: understanding-openshift-pipelines - -toc::[] - -:FeatureName: OpenShift Pipelines - -{pipelines-title} is a cloud-native, continuous integration and continuous delivery (CI/CD) solution based on Kubernetes resources. It uses Tekton building blocks to automate deployments across multiple platforms by abstracting away the underlying implementation details. Tekton introduces a number of standard custom resource definitions (CRDs) for defining CI/CD pipelines that are portable across Kubernetes distributions. - -[id="op-key-features"] -== Key features - -* {pipelines-title} is a serverless CI/CD system that runs pipelines with all the required dependencies in isolated containers. -* {pipelines-title} are designed for decentralized teams that work on microservice-based architecture. 
-* {pipelines-title} uses standard CI/CD pipeline definitions that are easy to extend and integrate with existing Kubernetes tools, enabling you to scale on demand. -* You can use {pipelines-title} to build images with Kubernetes tools such as Source-to-Image (S2I), Buildah, Buildpacks, and Kaniko that are portable across any Kubernetes platform. -* You can use the {product-title} web console *Developer* perspective to create Tekton resources, view logs of pipeline runs, and manage pipelines in your {product-title} namespaces. - -[id="op-detailed-concepts"] -== {pipelines-shortname} concepts -This guide provides a detailed view of the various pipeline concepts. - -//About tasks -include::modules/op-about-tasks.adoc[leveloffset=+2] -//About when expression -include::modules/op-about-whenexpression.adoc[leveloffset=+2] -//About final tasks -include::modules/op-about-finally_tasks.adoc[leveloffset=+2] -//About task run -include::modules/op-about-taskrun.adoc[leveloffset=+2] -//About pipelines -include::modules/op-about-pipelines.adoc[leveloffset=+2] -//About pipeline run -include::modules/op-about-pipelinerun.adoc[leveloffset=+2] -//About workspace -include::modules/op-about-workspace.adoc[leveloffset=+2] -//About triggers -include::modules/op-about-triggers.adoc[leveloffset=+2] - - -[role="_additional-resources"] -== Additional resources - -* For information on installing {pipelines-shortname}, see xref:../../cicd/pipelines/installing-pipelines.adoc#installing-pipelines[Installing {pipelines-shortname}]. -* For more details on creating custom CI/CD solutions, see xref:../../cicd/pipelines/creating-applications-with-cicd-pipelines.adoc#creating-applications-with-cicd-pipelines[Creating CI/CD solutions for applications using {pipelines-shortname}]. -* For more details on re-encrypt TLS termination, see link:https://docs.openshift.com/container-platform/3.11/architecture/networking/routes.html#re-encryption-termination[Re-encryption Termination]. -* For more details on secured routes, see the xref:../../networking/routes/secured-routes.adoc#secured-routes[Secured routes] section. diff --git a/cicd/pipelines/uninstalling-pipelines.adoc b/cicd/pipelines/uninstalling-pipelines.adoc deleted file mode 100644 index c3489bd15c38..000000000000 --- a/cicd/pipelines/uninstalling-pipelines.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-pipelines"] -= Uninstalling {pipelines-shortname} -include::_attributes/common-attributes.adoc[] -:context: uninstalling-pipelines - -toc::[] - -Cluster administrators can uninstall the {pipelines-title} Operator by performing the following steps: - -. Delete the custom resources (CRs) that were added by default when you installed the {pipelines-title} Operator. -. Delete the CRs of the optional components, such as {tekton-chains}, that are dependent on the Operator. -+ -[CAUTION] -==== -If you uninstall the Operator without removing the CRs of optional components, you cannot remove them later. -==== -. Uninstall the {pipelines-title} Operator. - -Uninstalling only the Operator does not remove the {pipelines-title} components that were created by default when the Operator was installed.
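As a minimal sketch of the first two steps, the following commands delete the default `TektonConfig` CR and the CR of an optional component. The commands assume the default CR names `config` and `chain`; verify the names in your cluster before deleting anything.

[source,terminal]
----
$ oc delete tektonconfig config   # CR added by default when the Operator is installed
$ oc delete tektonchain chain     # CR of an optional component, if present
----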
- -include::modules/op-deleting-the-pipelines-component-and-custom-resources.adoc[leveloffset=+1] - -include::modules/op-uninstalling-the-pipelines-operator.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* You can learn more about uninstalling Operators on {product-title} in the xref:../../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster[deleting Operators from a cluster] section. diff --git a/cicd/pipelines/unprivileged-building-of-container-images-using-buildah.adoc b/cicd/pipelines/unprivileged-building-of-container-images-using-buildah.adoc deleted file mode 100644 index 48734d9ea68c..000000000000 --- a/cicd/pipelines/unprivileged-building-of-container-images-using-buildah.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="unprivileged-building-of-container-images-using-buildah"] -= Building of container images using Buildah as a non-root user -include::_attributes/common-attributes.adoc[] -:context: unprivileged-building-of-container-images-using-buildah - -toc::[] - -Running {pipelines-shortname} as the root user on a container can expose the container processes and the host to other potentially malicious resources. You can reduce this type of exposure by running the workload as a specific non-root user in the container. To run builds of container images using Buildah as a non-root user, you can perform the following steps: - -* Define custom service account (SA) and security context constraint (SCC). -* Configure Buildah to use the `build` user with id `1000`. -* Start a task run with a custom config map, or integrate it with a pipeline run. - -include::modules/op-configuring-custom-sa-and-scc.adoc[leveloffset=+1] -include::modules/op-configuring-buildah-to-use-build-user.adoc[leveloffset=+1] -include::modules/op-starting-a-task-run-pipeline-run-build-user.adoc[leveloffset=+1] -include::modules/op-limitations-of-unprivileged-builds.adoc[leveloffset=+1] - - -.Additional resources - -* xref:../../authentication/managing-security-context-constraints.adoc#managing-pod-security-policies[Managing security context constraints (SCCs)] diff --git a/cicd/pipelines/using-pipelines-as-code.adoc b/cicd/pipelines/using-pipelines-as-code.adoc deleted file mode 100644 index ddf4f6948ee9..000000000000 --- a/cicd/pipelines/using-pipelines-as-code.adoc +++ /dev/null @@ -1,148 +0,0 @@ -:_content-type: ASSEMBLY -[id="using-pipelines-as-code"] -= Using {pac} -include::_attributes/common-attributes.adoc[] -:context: using-pipelines-as-code - -toc::[] - - -// :FeatureName: Pipelines as Code -[role="_abstract"] -With {pac}, cluster administrators and users with the required privileges can define pipeline templates as part of source code Git repositories. When triggered by a source code push or a pull request for the configured Git repository, {pac} runs the pipeline and reports the status. - -[id="pac-key-features"] -== Key features -{pac} supports the following features: - -* Pull request status and control on the platform hosting the Git repository. -* GitHub Checks API to set the status of a pipeline run, including rechecks. -* GitHub pull request and commit events. -* Pull request actions in comments, such as `/retest`. -* Git events filtering and a separate pipeline for each event. -* Automatic task resolution in {pipelines-shortname}, including local tasks, Tekton Hub, and remote URLs. -* Retrieval of configurations using GitHub blobs and objects API. 
-* Access Control List (ACL) over a GitHub organization, or using a Prow style `OWNER` file. -* The `tkn pac` CLI plugin for managing bootstrapping and {pac} repositories. -* Support for GitHub App, GitHub Webhook, Bitbucket Server, and Bitbucket Cloud. - -include::modules/op-installing-pipelines-as-code-on-an-openshift-cluster.adoc[leveloffset=+1] - -include::modules/op-installing-pipelines-as-code-cli.adoc[leveloffset=+1] - -[id="using-pipelines-as-code-with-a-git-repository-hosting-service-provider"] -== Using {pac} with a Git repository hosting service provider - -[role="_abstract"] -After installing {pac}, cluster administrators can configure a Git repository hosting service provider. Currently, the following services are supported: - -* GitHub App -* GitHub Webhook -* GitLab -* Bitbucket Server -* Bitbucket Cloud - -[NOTE] -==== -GitHub App is the recommended service for using with {pac}. -==== - -include::modules/op-using-pipelines-as-code-with-a-github-app.adoc[leveloffset=+1] - -include::modules/op-creating-a-github-application-in-administrator-perspective.adoc[leveloffset=+2] - -include::modules/op-scoping-github-token.adoc[leveloffset=+2] - -include::modules/op-using-pipelines-as-code-with-github-webhook.adoc[leveloffset=+1] - -.Additional resources - -* link:https://docs.github.com/en/developers/webhooks-and-events/webhooks/creating-webhooks[GitHub Webhook documentation on GitHub] -* link:https://docs.github.com/en/rest/guides/getting-started-with-the-checks-api[GitHub Check Runs documentation on GitHub] -* link:https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token[Creating a personal access token on GitHub] -* link:https://github.com/settings/tokens/new?description=pipelines-as-code-token&scopes=repo[Classic tokens with pre-filled permissions] - -include::modules/op-using-pipelines-as-code-with-gitlab.adoc[leveloffset=+1] - -.Additional resources - -* link:https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html[GitLab Webhook documentation on GitLab] - -include::modules/op-using-pipelines-as-code-with-bitbucket-cloud.adoc[leveloffset=+1] - -.Additional resources - -* link:https://support.atlassian.com/bitbucket-cloud/docs/app-passwords/[Creating app password on Bitbucket Cloud] -* link:https://developer.atlassian.com/cloud/bitbucket/bitbucket-api-changes-gdpr/#introducing-atlassian-account-id-and-nicknames[Introducing Altassian Account ID and Nicknames] - -include::modules/op-using-pipelines-as-code-with-bitbucket-server.adoc[leveloffset=+1] - -.Additional resources - -* link:https://confluence.atlassian.com/bitbucketserver/personal-access-tokens-939515499.html[Creating personal tokens on Bitbucket Server] -* link:https://support.atlassian.com/bitbucket-cloud/docs/manage-webhooks/#Create-webhooks[Creating webhooks on Bitbucket server] - -include::modules/op-interfacing-pipelines-as-code-with-custom-certificates.adoc[leveloffset=+1] - -.Additional resources - -* xref:../../networking/enable-cluster-wide-proxy.adoc#nw-proxy-configure-object[Enabling the cluster-wide proxy] - -include::modules/op-using-repository-crd-with-pipelines-as-code.adoc[leveloffset=+1] - -include::modules/op-setting-concurrency-limits-in-repository-crd.adoc[leveloffset=+2] - -include::modules/op-changing-source-branch-in-repository-crd.adoc[leveloffset=+2] - -include::modules/op-custom-parameter-expansion.adoc[leveloffset=+2] - -include::modules/op-using-pipelines-as-code-resolver.adoc[leveloffset=+1] - 
-include::modules/op-using-remote-task-annotations-with-pipelines-as-code.adoc[leveloffset=+2] - -include::modules/op-using-remote-pipeline-annotations-with-pipelines-as-code.adoc[leveloffset=+2] - -include::modules/op-creating-pipeline-run-using-pipelines-as-code.adoc[leveloffset=+1] - -.Additional resources - -* link:https://github.com/google/cel-spec/blob/master/doc/langdef.md[CEL language specification] - -include::modules/op-running-pipeline-run-using-pipelines-as-code.adoc[leveloffset=+1] - -include::modules/op-monitoring-pipeline-run-status-using-pipelines-as-code.adoc[leveloffset=+1] - -.Additional resources - -* link:https://github.com/chmouel/tekton-slack-task-status[An example task to send Slack messages on success or failure] -* link:https://github.com/openshift-pipelines/pipelines-as-code/blob/7b41cc3f769af40a84b7ead41c6f037637e95070/.tekton/push.yaml[An example of a pipeline run with `finally` tasks triggered on push events] - -include::modules/op-using-private-repositories-with-pipelines-as-code.adoc[leveloffset=+1] - -.Additional resources - -* link:https://github.com/openshift-pipelines/pipelines-as-code/blob/main/test/testdata/pipelinerun_git_clone_private.yaml[An example of the `git-clone` task used for cloning private repositories] - -include::modules/op-cleaning-up-pipeline-run-using-pipelines-as-code.adoc[leveloffset=+1] - -include::modules/op-using-incoming-webhook-with-pipelines-as-code.adoc[leveloffset=+1] - -include::modules/op-customizing-pipelines-as-code-configuration.adoc[leveloffset=+1] - -include::modules/op-pipelines-as-code-command-reference.adoc[leveloffset=+1] - -include::modules/op-splitting-pipelines-as-code-logs-by-namespace.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources-pac"] -== Additional resources - -* link:https://github.com/openshift-pipelines/pipelines-as-code/tree/main/.tekton[An example of the `.tekton/` directory in the Pipelines as Code repository] - -* xref:../../cicd/pipelines/installing-pipelines.adoc#installing-pipelines[Installing {pipelines-shortname}] - -* xref:../../cli_reference/tkn_cli/installing-tkn.adoc#installing-tkn[Installing tkn] - -* xref:../../cicd/pipelines/op-release-notes.adoc#op-release-notes[{pipelines-title} release notes] - -* xref:../../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-the-developer-perspective[Creating applications using the Developer perspective] diff --git a/cicd/pipelines/using-pods-in-a-privileged-security-context.adoc b/cicd/pipelines/using-pods-in-a-privileged-security-context.adoc deleted file mode 100644 index 323b4b627d52..000000000000 --- a/cicd/pipelines/using-pods-in-a-privileged-security-context.adoc +++ /dev/null @@ -1,36 +0,0 @@ -:_content-type: ASSEMBLY -[id="using-pods-in-a-privileged-security-context"] -= Using pods in a privileged security context -include::_attributes/common-attributes.adoc[] -:context: using-pods-in-a-privileged-security-context - -toc::[] - -The default configuration of {pipelines-shortname} 1.3.x and later versions does not allow you to run pods with privileged security context, if the pods result from pipeline run or task run. -For such pods, the default service account is `pipeline`, and the security context constraint (SCC) associated with the `pipeline` service account is `pipelines-scc`. 
The `pipelines-scc` SCC is similar to the `anyuid` SCC, but with minor differences as defined in the YAML file for the SCC of pipelines: - -.Example `pipelines-scc.yaml` snippet -[source,yaml,subs="attributes+"] ----- -apiVersion: security.openshift.io/v1 -kind: SecurityContextConstraints -... -allowedCapabilities: - - SETFCAP -... -fsGroup: - type: MustRunAs -... ----- - -In addition, the `Buildah` cluster task, shipped as part of {pipelines-shortname}, uses `vfs` as the default storage driver. - -include::modules/op-running-pipeline-and-task-run-pods-with-privileged-security-context.adoc[leveloffset=+1] - -include::modules/op-running-pipeline-run-and-task-run-with-custom-scc-and-service-account.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-references_using-pods-in-a-privileged-security-context"] -== Additional resources - -* For information on managing SCCs, refer to xref:../../authentication/managing-security-context-constraints.adoc[Managing security context constraints]. diff --git a/cicd/pipelines/using-tekton-chains-for-openshift-pipelines-supply-chain-security.adoc b/cicd/pipelines/using-tekton-chains-for-openshift-pipelines-supply-chain-security.adoc deleted file mode 100644 index a1c864d12d04..000000000000 --- a/cicd/pipelines/using-tekton-chains-for-openshift-pipelines-supply-chain-security.adoc +++ /dev/null @@ -1,45 +0,0 @@ -:_content-type: ASSEMBLY -[id="using-tekton-chains-for-openshift-pipelines-supply-chain-security"] -= Using Tekton Chains for {pipelines-shortname} supply chain security -include::_attributes/common-attributes.adoc[] -:context: using-tekton-chains-for-openshift-pipelines-supply-chain-security - -toc::[] - -[role="_abstract"] -{tekton-chains} is a Kubernetes Custom Resource Definition (CRD) controller. You can use it to manage the supply chain security of the tasks and pipelines created using {pipelines-title}. - -By default, {tekton-chains} observes all task run executions in your {product-title} cluster. When the task runs complete, {tekton-chains} takes a snapshot of the task runs. It then converts the snapshot to one or more standard payload formats, and finally signs and stores all artifacts. - -To capture information about task runs, {tekton-chains} uses `Result` objects. When the objects are unavailable, {tekton-chains} uses the URLs and qualified digests of the OCI images. - -[id="tc-key-features"] -== Key features -* You can sign task runs, task run results, and OCI registry images with cryptographic keys that are generated by tools such as `cosign` and `skopeo`. -* You can use attestation formats such as `in-toto`. -* You can securely store signatures and signed artifacts using an OCI repository as a storage backend.
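For example, you can generate a `cosign` key pair and store it as the signing secret that {tekton-chains} reads. The namespace and secret name shown here (`openshift-pipelines/signing-secrets`) are assumptions based on common defaults; confirm them for your installation.

[source,terminal]
----
$ cosign generate-key-pair k8s://openshift-pipelines/signing-secrets
----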
- -include::modules/op-configuring-tekton-chains.adoc[leveloffset=+1] - -include::modules/op-supported-parameters-tekton-chains-configuration.adoc[leveloffset=+2] - -include::modules/op-signing-secrets-in-tekton-chains.adoc[leveloffset=+1] -include::modules/op-chains-signing-secrets-cosign.adoc[leveloffset=+2] -include::modules/op-chains-signing-secrets-skopeo.adoc[leveloffset=+2] -include::modules/op-chains-resolving-existing-secret.adoc[leveloffset=+2] - -include::modules/op-authenticating-to-an-oci-registry.adoc[leveloffset=+1] - -include::modules/op-creating-and-verifying-task-run-signatures-without-any-additional-authentication.adoc[leveloffset=+1] -=== Additional resources - -* xref:signing-secrets-in-tekton-chains_{context}[] -* xref:configuring-tekton-chains_{context}[] - -include::modules/op-using-tekton-chains-to-sign-and-verify-image-and-provenance.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources-tekton-chains"] -== Additional resources - -* xref:../../cicd/pipelines/installing-pipelines.adoc#installing-pipelines[Installing {pipelines-shortname}] diff --git a/cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc b/cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc deleted file mode 100644 index c4b1c5f07da0..000000000000 --- a/cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc +++ /dev/null @@ -1,43 +0,0 @@ -:_content-type: ASSEMBLY -[id="using-tekton-hub-with-openshift-pipelines"] -= Using Tekton Hub with {pipelines-shortname} -include::_attributes/common-attributes.adoc[] -:context: using-tekton-hub-with-openshift-pipelines - -toc::[] - -:FeatureName: Tekton Hub -include::snippets/technology-preview.adoc[] - -[role="_abstract"] -{tekton-hub} helps you discover, search, and share reusable tasks and pipelines for your CI/CD workflows. A public instance of {tekton-hub} is available at link:https://hub.tekton.dev/[hub.tekton.dev]. Cluster administrators can also install and deploy a custom instance of {tekton-hub} by modifying the configurations in the `TektonHub` custom resource (CR). 
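A minimal `TektonHub` CR skeleton might look like the following. The metadata name and the `targetNamespace` field are assumptions shown only to illustrate the shape of the CR; the modules that follow describe the actual configuration options.

.Example `TektonHub` CR skeleton (illustrative)
[source,yaml]
----
apiVersion: operator.tekton.dev/v1alpha1
kind: TektonHub
metadata:
  name: hub
spec:
  targetNamespace: openshift-pipelines  # namespace where the Tekton Hub components run
----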
- -include::modules/op-installing-and-deploying-tekton-hub-on-an-openshift-cluster.adoc[leveloffset=+1] - -include::modules/op-installing-tekton-hub-without-login-and-rating.adoc[leveloffset=+2] - -include::modules/op-installing-tekton-hub-with-login-and-rating.adoc[leveloffset=+2] - -include::modules/op-using-a-custom-database-in-tekton-hub.adoc[leveloffset=+1] - -include::modules/op-installing-crunchy-postgres-database-and-tekton-hub.adoc[leveloffset=+2] - -include::modules/op-migrating-tekton-hub-data-to-an-existing-crunchy-postgres-database.adoc[leveloffset=+2] - -include::modules/op-updating-tekton-hub-with-custom-categories-and-catalogs.adoc[leveloffset=+1] - -include::modules/op-modifying-catalog-refresh-interval-tekton-hub.adoc[leveloffset=+1] - -include::modules/op-adding-new-users-in-tekton-hub-configuration.adoc[leveloffset=+1] - -include::modules/op-disabling-tekton-hub-authorization-after-upgrade.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources-tekton-hub"] -== Additional resources - -* GitHub repository of link:https://github.com/tektoncd/hub[Tekton Hub] - -* xref:../../cicd/pipelines/installing-pipelines.adoc#installing-pipelines[Installing {pipelines-shortname}] - -* xref:../../cicd/pipelines/op-release-notes.adoc#op-release-notes[{pipelines-title} release notes] \ No newline at end of file diff --git a/cicd/pipelines/viewing-pipeline-logs-using-the-openshift-logging-operator.adoc b/cicd/pipelines/viewing-pipeline-logs-using-the-openshift-logging-operator.adoc deleted file mode 100644 index 0a4b75bc2866..000000000000 --- a/cicd/pipelines/viewing-pipeline-logs-using-the-openshift-logging-operator.adoc +++ /dev/null @@ -1,32 +0,0 @@ -:_content-type: ASSEMBLY -[id="viewing-pipeline-logs-using-the-openshift-logging-operator"] -= Viewing pipeline logs using the OpenShift Logging Operator -include::_attributes/common-attributes.adoc[] -:context: viewing-pipeline-logs-using-the-openshift-logging-operator - -toc::[] - -The logs generated by pipeline runs, task runs, and event listeners are stored in their respective pods. It is useful to review and analyze logs for troubleshooting and audits. - -However, retaining the pods indefinitely leads to unnecessary resource consumption and cluttered namespaces. - -To eliminate any dependency on the pods for viewing pipeline logs, you can use the OpenShift Elasticsearch Operator and the OpenShift Logging Operator. These Operators help you to view pipeline logs by using the link:https://www.elastic.co/guide/en/kibana/6.8/connect-to-elasticsearch.html[Elasticsearch Kibana] stack, even after you have deleted the pods that contained the logs. - -[id="prerequisites_viewing-pipeline-logs-using-the-openshift-logging-operator"] -== Prerequisites - -Before trying to view pipeline logs in a Kibana dashboard, ensure the following: - -* The steps are performed by a cluster administrator. -* Logs for pipeline runs and task runs are available. -* The OpenShift Elasticsearch Operator and the OpenShift Logging Operator are installed. 
- -include::modules/op-viewing-pipeline-logs-in-kibana.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_viewing-pipeline-logs-using-the-openshift-logging-operator"] -== Additional resources - -* xref:../../logging/cluster-logging-deploying.adoc[Installing OpenShift Logging] -* xref:../../logging/viewing-resource-logs.adoc[Viewing logs for a resource] -* xref:../../logging/cluster-logging-visualizer.adoc[Viewing cluster logs by using Kibana] diff --git a/cicd/pipelines/working-with-pipelines-using-the-developer-perspective.adoc b/cicd/pipelines/working-with-pipelines-using-the-developer-perspective.adoc deleted file mode 100644 index f81e5c6f689b..000000000000 --- a/cicd/pipelines/working-with-pipelines-using-the-developer-perspective.adoc +++ /dev/null @@ -1,52 +0,0 @@ -:_content-type: ASSEMBLY -[id="working-with-pipelines-using-the-developer-perspective"] -= Working with {pipelines-title} using the Developer perspective -include::_attributes/common-attributes.adoc[] -:context: working-with-pipelines-using-the-developer-perspective - -toc::[] - -[role="_abstract"] -You can use the *Developer* perspective of the {product-title} web console to create CI/CD pipelines for your software delivery process. - -In the *Developer* perspective: - -* Use the *Add* -> *Pipeline* -> *Pipeline builder* option to create customized pipelines for your application. -* Use the *Add* -> *From Git* option to create pipelines using operator-installed pipeline templates and resources while creating an application on {product-title}. - -After you create the pipelines for your application, you can view and visually interact with the deployed pipelines in the *Pipelines* view. You can also use the *Topology* view to interact with the pipelines created using the *From Git* option. You must apply custom labels to pipelines created using the *Pipeline builder* to see them in the *Topology* view. - -[discrete] -== Prerequisites - -* You have access to an {product-title} cluster and have switched to xref:../../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[the *Developer* perspective]. -* You have the xref:../../cicd/pipelines/installing-pipelines.adoc#installing-pipelines[{pipelines-shortname} Operator installed] in your cluster. -* You are a cluster administrator or a user with create and edit permissions. -* You have created a project. 
- - -include::modules/op-constructing-pipelines-using-pipeline-builder.adoc[leveloffset=+1] - -include::modules/op-creating-pipelines-along-with-applications.adoc[leveloffset=+1] - -include::modules/odc-adding-a-GitHub-repository-containing-pipelines.adoc[leveloffset=+1] - -include::modules/op-interacting-with-pipelines-using-the-developer-perspective.adoc[leveloffset=+1] - -include::modules/op-using-custom-pipeline-template-for-git-import.adoc[leveloffset=+1] - -include::modules/op-starting-pipelines-from-pipelines-view.adoc[leveloffset=+1] - -include::modules/op-starting-pipelines-from-topology-view.adoc[leveloffset=+1] - -include::modules/op-interacting-pipelines-from-topology-view.adoc[leveloffset=+1] - -include::modules/op-editing-pipelines.adoc[leveloffset=+1] - -include::modules/op-deleting-pipelines.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources-working-with-pipelines-using-the-developer-perspective"] -== Additional resources - -* xref:../../cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc#using-tekton-hub-with-openshift-pipelines[Using Tekton Hub with {pipelines-shortname}] diff --git a/cicd/snippets b/cicd/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/cicd/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/cli_reference/_attributes b/cli_reference/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/cli_reference/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/_attributes b/cli_reference/developer_cli_odo/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/cli_reference/developer_cli_odo/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/attributes b/cli_reference/developer_cli_odo/attributes deleted file mode 120000 index 5b32de1e01e7..000000000000 --- a/cli_reference/developer_cli_odo/attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc b/cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc deleted file mode 100644 index db390d2e31ca..000000000000 --- a/cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc +++ /dev/null @@ -1,27 +0,0 @@ -//// -:_content-type: ASSEMBLY -[id='configuring-the-odo-cli'] -= Configuring the odo CLI -include::_attributes/common-attributes.adoc[] -:context: configuring-the-odo-cli - -toc::[] - -// Comment out per https://issues.redhat.com/browse/RHDEVDOCS-3594 -// include::modules/developer-cli-odo-using-command-completion.adoc[leveloffset=+1] - -You can find the global settings for `odo` in the `preference.yaml` file which is located by default in your `$HOME/.odo` directory. - -You can set a different location for the `preference.yaml` file by exporting the `GLOBALODOCONFIG` variable. 
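For example, to keep the preference file in a custom location, export the variable before running `odo` commands; the path shown here is only an example.

[source,terminal]
----
$ export GLOBALODOCONFIG=/home/user/.config/odo/preference.yaml
$ odo preference view
----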
- -// view config -include::modules/developer-cli-odo-view-config.adoc[leveloffset=+1] -// set key -include::modules/developer-cli-odo-set-config.adoc[leveloffset=+1] -// unset key -include::modules/developer-cli-odo-unset-config.adoc[leveloffset=+1] -// preference ref table -include::modules/developer-cli-odo-preference-table.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-ignoring-files-or-patterns.adoc[leveloffset=+1] -//// \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-operators.adoc b/cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-operators.adoc deleted file mode 100644 index cfa8616d9b80..000000000000 --- a/cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-operators.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id=creating-instances-of-services-managed-by-operators] -= Creating instances of services managed by Operators -include::_attributes/common-attributes.adoc[] -:context: creating-instances-of-services-managed-by-operators - -toc::[] - -Operators are a method of packaging, deploying, and managing Kubernetes services. With `{odo-title}`, you can create instances of services from the custom resource definitions (CRDs) provided by the Operators. You can then use these instances in your projects and link them to your components. - -To create services from an Operator, you must ensure that the Operator has valid values defined in its `metadata` to start the requested service. `{odo-title}` uses the `metadata.annotations.alm-examples` YAML file of an Operator to start -the service. If this YAML has placeholder values or sample values, a service cannot start. You can modify the YAML file and start the service with the modified values. To learn how to modify YAML files and start services from it, see xref:../../cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-operators.adoc#creating-services-from-yaml-files_creating-instances-of-services-managed-by-operators[Creating services from YAML files]. - -== Prerequisites -* Install the `oc` CLI and log in to the cluster. -** Note that the configuration of the cluster determines the services available to you. To access the Operator services, a cluster administrator must install the respective Operator on the cluster first. To learn more, see xref:../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-operators-from-operatorhub_olm-adding-operators-to-a-cluster[Adding Operators to the cluster]. -* Install the `{odo-title}` CLI. 
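For example, a typical flow is to list the Operator-backed services available in your project and then create an instance from one of them. The Operator version and service names below are placeholders, not values from this documentation.

[source,terminal]
----
$ odo catalog list services                                      # list Operators and the CRDs they expose
$ odo service create postgresql-operator.v0.1.1/Database my-db   # <operator>/<kind> <instance-name>
----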
- -include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+1] -include::modules/developer-cli-odo-listing-available-services-from-the-operators-installed-on-the-cluster.adoc[leveloffset=+1] -include::modules/developer-cli-odo-creating-a-service-from-an-operator.adoc[leveloffset=+1] -include::modules/developer-cli-odo-creating-services-from-yaml-files.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/_attributes b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-java-application-with-a-database.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-java-application-with-a-database.adoc deleted file mode 100644 index e30e041083b7..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-java-application-with-a-database.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id=creating-a-java-application-with-a-database] -= Creating a Java application with a database -include::_attributes/common-attributes.adoc[] -:context: creating-a-java-application-with-a-database -toc::[] - -This example describes how to deploy a Java application by using a devfile and connect it to a database service. - -.Prerequisites - -* A running cluster. -* `{odo-title}` is installed. -* The Service Binding Operator is installed in your cluster. To learn how to install Operators, contact your cluster administrator or see xref:../../../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-operators-from-operatorhub_olm-installing-operators-in-namespace[Installing Operators from OperatorHub]. -* The Dev4Devs PostgreSQL Operator is installed in your cluster. To learn how to install Operators, contact your cluster administrator or see xref:../../../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-operators-from-operatorhub_olm-installing-operators-in-namespace[Installing Operators from OperatorHub].
- -include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-creating-a-java-microservice-jpa-application.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-creating-a-database-with-odo.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-connecting-a-java-application-to-mysql-database.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-multicomponent-application-with-odo.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-multicomponent-application-with-odo.adoc deleted file mode 100644 index 6904fe98e44d..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-multicomponent-application-with-odo.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id='creating-a-multicomponent-application-with-odo'] -= Creating a multicomponent application with `{odo-title}` -:context: creating-a-multicomponent-application-with-odo - -toc::[] - -`{odo-title}` allows you to create a multicomponent application, modify it, and link its components in an easy and automated way. - -This example describes how to deploy a multicomponent application - a shooter game. The application consists of a front-end Node.js component and a back-end Java component. - -.Prerequisites - -* `{odo-title}` is installed. -* You have a running cluster. Developers can use link:https://access.redhat.com/documentation/en-us/red_hat_openshift_local/[{openshift-local-productname}] to deploy a local cluster quickly. -* Maven is installed. - -include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-deploying-the-back-end-component.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-deploying-the-front-end-component.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-linking-both-components.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-exposing-the-components-to-the-public.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-modifying-the-running-application.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-deleting-an-application.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-single-component-application-with-odo.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-single-component-application-with-odo.adoc deleted file mode 100644 index a5cbc0653c55..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-single-component-application-with-odo.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id='creating-a-single-component-application-with-odo'] -= Creating a single-component application with {odo-title} - -:context: creating-a-single-component-application-with-odo - -toc::[] - -With `{odo-title}`, you can create and deploy applications on clusters. - -.Prerequisites - -* `{odo-title}` is installed. -* You have a running cluster. You can use link:https://access.redhat.com/documentation/en-us/red_hat_openshift_local/[{openshift-local-productname}] to deploy a local cluster quickly. 
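As a compressed sketch of the workflow that the following modules describe in detail, the commands below create a project, create a Node.js component from the sources in the current directory, and push it to the cluster; the names are placeholders.

[source,terminal]
----
$ odo project create myproject   # create a project to hold the component
$ odo create nodejs mynodejs     # create a Node.js component from the current directory
$ odo push                       # build and deploy the component on the cluster
----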
- -include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-creating-and-deploying-a-nodejs-application-with-odo.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-modifying-your-application-code.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-adding-storage-to-the-application-components.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-adding-a-custom-builder-to-specify-a-build-image.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-connecting-your-application-to-multiple-services-using-openshift-service-catalog.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-deleting-an-application.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc deleted file mode 100644 index af8e2d948469..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -[id=creating-an-application-with-a-database] -= Creating an application with a database -include::_attributes/common-attributes.adoc[] -:context: creating-an-application-with-a-database - -toc::[] - -This example describes how to deploy and connect a database to a front-end application. - -.Prerequisites - -* `{odo-title}` is installed. -* `oc` client is installed. -* You have a running cluster. Developers can use link:https://access.redhat.com/documentation/en-us/red_hat_openshift_local/[{openshift-local-productname}] to deploy a local cluster quickly. -* The Service Catalog is installed and enabled on your cluster. -+ -[NOTE] -==== -Service Catalog is deprecated on {product-title} 4 and later. -==== - -include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-deploying-the-front-end-component.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-deploying-a-database-in-interactive-mode.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-deploying-a-database-manually.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-connecting-the-database.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/debugging-applications-in-odo.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/debugging-applications-in-odo.adoc deleted file mode 100644 index 67d9f4478a59..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/debugging-applications-in-odo.adoc +++ /dev/null @@ -1,14 +0,0 @@ -:_content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id='debugging-applications-in-odo'] -= Debugging applications in `{odo-title}` -:context: debugging-applications-in-odo - -toc::[] - -With `{odo-title}`, you can attach a debugger to remotely debug your application. This feature is only supported for NodeJS and Java components. - -Components created with `{odo-title}` run in the debug mode by default. A debugger agent runs on the component, on a specific port. To start debugging your application, you must start port forwarding and attach the local debugger bundled in your Integrated development environment (IDE). 
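For example, after pushing a component, you can forward its debug port to your local machine and attach the IDE debugger to that port; the port value is illustrative.

[source,terminal]
----
$ odo debug port-forward --local-port 5858   # forward the component debug port to localhost:5858
----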
- -include::modules/developer-cli-odo-debugging-an-application.adoc[leveloffset=+1] -include::modules/developer-cli-odo-configuring-debugging-parameters.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/deleting-applications.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/deleting-applications.adoc deleted file mode 100644 index fba30e7d3f48..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/deleting-applications.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id='deleting-applications'] -= Deleting applications -include::_attributes/common-attributes.adoc[] -:context: deleting-applications - -toc::[] - -You can delete applications and all components associated with the application in your project. - -include::modules/developer-cli-odo-deleting-an-application.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/images b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/images deleted file mode 120000 index 4399cbb3c0f3..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/images +++ /dev/null @@ -1 +0,0 @@ -../../../images/ \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/modules b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/modules deleted file mode 120000 index 5be29a99c161..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/modules +++ /dev/null @@ -1 +0,0 @@ -../../../modules \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/sample-applications.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/sample-applications.adoc deleted file mode 100644 index 962f39a629ad..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/sample-applications.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="sample-applications"] -= Sample applications -include::_attributes/common-attributes.adoc[] -:context: using-sample-applications - -toc::[] - -`{odo-title}` offers partial compatibility with any language or runtime listed within the {product-title} catalog of component types. For example: - -[source,terminal] ----- -NAME PROJECT TAGS -dotnet openshift 3.1,latest -httpd openshift 2.4,latest -java openshift 8,latest -nginx openshift 1.10,1.12,1.8,latest -nodejs openshift 0.10,4,6,8,latest -perl openshift 5.16,5.20,5.24,latest -php openshift 5.5,5.6,7.0,7.1,latest -python openshift 2.7,3.3,3.4,3.5,3.6,latest -ruby openshift 2.0,2.2,2.3,2.4,latest -wildfly openshift 10.0,10.1,8.1,9.0,latest ----- - -[NOTE] -==== -For `{odo-title}` Java and Node.js are the officially supported component types. -Run `odo catalog list components` to verify the officially supported component types. -==== - -To access the component over the web, create a URL using `odo url create`. 
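For example, to expose a component that listens on port 8080, a sketch assuming the odo 2.x CLI; the URL name is a placeholder:

[source,terminal]
----
$ odo url create myurl --port 8080
$ odo push
$ odo url list
----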
- - - -include::modules/developer-cli-odo-sample-applications-git.adoc[leveloffset=+1] -include::modules/developer-cli-odo-sample-applications-binary.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/snippets b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/snippets deleted file mode 120000 index ce62fd7c41e2..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/using-devfiles-in-odo.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/using-devfiles-in-odo.adoc deleted file mode 100644 index fec5f98f977a..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/using-devfiles-in-odo.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -[id="using-devfiles-in-odo"] -= Using devfiles in {odo-title} -include::_attributes/common-attributes.adoc[] -:context: creating-applications-by-using-devfiles - -toc::[] - -include::modules/developer-cli-odo-about-devfiles-in-odo.adoc[leveloffset=+1] - -== Creating a Java application by using a devfile - -.Prerequisites - -* You have installed `{odo-title}`. -* You must know your ingress domain cluster name. Contact your cluster administrator if you do not know it. For example, `apps-crc.testing` is the cluster domain name for https://access.redhat.com/documentation/en-us/red_hat_openshift_local/[{openshift-local-productname}]. - -[NOTE] -==== -Currently odo does not support creating devfile components with `--git` or `--binary` flags. You can only create S2I components when using these flags. -==== - -include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+2] - -include::modules/developer-cli-odo-listing-available-devfile-components.adoc[leveloffset=+2] - -include::modules/developer-cli-odo-deploying-a-java-application-using-a-devfile.adoc[leveloffset=+2] - -include::modules/developer-cli-odo-converting-an-s2i-component-into-a-devfile-component.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-projects.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-projects.adoc deleted file mode 100644 index bc36206cd0d9..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-projects.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="working-with-projects"] -= Working with projects -include::_attributes/common-attributes.adoc[] -:context: working-with-projects - -toc::[] - -Project keeps your source code, tests, and libraries organized in a separate single unit. 
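A minimal sketch of the project commands covered by the included module, assuming the odo 2.x CLI and a placeholder project name:

[source,terminal]
----
$ odo project create myproject
$ odo project list
----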
- -include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-storage.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-storage.adoc deleted file mode 100644 index 87ecdf93ac03..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-storage.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_content-type: ASSEMBLY -[id='working-with-storage'] -= Working with storage -include::_attributes/common-attributes.adoc[] -:context: working-with-storage - -toc::[] - -Persistent storage keeps data available between restarts of `{odo-title}`. - -include::modules/developer-cli-odo-adding-storage-to-the-application-components.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-adding-storage-to-a-specific-container.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-switching-between-ephemeral-and-persistent-storage.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../../storage/understanding-ephemeral-storage.adoc#storage-ephemeral-storage-overview_understanding-ephemeral-storage[Understanding ephemeral storage]. -* xref:../../../storage/understanding-persistent-storage.adoc#persistent-storage-overview_understanding-persistent-storage[Understanding persistent storage] diff --git a/cli_reference/developer_cli_odo/images b/cli_reference/developer_cli_odo/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/cli_reference/developer_cli_odo/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/installing-odo.adoc b/cli_reference/developer_cli_odo/installing-odo.adoc deleted file mode 100644 index 813a0f156199..000000000000 --- a/cli_reference/developer_cli_odo/installing-odo.adoc +++ /dev/null @@ -1,30 +0,0 @@ -//// -:_content-type: ASSEMBLY -[id='installing-odo'] -= Installing odo -include::_attributes/common-attributes.adoc[] -:context: installing-odo - -toc::[] - -// The following section describes how to install `{odo-title}` on different platforms using the CLI or the Visual Studio Code (VS Code) IDE. - -You can install the `{odo-title}` CLI on Linux, Windows, or macOS by downloading a binary. You can also install the OpenShift VS Code extension, which uses both the `{odo-title}` and the `oc` binaries to interact with your OpenShift Container Platform cluster. For {op-system-base-full}, you can install the `{odo-title}` CLI as an RPM. - -[NOTE] -==== -Currently, `{odo-title}` does not support installation in a restricted network environment. 
-==== - -// You can also find the URL to the latest binaries from the {product-title} web console by clicking the *?* icon in the upper-right corner and selecting *Command Line Tools* - -include::modules/developer-cli-odo-installing-odo-on-linux.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-installing-odo-on-windows.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-installing-odo-on-macos.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-installing-odo-on-vs-code.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-installing-odo-on-linux-rpm.adoc[leveloffset=+1] -//// diff --git a/cli_reference/developer_cli_odo/managing-environment-variables-in-odo.adoc b/cli_reference/developer_cli_odo/managing-environment-variables-in-odo.adoc deleted file mode 100644 index 1b37feceb671..000000000000 --- a/cli_reference/developer_cli_odo/managing-environment-variables-in-odo.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id='managing-environment-variables'] -= Managing environment variables -include::_attributes/common-attributes.adoc[] -:context: managing-environment-variables - -toc::[] - -`{odo-title}` stores component-specific configurations and environment variables in the `config` file. You can use the `odo config` command to set, unset, and list environment variables for components without the need to modify the `config` file. - -include::modules/developer-cli-odo-setting-and-unsetting-environment-variables.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/modules b/cli_reference/developer_cli_odo/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/cli_reference/developer_cli_odo/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/odo-architecture.adoc b/cli_reference/developer_cli_odo/odo-architecture.adoc deleted file mode 100644 index 9a68934dc11e..000000000000 --- a/cli_reference/developer_cli_odo/odo-architecture.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="odo-architecture"] -= odo architecture -include::_attributes/common-attributes.adoc[] -:context: odo-architecture - -toc::[] - -This section describes `{odo-title}` architecture and how `{odo-title}` manages resources on a cluster. 
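For reference, the `odo config` environment-variable workflow described in the managing-environment-variables assembly above can be sketched as follows. This assumes odo 2.x syntax and an existing component; the variable name and value are placeholders, and `odo push` applies the change on the cluster:

[source,terminal]
----
$ odo config set --env DB_HOST=database.example.com
$ odo config view
$ odo config unset --env DB_HOST
$ odo push
----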
- -include::modules/developer-cli-odo-developer-setup.adoc[leveloffset=+1] -include::modules/developer-cli-odo-openshift-source-to-image.adoc[leveloffset=+1] -include::modules/developer-cli-odo-openshift-cluster-objects.adoc[leveloffset=+1] -include::modules/developer-cli-odo-push-workflow.adoc[leveloffset=+1] - -// [role="_additional-resources"] -// == Additional resources diff --git a/cli_reference/developer_cli_odo/odo-cli-reference.adoc b/cli_reference/developer_cli_odo/odo-cli-reference.adoc deleted file mode 100644 index faa2e5650493..000000000000 --- a/cli_reference/developer_cli_odo/odo-cli-reference.adoc +++ /dev/null @@ -1,21 +0,0 @@ -//// -:_content-type: ASSEMBLY -[id='odo-cli-reference'] -= odo CLI reference -include::_attributes/common-attributes.adoc[] -:context: odo-cli-reference - -toc::[] - -include::modules/developer-cli-odo-ref-build-images.adoc[leveloffset=+1] -include::modules/developer-cli-odo-ref-catalog.adoc[leveloffset=+1] -include::modules/developer-cli-odo-ref-create.adoc[leveloffset=+1] -include::modules/developer-cli-odo-ref-delete.adoc[leveloffset=+1] -include::modules/developer-cli-odo-ref-deploy.adoc[leveloffset=+1] -include::modules/developer-cli-odo-ref-link.adoc[leveloffset=+1] -include::modules/developer-cli-odo-ref-registry.adoc[leveloffset=+1] -include::modules/developer-cli-odo-ref-service.adoc[leveloffset=+1] -include::modules/developer-cli-odo-ref-storage.adoc[leveloffset=+1] -include::modules/developer-cli-odo-ref-flags.adoc[leveloffset=+1] -include::modules/developer-cli-odo-ref-json-output.adoc[leveloffset=+1] -//// diff --git a/cli_reference/developer_cli_odo/odo-release-notes.adoc b/cli_reference/developer_cli_odo/odo-release-notes.adoc deleted file mode 100644 index 645fca7f25c9..000000000000 --- a/cli_reference/developer_cli_odo/odo-release-notes.adoc +++ /dev/null @@ -1,74 +0,0 @@ -//// -:_content-type: ASSEMBLY -[id='odo-release-notes'] -= `{odo-title}` release notes -include::_attributes/common-attributes.adoc[] -:context: odo-release-notes - -toc::[] - -[id="odo-notable-improvements_{context}"] -== Notable changes and improvements in `{odo-title}` version 2.5.0 - -// #5238 -* Creates unique routes for each component, using `adler32` hashing -// #5252 -* Supports additional fields in the devfile for assigning resources: -** cpuRequest -** cpuLimit -** memoryRequest -** memoryLimit -// #5276 -* Adds the `--deploy` flag to the `odo delete` command, to remove components deployed using the `odo deploy` command: -+ -[source,terminal] ----- -$ odo delete --deploy ----- -// #5237 -* Adds mapping support to the `odo link` command -// #5279 -* Supports ephemeral volumes using the `ephemeral` field in `volume` components -// #5270 -* Sets the default answer to `yes` when asking for telemetry opt-in -// #5260 -* Improves metrics by sending additional telemetry data to the devfile registry -// #5287 -* Updates the bootstrap image to `registry.access.redhat.com/ocp-tools-4/odo-init-container-rhel8:1.1.11` -// #5308 -* The upstream repository is available at link:https://github.com/redhat-developer/odo[] - - - -[id="odo-fixed-issues_{context}"] -== Bug fixes -// #5294 -* Previously, `odo deploy` would fail if the `.odo/env` file did not exist. The command now creates the `.odo/env` file if required. -// #5286 -* Previously, interactive component creation using the `odo create` command would fail if disconnect from the cluster. This issue is fixed in the latest release. 
- - -[id="odo-getting-support_{context}"] -== Getting support - -.For Product - -If you find an error, encounter a bug, or have suggestions for improving the functionality of `{odo-title}`, file an issue in link:http://bugzilla.redhat.com[Bugzilla]. Choose *OpenShift Developer Tools and Services* as a product type and *odo* as a component. - -Provide as many details in the issue description as possible. - -.For Documentation - -If you find an error or have suggestions for improving the documentation, file an issue in link:http://bugzilla.redhat.com[Bugzilla]. Choose the *{product-title}* product type and the *Documentation* component type. - - - - - -////[id="odo-known-issues_{context}"] -== Known issues -//// - -//[id="odo-technology-preview_{context}"] -//== Technology Preview features `{odo-title}` -//// \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/snippets b/cli_reference/developer_cli_odo/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/cli_reference/developer_cli_odo/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/understanding-odo.adoc b/cli_reference/developer_cli_odo/understanding-odo.adoc deleted file mode 100644 index c9901902f1df..000000000000 --- a/cli_reference/developer_cli_odo/understanding-odo.adoc +++ /dev/null @@ -1,20 +0,0 @@ -//// -:_content-type: ASSEMBLY -[id="understanding-odo"] -= Understanding odo -include::_attributes/common-attributes.adoc[] -:context: understanding-odo - -toc::[] - -Red Hat OpenShift Developer CLI (`odo`) is a tool for creating applications on {product-title} and Kubernetes. With `{odo-title}`, you can develop, test, debug, and deploy microservices-based applications on a Kubernetes cluster without having a deep understanding of the platform. - -`{odo-title}` follows a _create and push_ workflow. As a user, when you _create_, the information (or manifest) is stored in a configuration file. When you _push_, the corresponding resources are created on the Kubernetes cluster. All of this configuration is stored in the Kubernetes API for seamless accessibility and functionality. - -`{odo-title}` uses _service_ and _link_ commands to link components and services together. `{odo-title}` achieves this by creating and deploying services based on Kubernetes Operators in the cluster. Services can be created using any of the Operators available on the Operator Hub. After linking a service, `odo` injects the service configuration into the component. Your application can then use this configuration to communicate with the Operator-backed service. 
- -include::modules/odo-key-features.adoc[leveloffset=+1] -include::modules/odo-core-concepts.adoc[leveloffset=+1] -include::modules/odo-listing-components.adoc[leveloffset=+1] -include::modules/odo-telemetry.adoc[leveloffset=+1] -//// \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/_attributes b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/about-odo-in-a-restricted-environment.adoc b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/about-odo-in-a-restricted-environment.adoc deleted file mode 100644 index 594c44db70b1..000000000000 --- a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/about-odo-in-a-restricted-environment.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="about-odo-in-a-restricted-environment"] -= About {odo-title} in a restricted environment -:context: about-odo-in-a-restricted-environment - -toc::[] - - -To run `{odo-title}` in a disconnected cluster or a cluster provisioned in a restricted environment, you must ensure that a cluster administrator has created a cluster with a mirrored registry. - - -To start working in a disconnected cluster, you must first xref:../../../cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc#pushing-the-odo-init-image-to-a-mirror-registry_pushing-the-odo-init-image-to-the-restricted-cluster-registry[push the `odo` init image to the registry of the cluster] and then overwrite the `odo` init image path using the `ODO_BOOTSTRAPPER_IMAGE` environment variable. - - -After you push the `odo` init image, you must xref:../../../cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc#mirroring-a-supported-builder-image_creating-and-deploying-a-component-to-the-disconnected-cluster[mirror a supported builder image] from the registry, xref:../../../cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc#overwriting-the-mirror-registry_creating-and-deploying-a-component-to-the-disconnected-cluster[overwrite a mirror registry] and then xref:../../../cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc#creating-a-nodejs-application-with-odo_creating-and-deploying-a-component-to-the-disconnected-cluster[create your application]. A builder image is necessary to configure a runtime environment for your application and also contains the build tool needed to build your application, for example npm for Node.js or Maven for Java. A mirror registry contains all the necessary dependencies for your application. 
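For illustration, overriding the `odo` init image path described above is a single environment variable export; the registry host, repository, and tag shown here are placeholders for your mirror:

[source,terminal]
----
$ export ODO_BOOTSTRAPPER_IMAGE=<mirror_registry>/<repository>/odo-init-image:<tag>
----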
- -[role="_additional-resources"] -.Additional resources -ifdef::openshift-enterprise,openshift-webscale[] -* xref:../../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installation-about-mirror-registry_installing-mirroring-installation-images[Mirroring images for a disconnected installation] -endif::[] -* xref:../../../registry/accessing-the-registry.adoc#registry-accessing-directly_accessing-the-registry[Accessing the registry] diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc deleted file mode 100644 index 5904d648335f..000000000000 --- a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-and-deploying-a-component-to-the-disconnected-cluster"] -= Creating and deploying a component to the disconnected cluster -include::_attributes/common-attributes.adoc[] -:context: creating-and-deploying-a-component-to-the-disconnected-cluster - -toc::[] - -After you push the `init` image to a cluster with a mirrored registry, you must mirror a supported builder image for your application with the `oc` tool, overwrite the mirror registry using the environment variable, and then create your component. - -== Prerequisites - -* Install `oc` on the client operating system. -* xref:../../../cli_reference/developer_cli_odo/installing-odo.adoc#installing-odo-on-linux_installing-odo[Install `{odo-title}`] on the client operating system. -* Access to an restricted cluster with a configured {product-registry} or a mirror registry. -* xref:../../../cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc#pushing-the-odo-init-image-to-a-mirror-registry_pushing-the-odo-init-image-to-the-restricted-cluster-registry[Push the `odo` init image to your cluster registry]. 
- -include::modules/developer-cli-odo-mirroring-a-supported-builder-image.adoc[leveloffset=+1] -include::modules/developer-cli-odo-overwriting-a-mirror-registry.adoc[leveloffset=+1] -include::modules/developer-cli-odo-creating-and-deploying-a-nodejs-application-with-odo.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-devfile-components-to-the-disconnected-cluster.adoc b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-devfile-components-to-the-disconnected-cluster.adoc deleted file mode 100644 index c4c0e7da0e0f..000000000000 --- a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-devfile-components-to-the-disconnected-cluster.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-and-deploying-devfile-components-to-the-disconnected-cluster"] -= Creating and deploying devfile components to the disconnected cluster -include::_attributes/common-attributes.adoc[] -:context: creating-and-deploying-a-component-to-the-disconnected-cluster - -toc::[] - -include::modules/developer-cli-odo-creating-a-nodejs-application-by-using-a-devfile-in-a-disconnected-cluster.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-creating-a-java-application-by-using-a-devfile-in-a-disconnected-cluster.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/images b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/images deleted file mode 120000 index 4399cbb3c0f3..000000000000 --- a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/images +++ /dev/null @@ -1 +0,0 @@ -../../../images/ \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/modules b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/modules deleted file mode 120000 index 7e8b50bee77a..000000000000 --- a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/modules +++ /dev/null @@ -1 +0,0 @@ -../../../modules/ \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc deleted file mode 100644 index 30cecbd5d8e5..000000000000 --- a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -[id="pushing-the-odo-init-image-to-the-restricted-cluster-registry"] -include::_attributes/common-attributes.adoc[] -= Pushing the {odo-title} init image to the restricted cluster registry -:context: pushing-the-odo-init-image-to-the-restricted-cluster-registry - -toc::[] - -Depending on the configuration of your cluster and your operating system you can either push the `odo` init image to a mirror registry or directly to an {product-registry}. - -== Prerequisites - -* Install `oc` on the client operating system. -* xref:../../../cli_reference/developer_cli_odo/installing-odo.adoc#installing-odo-on-linux_installing-odo[Install `{odo-title}`] on the client operating system. -* Access to a restricted cluster with a configured {product-registry} or a mirror registry. 
- -include::modules/developer-cli-odo-pushing-the-odo-init-image-to-a-mirror-registry.adoc[leveloffset=+1] -include::modules/developer-cli-odo-pushing-the-odo-init-image-to-an-internal-registry-directly.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/snippets b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/snippets deleted file mode 120000 index ce62fd7c41e2..000000000000 --- a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/cli_reference/images b/cli_reference/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/cli_reference/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/cli_reference/index.adoc b/cli_reference/index.adoc deleted file mode 100644 index 6b4bbc18d69a..000000000000 --- a/cli_reference/index.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-tools-overview"] -= {product-title} CLI tools overview -include::_attributes/common-attributes.adoc[] -:context: cli-tools-overview - -toc::[] - -A user performs a range of operations while working on {product-title} such as the following: - -* Managing clusters -* Building, deploying, and managing applications -* Managing deployment processes -* Developing Operators -* Creating and maintaining Operator catalogs - -{product-title} offers a set of command-line interface (CLI) tools that simplify these tasks by enabling users to perform various administration and development operations from the terminal. -These tools expose simple commands to manage the applications, as well as interact with each component of the system. - -[id="cli-tools-list"] -== List of CLI tools - -The following set of CLI tools are available in {product-title}: - -* xref:../cli_reference/openshift_cli/getting-started-cli.adoc#cli-getting-started[OpenShift CLI (oc)]: This is the most commonly used CLI tool by {product-title} users. It helps both cluster administrators and developers to perform end-to-end operations across {product-title} using the terminal. Unlike the web console, it allows the user to work directly with the project source code using command scripts. - -* xref:../cli_reference/kn-cli-tools.adoc#kn-cli-tools[Knative CLI (kn)]: The Knative (`kn`) CLI tool provides simple and intuitive terminal commands that can be used to interact with OpenShift Serverless components, such as Knative Serving and Eventing. - -* xref:../cli_reference/tkn_cli/installing-tkn.adoc#installing-tkn[Pipelines CLI (tkn)]: OpenShift Pipelines is a continuous integration and continuous delivery (CI/CD) solution in {product-title}, which internally uses Tekton. The `tkn` CLI tool provides simple and intuitive commands to interact with OpenShift Pipelines using the terminal. - -* xref:../cli_reference/opm/cli-opm-install.adoc#cli-opm-install[opm CLI]: The `opm` CLI tool helps the Operator developers and cluster administrators to create and maintain the catalogs of Operators from the terminal. - -* xref:../cli_reference/osdk/cli-osdk-install.adoc#cli-osdk-install[Operator SDK]: The Operator SDK, a component of the Operator Framework, provides a CLI tool that Operator developers can use to build, test, and deploy an Operator from the terminal. It simplifies the process of building Kubernetes-native applications, which can require deep, application-specific operational knowledge. 
diff --git a/cli_reference/kn-cli-tools.adoc b/cli_reference/kn-cli-tools.adoc deleted file mode 100644 index e5d4c358d7ff..000000000000 --- a/cli_reference/kn-cli-tools.adoc +++ /dev/null @@ -1,28 +0,0 @@ -:_content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="kn-cli-tools"] -= Knative CLI for use with {ServerlessProductName} -:context: kn-cli-tools - -toc::[] - -The Knative (`kn`) CLI enables simple interaction with Knative components on {product-title}. - -[id="kn-cli-tools-key-features"] -== Key features - -The Knative (`kn`) CLI is designed to make serverless computing tasks simple and concise. -Key features of the Knative CLI include: - -* Deploy serverless applications from the command line. -* Manage features of Knative Serving, such as services, revisions, and traffic-splitting. -* Create and manage Knative Eventing components, such as event sources and triggers. -* Create sink bindings to connect existing Kubernetes applications and Knative services. -* Extend the Knative CLI with flexible plugin architecture, similar to the `kubectl` CLI. -* Configure autoscaling parameters for Knative services. -* Scripted usage, such as waiting for the results of an operation, or deploying custom rollout and rollback strategies. - -[id="kn-cli-tools-installing-kn"] -== Installing the Knative CLI - -See link:https://docs.openshift.com/serverless/1.28/install/installing-kn.html#installing-kn[Installing the Knative CLI]. diff --git a/cli_reference/modules b/cli_reference/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/cli_reference/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/cli_reference/odo-important-update.adoc b/cli_reference/odo-important-update.adoc deleted file mode 100644 index bbb939cd21f1..000000000000 --- a/cli_reference/odo-important-update.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/odo-important-update.adoc - -:_content-type: CONCEPT -[id="odo-important_update_{context}"] -include::_attributes/attributes-openshift-dedicated.adoc[] -include::_attributes/common-attributes.adoc[] -= Important update on `{odo-title}` -:context: odo-important-update - -toc::[] - -Red Hat does not provide information about `{odo-title}` on the {OCP} documentation site. See the link:https://odo.dev/docs/introduction[documentation] maintained by Red Hat and the upstream community for documentation information related to `{odo-title}`. - -[IMPORTANT] -==== -For the materials maintained by the upstream community, Red Hat provides support under link:https://access.redhat.com/solutions/5893251[Cooperative Community Support]. 
-==== - diff --git a/cli_reference/openshift_cli/_attributes b/cli_reference/openshift_cli/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/cli_reference/openshift_cli/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/cli_reference/openshift_cli/administrator-cli-commands.adoc b/cli_reference/openshift_cli/administrator-cli-commands.adoc deleted file mode 100644 index 3b5c3da2e911..000000000000 --- a/cli_reference/openshift_cli/administrator-cli-commands.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-administrator-commands"] -= OpenShift CLI administrator command reference -include::_attributes/common-attributes.adoc[] -:context: cli-administrator-commands - -toc::[] - -This reference provides descriptions and example commands for OpenShift CLI (`oc`) administrator commands. You must have `cluster-admin` or equivalent permissions to use these commands. - -For developer commands, see the xref:../../cli_reference/openshift_cli/developer-cli-commands.adoc#cli-developer-commands[OpenShift CLI developer command reference]. - -Run `oc adm -h` to list all administrator commands or run `oc --help` to get additional details for a specific command. - -// The following file is auto-generated from the openshift/oc repository -// OpenShift CLI (oc) administrator commands -include::modules/oc-adm-by-example-content.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_cli-administrator-commands"] -== Additional resources - -* xref:../../cli_reference/openshift_cli/developer-cli-commands.adoc#cli-developer-commands[OpenShift CLI developer command reference] diff --git a/cli_reference/openshift_cli/configuring-cli.adoc b/cli_reference/openshift_cli/configuring-cli.adoc deleted file mode 100644 index 0eace0d03e61..000000000000 --- a/cli_reference/openshift_cli/configuring-cli.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-configuring-cli"] -= Configuring the OpenShift CLI -include::_attributes/common-attributes.adoc[] -:context: cli-configuring-cli - -toc::[] - -[id="cli-enabling-tab-completion"] -== Enabling tab completion - -You can enable tab completion for the Bash or Zsh shells. - -// Enabling tab completion for Bash -include::modules/cli-configuring-completion.adoc[leveloffset=+2] - -// Enabling tab completion for Zsh -include::modules/cli-configuring-completion-zsh.adoc[leveloffset=+2] diff --git a/cli_reference/openshift_cli/developer-cli-commands.adoc b/cli_reference/openshift_cli/developer-cli-commands.adoc deleted file mode 100644 index 517e2cb7a777..000000000000 --- a/cli_reference/openshift_cli/developer-cli-commands.adoc +++ /dev/null @@ -1,28 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-developer-commands"] -= OpenShift CLI developer command reference -include::_attributes/common-attributes.adoc[] -:context: cli-developer-commands - -toc::[] - -This reference provides descriptions and example commands for OpenShift CLI (`oc`) developer commands. -ifdef::openshift-enterprise,openshift-origin[] -For administrator commands, see the xref:../../cli_reference/openshift_cli/administrator-cli-commands.adoc#cli-administrator-commands[OpenShift CLI administrator command reference]. -endif::openshift-enterprise,openshift-origin[] - -Run `oc help` to list all commands or run `oc --help` to get additional details for a specific command. 
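For example, a brief illustration of the help commands mentioned above; `new-app` is shown only as one example of a developer command:

[source,terminal]
----
$ oc help
$ oc new-app --help
----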
- -// The following file is auto-generated from the openshift/oc repository -// OpenShift CLI (oc) developer commands -include::modules/oc-by-example-content.adoc[leveloffset=+1] - -ifdef::openshift-enterprise,openshift-origin[] - -[role="_additional-resources"] -[id="additional-resources_cli-developer-commands"] -== Additional resources - -* xref:../../cli_reference/openshift_cli/administrator-cli-commands.adoc#cli-administrator-commands[OpenShift CLI administrator command reference] - -endif::openshift-enterprise,openshift-origin[] diff --git a/cli_reference/openshift_cli/extending-cli-plugins.adoc b/cli_reference/openshift_cli/extending-cli-plugins.adoc deleted file mode 100644 index 549e986f667f..000000000000 --- a/cli_reference/openshift_cli/extending-cli-plugins.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-extend-plugins"] -= Extending the OpenShift CLI with plugins -include::_attributes/common-attributes.adoc[] -:context: cli-extend-plugins - -toc::[] - -You can write and install plugins to build on the default `oc` commands, -allowing you to perform new and more complex tasks with the {product-title} CLI. - -// Writing CLI plugins -include::modules/cli-extending-plugins-writing.adoc[leveloffset=+1] - -// Installing and using CLI plugins -include::modules/cli-extending-plugins-installing.adoc[leveloffset=+1] diff --git a/cli_reference/openshift_cli/getting-started-cli.adoc b/cli_reference/openshift_cli/getting-started-cli.adoc deleted file mode 100644 index 8490c339380d..000000000000 --- a/cli_reference/openshift_cli/getting-started-cli.adoc +++ /dev/null @@ -1,53 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-getting-started"] -= Getting started with the OpenShift CLI -include::_attributes/common-attributes.adoc[] -:context: cli-developer-commands - -toc::[] - -// About the CLI -include::modules/cli-about-cli.adoc[leveloffset=+1] - -[id="installing-openshift-cli"] -== Installing the OpenShift CLI - -You can install the OpenShift CLI (`oc`) either by downloading the binary or by using an RPM. 
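As a rough sketch of the binary installation path on Linux, after downloading the `oc` archive for your platform (the archive file name is a placeholder, and the install location is one common choice):

[source,terminal]
----
$ tar xvf <oc_archive>.tar.gz
$ sudo mv oc /usr/local/bin/
$ oc version --client
----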
- -// Installing the CLI by downloading the binary -include::modules/cli-installing-cli.adoc[leveloffset=+2] - -// Installing the CLI by using the web console -include::modules/cli-installing-cli-web-console.adoc[leveloffset=+2] - -// Installing the CLI on Linux by using the web console -include::modules/cli-installing-cli-web-console-linux.adoc[leveloffset=+3] - -// Installing the CLI on Windows by using the web console -include::modules/cli-installing-cli-web-console-windows.adoc[leveloffset=+3] - -// Installing the CLI on macOS by using the web console -include::modules/cli-installing-cli-web-console-macos.adoc[leveloffset=+3] - -ifndef::openshift-origin[] -// Installing the CLI by using an RPM -include::modules/cli-installing-cli-rpm.adoc[leveloffset=+2] -endif::[] - -// Installing the CLI by using Homebrew -include::modules/cli-installing-cli-brew.adoc[leveloffset=+2] - -// Logging in to the CLI -include::modules/cli-logging-in.adoc[leveloffset=+1] - -// Logging in to the CLI by using the web -include::modules/cli-logging-in-web.adoc[leveloffset=+1] - -// Using the CLI -include::modules/cli-using-cli.adoc[leveloffset=+1] - -// Getting help -include::modules/cli-getting-help.adoc[leveloffset=+1] - -// Logging out of the CLI -include::modules/cli-logging-out.adoc[leveloffset=+1] diff --git a/cli_reference/openshift_cli/images b/cli_reference/openshift_cli/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/cli_reference/openshift_cli/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/cli_reference/openshift_cli/managing-cli-plugins-krew.adoc b/cli_reference/openshift_cli/managing-cli-plugins-krew.adoc deleted file mode 100644 index 25d0e4420b04..000000000000 --- a/cli_reference/openshift_cli/managing-cli-plugins-krew.adoc +++ /dev/null @@ -1,28 +0,0 @@ -:_content-type: ASSEMBLY -[id="managing-cli-plugin-krew"] -= Managing CLI plugins with Krew -include::_attributes/common-attributes.adoc[] -:context: managing-cli-plugins-krew - -toc::[] - -You can use Krew to install and manage plugins for the OpenShift CLI (`oc`). - -:FeatureName: Using Krew to install and manage plugins for the OpenShift CLI -include::snippets/technology-preview.adoc[] - -// Installing a CLI plugin with Krew -include::modules/cli-krew-install-plugin.adoc[leveloffset=+1] - -// Updating a CLI plugin with Krew -include::modules/cli-krew-update-plugin.adoc[leveloffset=+1] - -// Removing a CLI plugin with Krew -include::modules/cli-krew-remove-plugin.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_managing-cli-plugins-krew"] -== Additional resources - -* link:https://krew.sigs.k8s.io/[Krew] -* xref:../../cli_reference/openshift_cli/extending-cli-plugins.adoc#cli-extend-plugins[Extending the OpenShift CLI with plugins] diff --git a/cli_reference/openshift_cli/managing-cli-profiles.adoc b/cli_reference/openshift_cli/managing-cli-profiles.adoc deleted file mode 100644 index 8978acebfc22..000000000000 --- a/cli_reference/openshift_cli/managing-cli-profiles.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="managing-cli-profiles"] -= Managing CLI profiles -include::_attributes/common-attributes.adoc[] -:context: managing-cli-profiles - -toc::[] - -A CLI configuration file allows you to configure different profiles, or contexts, for use with the xref:../../cli_reference/index.adoc#cli-tools-overview[CLI tools overview]. 
A context consists of -ifndef::microshift[] -xref:../../authentication/understanding-authentication.adoc#understanding-authentication[user authentication] -endif::[] -ifdef::microshift[] -user authentication -endif::[] -and {product-title} server information associated with a _nickname_. - -include::modules/about-cli-profiles-switch.adoc[leveloffset=+1] - -include::modules/manual-configuration-of-cli-profiles.adoc[leveloffset=+1] - -include::modules/load-and-merge-rules.adoc[leveloffset=+1] diff --git a/cli_reference/openshift_cli/modules b/cli_reference/openshift_cli/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/cli_reference/openshift_cli/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/cli_reference/openshift_cli/snippets b/cli_reference/openshift_cli/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/cli_reference/openshift_cli/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/cli_reference/openshift_cli/usage-oc-kubectl.adoc b/cli_reference/openshift_cli/usage-oc-kubectl.adoc deleted file mode 100644 index a6c2cf909381..000000000000 --- a/cli_reference/openshift_cli/usage-oc-kubectl.adoc +++ /dev/null @@ -1,69 +0,0 @@ -:_content-type: ASSEMBLY -[id="usage-oc-kubectl"] -= Usage of oc and kubectl commands -include::_attributes/common-attributes.adoc[] -:context: usage-oc-kubectl - -The Kubernetes command-line interface (CLI), `kubectl`, can be used to run commands against a Kubernetes cluster. Because {product-title} is a certified Kubernetes distribution, you can use the supported `kubectl` binaries that ship with {product-title}, or you can gain extended functionality by using the `oc` binary. - -== The oc binary - -The `oc` binary offers the same capabilities as the `kubectl` binary, but it extends to natively support additional {product-title} features, including: - -* **Full support for {product-title} resources** -+ -Resources such as `DeploymentConfig`, `BuildConfig`, `Route`, `ImageStream`, and `ImageStreamTag` objects are specific to {product-title} distributions, and build upon standard Kubernetes primitives. -+ -* **Authentication** -+ -ifndef::microshift[] -The `oc` binary offers a built-in `login` command for authentication and lets you work with {product-title} projects, which map Kubernetes namespaces to authenticated users. -Read xref:../../authentication/understanding-authentication.adoc#understanding-authentication[Understanding authentication] for more information. -endif::[] -+ -ifdef::microshift[] -The `oc` binary offers a built-in `login` command for authentication to {product-title}. -endif::[] -+ -* **Additional commands** -+ -The additional command `oc new-app`, for example, makes it easier to get new applications started using existing source code or pre-built images. Similarly, the additional command `oc new-project` makes it easier to start a project that you can switch to as your default. - -[IMPORTANT] -==== -If you installed an earlier version of the `oc` binary, you cannot use it to complete all of the commands in {product-title} {product-version}. If you want the latest features, you must download and install the latest version of the `oc` binary corresponding to your {product-title} server version. -==== - -Non-security API changes will involve, at minimum, two minor releases (4.1 to 4.2 to 4.3, for example) to allow older `oc` binaries to update. Using new capabilities might require newer `oc` binaries. 
A 4.3 server might have additional capabilities that a 4.2 `oc` binary cannot use and a 4.3 `oc` binary might have additional capabilities that are unsupported by a 4.2 server. - -.Compatibility Matrix - -[cols="1,1,1"] -|=== - -| -|*X.Y* (`oc` Client) -|*X.Y+N* footnote:versionpolicyn[Where *N* is a number greater than or equal to 1.] (`oc` Client) - -|*X.Y* (Server) -|image:redcircle-1.png[] -|image:redcircle-3.png[] - -|*X.Y+N* footnote:versionpolicyn[] (Server) -|image:redcircle-2.png[] -|image:redcircle-1.png[] - -|=== -image:redcircle-1.png[] Fully compatible. - -image:redcircle-2.png[] `oc` client might not be able to access server features. - -image:redcircle-3.png[] `oc` client might provide options and features that might not be compatible with the accessed server. - -== The kubectl binary - -The `kubectl` binary is provided as a means to support existing workflows and scripts for new {product-title} users coming from a standard Kubernetes environment, or for those who prefer to use the `kubectl` CLI. Existing users of `kubectl` can continue to use the binary to interact with Kubernetes primitives, with no changes required to the {product-title} cluster. - -You can install the supported `kubectl` binary by following the steps to xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#cli-installing-cli_cli-developer-commands[Install the OpenShift CLI]. The `kubectl` binary is included in the archive if you download the binary, or is installed when you install the CLI by using an RPM. - -For more information, see the link:https://kubernetes.io/docs/reference/kubectl/overview/[kubectl documentation]. diff --git a/cli_reference/opm/_attributes b/cli_reference/opm/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/cli_reference/opm/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/cli_reference/opm/cli-opm-install.adoc b/cli_reference/opm/cli-opm-install.adoc deleted file mode 100644 index 70d2c01c01bf..000000000000 --- a/cli_reference/opm/cli-opm-install.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-opm-install"] -= Installing the opm CLI -include::_attributes/common-attributes.adoc[] -:context: cli-opm-install - -toc::[] - -include::modules/olm-about-opm.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../operators/understanding/olm-packaging-format.adoc#olm-bundle-format_olm-packaging-format[Operator Framework packaging format] for more information about the bundle format. -* To create a bundle image using the Operator SDK, see -xref:../../operators/operator_sdk/osdk-working-bundle-images.adoc#osdk-working-bundle-images[Working with bundle images]. - -include::modules/olm-installing-opm.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="opm-addtl-resources"] -== Additional resources - -* See xref:../../operators/admin/olm-managing-custom-catalogs.adoc#olm-managing-custom-catalogs[Managing custom catalogs] for `opm` procedures including creating, updating, and pruning catalogs. 
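After following the installation module above, you can confirm that the `opm` binary is available on your `PATH`; this is a minimal check only:

[source,terminal]
----
$ opm version
----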
diff --git a/cli_reference/opm/cli-opm-ref.adoc b/cli_reference/opm/cli-opm-ref.adoc deleted file mode 100644 index ae7d744ef70f..000000000000 --- a/cli_reference/opm/cli-opm-ref.adoc +++ /dev/null @@ -1,46 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-opm-ref"] -= opm CLI reference -include::_attributes/common-attributes.adoc[] -:context: cli-opm-ref - -toc::[] - -The `opm` command-line interface (CLI) is a tool for creating and maintaining Operator catalogs. - -.`opm` CLI syntax -[source,terminal] ----- -$ opm [] [] [] ----- - -.Global flags -[options="header",cols="1,3"] -|=== -|Flag |Description - -|`-skip-tls-verify` -|Skip TLS certificate verification for container image registries while pulling bundles or indexes. - -|`--use-http` -|When you pull bundles, use plain HTTP for container image registries. - -|=== - -:FeatureName: The SQLite-based catalog format, including the related CLI commands, -include::snippets/deprecated-feature.adoc[] - -include::modules/opm-cli-ref-generate.adoc[leveloffset=+1] -include::modules/opm-cli-ref-index.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources - -* xref:../../operators/understanding/olm-packaging-format.adoc#olm-file-based-catalogs_olm-packaging-format[Operator Framework packaging format] -* xref:../../operators/admin/olm-managing-custom-catalogs.adoc#olm-managing-custom-catalogs-fb[Managing custom catalogs] -* xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#installing-mirroring-disconnected[Mirroring images for a disconnected installation using the oc-mirror plugin] - -include::modules/opm-cli-ref-init.adoc[leveloffset=+1] -include::modules/opm-cli-ref-migrate.adoc[leveloffset=+1] -include::modules/opm-cli-ref-render.adoc[leveloffset=+1] -include::modules/opm-cli-ref-serve.adoc[leveloffset=+1] -include::modules/opm-cli-ref-validate.adoc[leveloffset=+1] \ No newline at end of file diff --git a/cli_reference/opm/images b/cli_reference/opm/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/cli_reference/opm/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/cli_reference/opm/modules b/cli_reference/opm/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/cli_reference/opm/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/cli_reference/opm/snippets b/cli_reference/opm/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/cli_reference/opm/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/cli_reference/osdk/_attributes b/cli_reference/osdk/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/cli_reference/osdk/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/cli_reference/osdk/cli-osdk-install.adoc b/cli_reference/osdk/cli-osdk-install.adoc deleted file mode 100644 index 085ad64eb12b..000000000000 --- a/cli_reference/osdk/cli-osdk-install.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-osdk-install"] -= Installing the Operator SDK CLI -include::_attributes/common-attributes.adoc[] -:context: cli-osdk-install - -toc::[] - -The Operator SDK provides a command-line interface (CLI) tool that Operator developers can use to build, test, and deploy an Operator. You can install the Operator SDK CLI on your workstation so that you are prepared to start authoring your own Operators. 
- -Operator authors with cluster administrator access to a Kubernetes-based cluster, such as {product-title}, can use the Operator SDK CLI to develop their own Operators based on Go, Ansible, Java, or Helm. link:https://kubebuilder.io/[Kubebuilder] is embedded into the Operator SDK as the scaffolding solution for Go-based Operators, which means existing Kubebuilder projects can be used as is with the Operator SDK and continue to work. - -See xref:../../operators/operator_sdk/osdk-about.adoc#osdk-about[Developing Operators] for full documentation on the Operator SDK. - -[NOTE] -==== -{product-title} {product-version} supports Operator SDK {osdk_ver}. -==== - -include::modules/osdk-installing-cli-linux-macos.adoc[leveloffset=+1] - -include::modules/osdk-installing-cli-macos.adoc[leveloffset=+1] \ No newline at end of file diff --git a/cli_reference/osdk/cli-osdk-ref.adoc b/cli_reference/osdk/cli-osdk-ref.adoc deleted file mode 100644 index 10b97dfa8c90..000000000000 --- a/cli_reference/osdk/cli-osdk-ref.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-osdk-ref"] -= Operator SDK CLI reference -include::_attributes/common-attributes.adoc[] -:context: cli-osdk-ref - -toc::[] - -The Operator SDK command-line interface (CLI) is a development kit designed to make writing Operators easier. - -.Operator SDK CLI syntax -[source,terminal] ----- -$ operator-sdk [] [] [] ----- - -See xref:../../operators/operator_sdk/osdk-about.adoc#osdk-about[Developing Operators] for full documentation on the Operator SDK. - -include::modules/osdk-cli-ref-bundle.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-cleanup.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-completion.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-create.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-generate.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-generate-bundle.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../operators/operator_sdk/osdk-working-bundle-images.adoc#osdk-bundle-deploy-olm_osdk-working-bundle-images[Bundling an Operator and deploying with Operator Lifecycle Manager] for a full procedure that includes using the `make bundle` command to call the `generate bundle` subcommand. - -include::modules/osdk-cli-ref-generate-kustomize.adoc[leveloffset=+2] - -include::modules/osdk-cli-ref-init.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-run.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-run-bundle.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../operators/understanding/olm/olm-understanding-operatorgroups.adoc#olm-operatorgroups-membership_olm-understanding-operatorgroups[Operator group membership] for details on possible install modes. - -include::modules/osdk-cli-ref-run-bundle-upgrade.adoc[leveloffset=+2] -include::modules/osdk-cli-ref-scorecard.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../operators/operator_sdk/osdk-scorecard.adoc#osdk-scorecard[Validating Operators using the scorecard tool] for details about running the scorecard tool. 
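As a hedged example of the scorecard invocation referenced above, assuming your Operator project keeps its bundle manifests in a `./bundle` directory:

[source,terminal]
----
$ operator-sdk scorecard ./bundle
----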
diff --git a/cli_reference/osdk/images b/cli_reference/osdk/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/cli_reference/osdk/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/cli_reference/osdk/modules b/cli_reference/osdk/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/cli_reference/osdk/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/cli_reference/osdk/snippets b/cli_reference/osdk/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/cli_reference/osdk/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/cli_reference/snippets b/cli_reference/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/cli_reference/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/cli_reference/tkn_cli/_attributes b/cli_reference/tkn_cli/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/cli_reference/tkn_cli/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/cli_reference/tkn_cli/images b/cli_reference/tkn_cli/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/cli_reference/tkn_cli/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/cli_reference/tkn_cli/installing-tkn.adoc b/cli_reference/tkn_cli/installing-tkn.adoc deleted file mode 100644 index 5ea19aa0ff94..000000000000 --- a/cli_reference/tkn_cli/installing-tkn.adoc +++ /dev/null @@ -1,38 +0,0 @@ -:_content-type: ASSEMBLY -[id='installing-tkn'] -= Installing tkn -include::_attributes/common-attributes.adoc[] -:context: installing-tkn - -toc::[] - -Use the CLI tool to manage {pipelines-title} from a terminal. The following section describes how to install the CLI tool on different platforms. - -You can also find the URL to the latest binaries from the {product-title} web console by clicking the *?* icon in the upper-right corner and selecting *Command Line Tools*. 
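Whichever installation method you use, a quick way to confirm that the CLI is available is to print its version; this minimal check assumes the binary is already on your `PATH`:

[source,terminal]
----
$ tkn version
----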
- -:FeatureName: Running {pipelines-title} on ARM hardware -include::snippets/technology-preview.adoc[] - -[NOTE] -==== -Both the archives and the RPMs contain the following executables: - -* tkn -* tkn-pac -* opc -==== - -:FeatureName: Running {pipelines-title} with the `opc` CLI tool -include::snippets/technology-preview.adoc[] - -// Install tkn on Linux -include::modules/op-installing-tkn-on-linux.adoc[leveloffset=+1] - -// Install tkn on Linux using RPM -include::modules/op-installing-tkn-on-linux-using-rpm.adoc[leveloffset=+1] - -//Install tkn on Windows -include::modules/op-installing-tkn-on-windows.adoc[leveloffset=+1] - -//Install tkn on macOS -include::modules/op-installing-tkn-on-macos.adoc[leveloffset=+1] diff --git a/cli_reference/tkn_cli/modules b/cli_reference/tkn_cli/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/cli_reference/tkn_cli/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/cli_reference/tkn_cli/op-configuring-tkn.adoc b/cli_reference/tkn_cli/op-configuring-tkn.adoc deleted file mode 100644 index 675db608c42c..000000000000 --- a/cli_reference/tkn_cli/op-configuring-tkn.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_content-type: ASSEMBLY -[id="op-configuring-tkn"] -= Configuring the OpenShift Pipelines tkn CLI -include::_attributes/common-attributes.adoc[] -:context: configuring-tkn - -toc::[] - -Configure the {pipelines-title} `tkn` CLI to enable tab completion. - -// Enabling tab completion -include::modules/op-tkn-enabling-tab-completion.adoc[leveloffset=+1] diff --git a/cli_reference/tkn_cli/op-tkn-reference.adoc b/cli_reference/tkn_cli/op-tkn-reference.adoc deleted file mode 100644 index 66e36f075552..000000000000 --- a/cli_reference/tkn_cli/op-tkn-reference.adoc +++ /dev/null @@ -1,46 +0,0 @@ -:_content-type: ASSEMBLY -[id='op-tkn-reference'] -= OpenShift Pipelines tkn reference -include::_attributes/common-attributes.adoc[] -:context: op-tkn-reference - -toc::[] - - -This section lists the basic `tkn` CLI commands. 
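For example, two commonly used invocations, shown only as a sketch; the pipeline run name is a placeholder for one returned by the list command:

[source,terminal]
----
$ tkn pipeline list
$ tkn pipelinerun logs <pipelinerun_name> -f
----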
- -== Basic syntax -`tkn [command or options] [arguments...]` - -== Global options -`--help, -h` - -// Utility commands -include::modules/op-tkn-utility-commands.adoc[leveloffset=+1] - -// Pipeline management commands -include::modules/op-tkn-pipeline-management.adoc[leveloffset=+1] - -// Pipeline run commands -include::modules/op-tkn-pipeline-run.adoc[leveloffset=+1] - -// Task management commands -include::modules/op-tkn-task-management.adoc[leveloffset=+1] - -// Task run commands -include::modules/op-tkn-task-run.adoc[leveloffset=+1] - -// Condition management commands -include::modules/op-tkn-condition-management.adoc[leveloffset=+1] - -// Pipeline resources commands -include::modules/op-tkn-pipeline-resource-management.adoc[leveloffset=+1] - -// ClusterTask management commands -include::modules/op-tkn-clustertask-management.adoc[leveloffset=+1] - -// Trigger management commands -include::modules/op-tkn-trigger-management.adoc[leveloffset=+1] - -// Hub interaction commands -include::modules/op-tkn-hub-interaction.adoc[leveloffset=+1] diff --git a/cli_reference/tkn_cli/snippets b/cli_reference/tkn_cli/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/cli_reference/tkn_cli/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/cloud_providers/PLACEHOLDER b/cloud_providers/PLACEHOLDER deleted file mode 100644 index 4020393e57eb..000000000000 --- a/cloud_providers/PLACEHOLDER +++ /dev/null @@ -1,2 +0,0 @@ -Please delete this file once you have assemblies here. - diff --git a/cloud_providers/_attributes b/cloud_providers/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/cloud_providers/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/cloud_providers/images b/cloud_providers/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/cloud_providers/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/cloud_providers/modules b/cloud_providers/modules deleted file mode 120000 index 43aab75b53c9..000000000000 --- a/cloud_providers/modules +++ /dev/null @@ -1 +0,0 @@ -../modules/ \ No newline at end of file diff --git a/contributing_to_docs/contributing.adoc b/contributing_to_docs/contributing.adoc deleted file mode 100644 index 3f051908ad0d..000000000000 --- a/contributing_to_docs/contributing.adoc +++ /dev/null @@ -1,231 +0,0 @@ -[id="contributing-to-docs-contributing"] -= Contribute to OpenShift documentation -:icons: -:toc: macro -:toc-title: -:toclevels: 1 -:description: Basic information about the OpenShift GitHub repository -:imagesdir: ../images - -toc::[] - -== Different ways to contribute - -There are a few different ways you can contribute to OpenShift documentation: - -* Email the OpenShift Docs team at openshift-docs@redhat.com. -* Create a link:https://github.com/openshift/openshift-docs/issues/new[GitHub] or link:https://issues.redhat.com/secure/CreateIssueDetails!init.jspa?pid=12332330&summary=Documentation_issue&issuetype=1&components=12367614&priority=10200&versions=12385624[Jira issue] for the most relevant documentation component. -* Submit a pull request (PR). You can create a local clone of your own fork of the link:https://github.com/openshift/openshift-docs[openshift-docs repository], make your changes, and submit a PR. This option is best if you have substantial changes. If you open a PR, be sure that all of its contents are related and apply to the same versions. 
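If you choose the pull request route, the initial setup typically looks like the following sketch, where the GitHub username is a placeholder and the `upstream` remote name matches the conventions used later in this guide:

----
$ git clone git@github.com:<your_github_username>/openshift-docs.git
$ cd openshift-docs
$ git remote add upstream https://github.com/openshift/openshift-docs.git
----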
- -*What happens when you submit a PR?* - -The following diagram outlines the OpenShift documentation contribution process: - -image::osdocs-contribution-workflow.png[OpenShift documentation contribution workflow] - -When you submit a PR, the https://github.com/orgs/openshift/teams/team-documentation[OpenShift Docs team] reviews the PR and arranges further reviews by Quality Engineering (QE), subject matter experts (SMEs), and others, as required. If the PR requires changes, updates, or corrections, the reviewers add comments in the PR. We might request that you make the changes, or let you know that we incorporated your content in a different PR. Occasionally, we might add commits to the original PR directly. When the PR has been reviewed, all updates are complete, and all commits are squashed, the documentation team merges the PR and applies it to the valid versions. - -For a more detailed description of the contribution workflow, see link:create_or_edit_content.adoc#understanding-the-contribution-workflow[Understanding the contribution workflow]. - -== Repository organization -Each top directory in the OpenShift documentation repository can include a -collection of top level assemblies and subdirectories that contain more -assemblies. The exceptions to this rule are directories whose names -start with an underscore (like `_builder_lib` and `_javascripts`), which contain -the assets used to generate the finished documentation. - -Each top level `` directory contains AsciiDoc assembly files, any `` -subdirectories, and symlinks to the `images` and `modules` directories that -contain all the images and modules for the collection. - ----- -/ -/topic_dir1 -/subtopic_dir1 -/subtopic_dirN -/topic_dir/assembly1.adoc -/topic_dir/assemblyN.adoc -/topic_dir/subtopic_dir1/assembly1.adoc -/topic_dir/subtopic_dirN/assemblyN.adoc -/topic_dir/~images -/topic_dir/~modules -... -/topic_dir2 ----- - -== Version management -Most of the content applies to all five OpenShift products: OKD, OpenShift -Online, OpenShift Dedicated, ROSA and OpenShift Container Platform. While a large -amount of content is reused for all product collections, some information -applies to only specific collections. Content inclusion and exclusion is managed -on the assembly level by specifying distributions in the -`_topic_map.yml` files in the `_topic_maps` folder or by using `ifdef/endif` statements in individual -files. - -//// -While it is _possible_ -to accomplish this solely with Git branches to maintain slightly different -versions of a given topic, doing so would make the task of maintaining internal -consistency extremely difficult for content contributors. - -Git branching is still extremely valuable, and serves the important role of -tracking the release versions of documentation for the various OpenShift -products. -//// - -=== Conditional text between products -OpenShift documentation uses AsciiDoc's `ifdef/endif` macro to conditionalize -and reuse content across the different OpenShift products, down to the -single-line level. - -The supported distribution attributes used with the OpenShift build mechanism -are: - -* _openshift-origin_ -* _openshift-online_ -* _openshift-enterprise_ -* _openshift-dedicated_ -* _openshift-aro_ -* _openshift-webscale_ -* _openshift-rosa_ - -These attributes can be used by themselves or in conjunction to conditionalize -text within an assembly or module. - -Here is an example of this concept in use: - ----- -This first line is unconditionalized, and will appear for all versions. 
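// You can condition on any of the supported distribution attributes; for example, openshift-dedicated:
\ifdef::openshift-dedicated[]
This line will only appear for OpenShift Dedicated.
\endif::[]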
\ifdef::openshift-online[]
This line will only appear for OpenShift Online.
\endif::[]

\ifdef::openshift-enterprise[]
This line will only appear for OpenShift Container Platform.
\endif::[]

\ifdef::openshift-origin,openshift-enterprise[]
This line will appear for OKD and OpenShift Container Platform, but not for OpenShift Online or OpenShift Dedicated.
\endif::[]
----

Note the following limitation when conditionalizing text:

* While the `ifdef/endif` blocks have no size limit, do not use them to conditionalize an entire file. If an entire file is specific to only some OpenShift distributions, specify those distributions in the `_topic_map.yml` file.

== Release branches

With the combination of conditionalizing content within files with `ifdef/endif` and conditionalizing whole files in the `_topic_map.yml` file, the `main` branch of this repository always contains a complete set of documentation for all OpenShift products. As new versions of an OpenShift product are released, the `main` branch is merged down to new or existing release branches. Here is the general naming scheme used in the branches:

* `main` - This is our *working* branch.
* `enterprise-N.N` - OpenShift Container Platform support releases. The docs for OpenShift Online and OpenShift Dedicated are based on the appropriate `enterprise-N.N` branch.

Every 12 hours, the documentation web sites are rebuilt for each of these branches. This way, the published content for each released version of an OpenShift product remains the same while development continues on the `main` branch. Additionally, any corrections or additions that are cherry-picked into the release branches show up in the published documentation after 12 hours.

[NOTE]
====
All OpenShift content development for the 4.x stream occurs on the `main`, or *working*, branch. Therefore, when submitting your work, create the PR against the `main` branch. After it is reviewed, a writer applies the content to the relevant release branches. If you know which branches a change applies to, be sure to specify it in your PR.

When adding or updating content for version 3.11, create a feature branch against `enterprise-3.11` to submit your changes.
====

== Adding files to the collection
After you create assembly files, you must add them to the `_topic_map.yml` file so that the build system can render them. The documentation build system reads the `_distro_map.yml` file from the `main` branch to determine which branches to build, and then reads the `_topic_map.yml` file for each of those branches to construct the content from the source files and publish it to the relevant product site at https://docs.openshift.com. The build system _only_ reads these topic map files to determine which assembly files to include. Therefore, every new assembly must be included in the `_topic_map.yml` file to be processed by the build system.

For all supported versions, the topic map files are available in the `_topic_maps` folder. Older versions use a single `_topic_map.yml` file in the root folder.

OpenShift Dedicated and OpenShift ROSA have their own topic maps: `_topic_map_osd.yml` and `_topic_map_rosa.yml`. Coordinate edits to these files with Service Delivery documentation team members because that team is primarily responsible for maintaining this content.

[NOTE]
====
Module files are included in the appropriate assembly files.
Modules are not added directly to the `_topic_map.yml` file. -==== - -=== Topic map file format -For supported branches the `_topic_map.yml` is based in the `_topic_maps` folder in the root directory and are organized (primarily) by distributions. - -The `_topic_map.yml` file uses the following format: - ----- ---- //<1> -Name: Origin of the Species <2> -Dir: origin_of_the_species <3> -Distros: all <4> -Topics: - - Name: The Majestic Marmoset <5> - File: the_majestic_marmoset <6> - Distros: all - - Name: The Curious Crocodile - File: the_curious_crocodile - Distros: openshift-online,openshift-enterprise <4> - - Name: The Numerous Nematodes - Dir: the_numerous_nematodes <7> - Topics: - - Name: The Wily Worm <8> - File: the_wily_worm - - Name: The Acrobatic Ascarid <= Sub-topic 2 name - File: the_acrobatic_ascarid <= Sub-topic 2 file under / ----- -<1> Record separator at the top of each topic group. -<2> Display name of topic group. -<3> Directory name of topic group. -<4> Which OpenShift versions this topic group is part of. -* The *Distros* setting is optional for topic groups and topic items. By -default, if the *Distros* setting is not used, it is processed as if it was set -to *Distros: all* for that particular topic or topic group. This means that -topic or topic group will appear in all product documentation versions. -* The *all* value for *Distros* is a synonym for -_openshift-origin,openshift-enterprise,openshift-online,openshift-dedicated,openshift-aro,openshift-webscale_. -* The *all* value overrides other values, so _openshift-online,all_ is processed -as *all*. -* Do not use _openshift-dedicated_ or _openshift-rosa_ in the main `_topic_map.yml` file. Use the distribution specific topic map file. -<5> Assembly name. -<6> Assembly file under the topic group dir without `.adoc`. -<7> This topic is actually a subtopic group. Instead of a `File` path it has a -`Dir` path and `Topics`, just like a top-level topic group. -<8> Assemblies belonging to a subtopic group are listed just like regular assemblies -with a `Name` and `File`. - - - -== Next steps -* First, you should link:tools_and_setup.adoc[install and set up the tools and software] -on your workstation so that you can contribute. -* Next, link:doc_guidelines.adoc[review the documentation guidelines] to -understand some basic guidelines to keep things consistent -across our content. -* If you are ready to create content, or want to edit existing content, the -link:create_or_edit_content.adoc[create or edit content] topic describes how -you can do this by creating a working branch. diff --git a/contributing_to_docs/contributing_user_stories.adoc b/contributing_to_docs/contributing_user_stories.adoc deleted file mode 100644 index 3a3f9ae6878d..000000000000 --- a/contributing_to_docs/contributing_user_stories.adoc +++ /dev/null @@ -1,97 +0,0 @@ -[[contributing-user-stories]] -= Contribute user stories to OpenShift documentation -:icons: -:toc: macro -:toc-title: -:toclevels: 1 -:description: Basic information about how to create user stories for OpenShift GitHub repository - -toc::[] - -== Modularization backstory -OpenShift docs are modularized, starting from OpenShift 4.1. -All existing content has been replaced with content that is based on user stories and -complies with the modularization guidelines. All future content must both -support a user story and be modular. - -== How do I contribute modularized content? 
-To contribute modularized content, you need to write a user story, create -documentation modules to support the user story, and create an assembly for the -story. - -== What if I don't want to write in modules? -If you don't want to write the modules yourself but have a content change, -write a user story, provide details to support the story, and reach out to the -OpenShift docs team. - -== How do I write a user story? Is there a template? -Instead of a template, we have a series of questions for you to answer to -create the user story. Follow the same steps if you are writing the modules -yourself or if you plan to work with the docs team. - -The basic format of a user story is: - ----- -As a , I want to because . ----- - -For example, "As a cluster administrator, I want to enable an Auto Scaling group to manage my OpenShift Enterprise -cluster deployed on AWS because I want my node count to scale based on application demand." - -Use the following questions to guide you in providing the context for your user story and the necessary technical details to start a draft. -You don't have to answer all of these questions, only the ones that make sense to your particular user story. - -=== Feature info -* What is the feature being developed? What does it do? -* How does it work? -* Are there any configuration files/settings/parameters being added or modified? Are any new commands being added or modified? -* What tools or software does the docs team need to test how this feature works? Does the docs team need to update any installed software? -* Are there any existing blogs, Wiki posts, Kbase articles, or Bzs involving this feature? Or any other existing information that may help to understand this feature? - -=== Customer impact -* Who is the intended audience for this feature? If it's for Enterprise, does it apply to developers, admins, or both? -* Why is it important for our users? Why would they want to use this feature? How does it benefit them? -* How will the customer use it? Is there a use case? -* How will the customer interact with this feature? Client tools? Web console? REST API? - -=== Product info -* Is this feature being developed for Online? Enterprise? Dedicated? OKD? All? -* Will this feature be rolled back to previous versions? -* If it's for Online, what type of plan do users need to use this feature? -* Is it user-facing, or more behind-the-scenes admin stuff? -* What tools or software does the docs team need to test how this feature works? - -== How do I write in modules? -The full guidelines for writing modules are in the Customer Content Services (CCS) -link:https://redhat-documentation.github.io/modular-docs/[modularization guide]. - -The main concepts of writing in modules are: - -* Each assembly contains the information required for a user to achieve a single -goal. -* Assemblies contain primarily `include` statements, which are references to -smaller, targeted module files. -* Modules can contain conceptual information, reference information, or steps, -but not a combination of the types. - -For example, a simple assembly might contain the following three modules: - -* A concept module that contains background information about the feature -that the user will configure -* A reference module that contains an annotated sample yaml file that the user -needs to modify -* A procedure module that contains the prerequisites that the user needs to -complete before they start configuring and steps that the user takes to -complete the configuration. 
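In AsciiDoc terms, the body of such an assembly is mostly `include` statements that pull in those three modules, for example (the module file names here are hypothetical):

----
\include::modules/about-example-feature.adoc[leveloffset=+1]

\include::modules/example-feature-configuration-reference.adoc[leveloffset=+1]

\include::modules/configuring-example-feature.adoc[leveloffset=+1]
----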
- -The `enterprise-4.1` branch contains sample assemblies that explain how to -get started with modular documentation for OpenShift and that serve as -references for including modules in assemblies. The -link:https://raw.githubusercontent.com/openshift/openshift-docs/enterprise-4.1/mod_docs_guide/mod-docs-conventions-ocp.adoc[Modular Docs OpenShift conventions] -assembly contains the -link:https://raw.githubusercontent.com/openshift/openshift-docs/enterprise-4.1/modules/mod-docs-ocp-conventions.adoc[Modular docs OpenShift conventions] -reference module, and the -link:https://github.com/openshift/openshift-docs/blob/enterprise-4.1/mod_docs_guide/getting-started-modular-docs-ocp.adoc[Getting started with modular docs on OpenShift] -assembly contains the -link:https://raw.githubusercontent.com/openshift/openshift-docs/enterprise-4.1/modules/creating-your-first-content.adoc[Creating your first content] -procedure module. diff --git a/contributing_to_docs/create_or_edit_content.adoc b/contributing_to_docs/create_or_edit_content.adoc deleted file mode 100644 index 57c0605697fe..000000000000 --- a/contributing_to_docs/create_or_edit_content.adoc +++ /dev/null @@ -1,270 +0,0 @@ -[id="contributing-to-docs-create-or-edit-content"] -= Create content or edit existing content -:icons: -:toc: macro -:toc-title: -:toclevels: 1 -:description: Create feature branch to contribute new content or updates -:imagesdir: ../images - -toc::[] - -== Before you begin -Before you create or edit content: - -* Read and review the link:contributing.adoc[Contribute to OpenShift documentation] -topic to understand some basics -* link:tools_and_setup.adoc[Install and set up the tools and software] -required to contribute -* Read and review the link:doc_guidelines.adoc[Documentation guidelines] topic -to understand the basic guidelines for consistency - -== Understanding the contribution workflow - -The following diagram outlines the steps required to add content to the OpenShift documentation: - -image::osdocs-contribution-workflow.png[OpenShift documentation contribution workflow] - -After you have identified a documentation requirement and created a ticket, you can contribute to the documentation directly or the OpenShift Docs team can create the content. - -When you contribute content directly, you must create a feature branch in a local clone of your own fork of the link:https://github.com/openshift/openshift-docs[openshift-docs repository]. After gathering stakeholder input and completing your technical testing, you can develop your documentation draft in your local feature branch. For more information about working with feature branches, see link:https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-branches[the GitHub documentation]. By using AsciiBinder on your local machine, you can create a local preview to review your updates. - -To submit your content for review, you must push your local feature branch to your openshift-docs fork on GitHub and create a pull request (PR). The PR is a request for the updates in your feature branch on your fork to be merged into the relevant branch in the mainstream openshift-docs repository. In the *Open a pull request* dialog, you can add a description, review the content updates, and create the PR. After your PR is created, the Netlify bot automatically generates a preview build and provides a preview link in a PR comment. - -The OpenShift Docs team reviews the PR and the Netlify preview build. 
The team also requests reviews from Quality Engineering (QE), subject matter experts (SMEs), and others, depending on the content that is submitted. You can apply any suggested changes by updating the original commit in your local feature branch. If you have multiple commits in your PR, you must squash them into one commit. After you push the additional updates to your fork, the PR and the Netlify preview are updated automatically. - -When all of the required approvals are in place, the OpenShift Docs team merges the PR and cherry picks the content to the relevant branches. When the PR is merged and cherry picked, the content is automatically published after a short while. The OpenShift Docs team then checks the published content, add links in the documentation ticket, and closes the ticket to complete the request. - -The following sections in this document provide detailed steps to create or edit OpenShift documentation content. - -== Ensure your local repository is in sync with the remote -Before you create a local feature branch, it is good practice to ensure that -your local source branch is in sync with the remote and that you have all the -latest changes. You must also ensure that your forked repository is also in sync -with the remote repository. - -[NOTE] -==== -Because most changes in this repository must be committed to the `main` -branch, the following process uses `main` as the name of the source -branch. If you must use another branch as the source for your change, make -sure that you consistently use that branch name instead of `main`. -==== - -1. From your local repository, make sure you have the `main` branch checked -out: -+ ----- -$ git checkout main ----- - -2. Fetch the current state of the OpenShift documentation repository: -+ ----- -$ git fetch upstream ----- - -3. Incorporate the commits from the remote repository, in this case -`openshift/openshift-docs`, into your local repository: -+ ----- -$ git rebase upstream/main ----- - -4. Push the latest updates to your forked repository so that it is also in sync -with the remote: -+ ----- -$ git push origin main ----- - -== Add content or update existing content on local branch -With your local and forked repositories in sync with the remote, you can now -create a local feature branch where you will make all your updates, or create -any new content. - -*Step 1:* Create local feature branch - -The following command creates a local feature branch from the branch that you are currently on, and checks it out -automatically. Be sure to replace `` with a suitable name. -Also, be sure that the changes made on this branch are closely related. -You must create separate PRs for bugfix changes (for an old or current release) -and enhancement changes (for an upcoming new release). - ----- -$ git checkout -b ----- - -[NOTE] -==== -This command creates a new specified branch and also checks it out, so you will -automatically switch to the new branch. -==== - -*Step 2:* Create content or update existing content as required - -With the local feature branch created and checked out, you can now edit any content or -start adding new content. - -Ensure that any new file contains the required metadata as described -in the link:doc_guidelines.adoc[documentation guidelines] topic, including -naming and title conventions. 
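At this point you can also build a local preview to check your work before committing. Assuming that AsciiBinder is installed as described in the tools and setup topic, run it from the repository root:

----
$ asciibinder build
----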
- -*Step 3:* Add all of your changes to a pending commit - -When you are finished making all of your changes, used asciibinder to build -the updated or new content, and reviewed the rendered changes, run the following -command to add those changes to a pending commit: - ----- -$ git add . ----- - -*Step 4:* Commit your changes - -After adding your changes to a pending commit, run the following command to -commit those changes locally: - ----- -$ git commit -am "Detailed comments about what changes were made; for example, fixed typo" ----- - -*Step 5:* Rebase updates from `main` into your feature branch - -Remember that you must rebase against the branch that you created this feature -branch from. In most cases, it will be the main branch for the 4.x stream. - ----- -$ git rebase upstream/main ----- - -[NOTE] -==== -If you find any conflicts you must fix those, and repeat steps 3 and 4. -==== - -*Step 6:* Push all changes to your GitHub account - -After you have rebased, fixed any conflicts, and committed your changes, you can -push them to your GitHub account. This command adds your local feature branch to -your GitHub repository: - ----- -$ git push origin ----- - -[id="submit-PR"] -== Submit PR to merge your work - -When you have pushed your changes to your GitHub account, you can submit a PR to -have your work from your GitHub fork to the `main` branch of the OpenShift -documentation repository. The documentation team will review the work, advise of -any further changes that are required, and finally merge your work. - -1. Go to your forked GitHub repository on the GitHub website, and you should see -your feature branch that includes all of your work. -2. Click on *Pull Request* to submit the PR against the `main` branch of the -`openshift-docs` repository. -3. Fill out the information requested on the template. -** If you know which product versions your change applies to, include a comment -that specifies the minimum version that the change applies to. The docs team -maintains these branches for all active and future distros and your PR will be -applied to one or more of these branches. -*** PR applies to all versions after a specific version (e.g. 4.8): 4.8+ -*** PR applies to the in-development version (e.g. 4.12) and future versions: 4.12+ -*** PR applies only to a specific single version (e.g. 4.10): 4.10 -*** PR applies to multiple specific versions (e.g. 4.8-4.10): 4.8, 4.9, 4.10 -** Provide a link to the preview. Automatic preview functionality is currently only available for some branches. For PRs that update the rendered build in any way against branches that do not create an automated preview: -*** OpenShift documentation team members (core and aligned) must include a link to a locally generated preview. -*** External contributors can request a generated preview from the OpenShift documentation team. -** All documentation changes that impact the meaning of the docs must be verified by a QE team associate before merging. -** Provide any other information about the change that the docs team might need to understand it. -4. Make sure that you squash to one commit before submitting your PR. -5. Ask for review from the OpenShift docs team: -** For community authors: Request a review by tagging @openshift/team-documentation or @kalexand-rh in a GitHub comment. 
-** For Red Hat associates: -*** For normal peer requests, add a comment that contains this text: /label peer-review-needed -*** For normal merge review requests, add a comment that contains this text: /label merge-review-needed -*** For urgent peer review requests, ping @peer-review-squad requesting a review in the #forum-docs-review channel (CoreOS Slack workspace) and provide the following information: -**** A link to the PR. -**** The size of the PR that the GitHub bot assigns (ex: XS, S, M, L, XL). -**** Details about how the PR is urgent. -*** For urgent merge requests, ping @merge-review-squad in the #forum-docs-review channel (CoreOS Slack workspace). -*** Except for changes that do not impact the meaning of the content, QE review is required before content is merged. - -== Confirm your changes have been merged - -When your PR has been merged into the `main` branch, you should confirm and -then sync your local and GitHub repositories with the `main` branch. - -1. On your workstation, switch to the `main` branch: -+ ----- -$ git checkout main ----- - -2. Pull the latest changes from `main`: -+ ----- -$ git fetch upstream ----- - -3. Incorporate the commits from the remote repository, in this case -`openshift/openshift-docs`, into your local repository: -+ ----- -$ git rebase upstream/main ----- - -4. After confirming in your rebased local repository that your changes have been -merged, push the latest changes, including your work, to your GitHub account: -+ ----- -$ git push origin main ----- - -== Add changes to an existing PR, if required -In some cases you might have to make changes to a PR that you have already -submitted. The following instructions describe how to make changes to an -existing PR you have already submitted. - -1. Commit whatever updates you have made to the feature branch by creating a new -commit: -+ ----- -$ git commit -am "Detailed message as noted earlier" ----- - -2. Rebase your PR and squash multiple commits into one commit. Before you push -your changes in the next step, follow the instructions here to rebase and squash: -https://github.com/edx/edx-platform/wiki/How-to-Rebase-a-Pull-Request - -3. After you have rebased and squashed, push the latest updates to the local -feature branch to your GitHub account. -+ ----- -$ git push origin --force ----- - -The `--force` flag ignores whatever is on the remote server and replaces -everything with the local copy. You should now see the new commits in the -existing PR. Sometimes a refresh of your browser may be required. - -== Delete the local feature branch - -When you have confirmed that all of your changes have been accepted and merged, -and you have pulled the latest changes on `main` and pushed them to your -GitHub account, you can delete the local feature branch. Ensure you are in your -local repository before proceeding. - -1. Delete the local feature branch from your workstation. -+ ----- -$ git branch -D ----- - -2. 
Delete the feature branch from your GitHub account: -+ ----- -$ git push origin : ----- diff --git a/contributing_to_docs/doc_guidelines.adoc b/contributing_to_docs/doc_guidelines.adoc deleted file mode 100644 index 1abdc0a96da0..000000000000 --- a/contributing_to_docs/doc_guidelines.adoc +++ /dev/null @@ -1,2070 +0,0 @@ -[id="contributing-to-docs-doc-guidelines"] -= Documentation guidelines -include::_attributes/common-attributes.adoc -:toc: macro - -The documentation guidelines for OpenShift 4 build on top of the -link:https://redhat-documentation.github.io/modular-docs/[_Red Hat modular docs reference guide_]. - -[NOTE] -==== -These _Documentation guidelines_ are primarily concerned with the modular structure and AsciiDoc / AsciiBinder requirements for building OpenShift documention. For general style guidelines in OpenShift docs, see the following: - -* Primary source: link:https://www.ibm.com/docs/en/ibm-style[_IBM Style_] -* Supplementary source: link:https://redhat-documentation.github.io/supplementary-style-guide/[_Red Hat supplementary style guide for product documentation_] - -When looking for style guidance, reference the _Red Hat supplementary style guide for product documentation_ first, because it overrides certain guidance from the _IBM Style_ guide. -==== - -toc::[] - -== General file guidelines - -* Set your editor to strip trailing whitespace. -* Do *not* hard wrap lines at 80 characters (or at any other length). -+ -It is not necessary to update existing content to unwrap lines, but you can remove existing hard wrapping from any lines that you are currently working in. -+ -[TIP] -==== -In the Atom editor, you can use `Ctrl`+`J` to undo hard wrapping on a paragraph. -==== - -[id="assembly-file-metadata"] -== Assembly file metadata -Every assembly file should contain the following metadata at the top, with no line spacing in between, except where noted: - ----- -:_content-type: ASSEMBLY <1> -[id=""] <2> -= Assembly title <3> -include::_attributes/common-attributes.adoc[] <4> -:context: <5> - <6> -toc::[] <7> ----- - -<1> The content type for the file. For assemblies, always use `:_content-type: ASSEMBLY`. Place this attribute before the anchor ID or, if present, the conditional that contains the anchor ID. -<2> A unique (within OpenShift docs) anchor ID for this assembly. Use lowercase. Example: cli-developer-commands -<3> Human readable title (notice the `=` top-level header) -<4> Includes attributes common to OpenShift docs. -+ -[NOTE] -==== -The `{product-title}` and `{product-version}` common attributes are not defined in the `_attributes/common-attributes.adoc` file. Those attributes are pulled by AsciiBinder from the distro mapping definitions in the https://github.com/openshift/openshift-docs/blob/main/_distro_map.yml[_distro_map.yml] file. See xref:product-name-and-version[Product title and version] and xref:attribute-files[attribute files] for more information on this topic. -==== -+ -<5> Context used for identifying headers in modules that is the same as the anchor ID. Example: cli-developer-commands. -<6> A blank line. You *must* have a blank line here before the toc. -<7> The table of contents for the current assembly. - -After the heading block and a single whitespace line, you can include any content for this assembly. - -[NOTE] -==== -The assembly title, which is the first line of the document, is the only level 1 ( = ) title. -Section headers within the assembly must be level 2 ( == ) or lower. 
When you include modules, you must add -leveloffsets in the include statements. You can manually add more level 2 or lower section headers in the assembly. -==== - -[id="module-file-metadata"] -== Module file metadata -Every module should be placed in the modules folder and should contain the following metadata at the top: - ----- -// Module included in the following assemblies: -// -// * list of assemblies where this module is included <1> - -:_content-type: <2> -[id="_{context}"] <3> -= Module title <4> ----- - -<1> The content type for the file. Replace `` with the actual type of the module, `CONCEPT`, `REFERENCE`, or `PROCEDURE`. Place this attribute before the anchor ID or, if present, the conditional that contains the anchor ID. -<2> List of assemblies in which this module is included. -<3> A module anchor with {context} that must be lowercase and must match the module's file name. -<4> Human readable title. To ensure consistency in the results of the -leveloffset values in include statements, you must use a level one heading -( = ) for the module title. - -Example: - ----- -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/developer-cli-commands.adoc - -:_content-type: REFERENCE -[id="cli-basic-commands_{context}"] -= Basic CLI commands ----- - -[id="snippet-file-metadata"] -== Text snippet file metadata -Every text snippet should be placed in the `snippets/` folder and should contain the following metadata at the top: - ----- -// Text snippet included in the following assemblies: <1> -// -// * list of assemblies where this text snippet is included -// -// Text snippet included in the following modules: <2> -// -// * list of modules where this text snippet is included - -:_content-type: SNIPPET <3> ----- -<1> List of assemblies in which this text snippet is included. -<2> List of modules in which this text snippet is included. -<3> The content type for the file. For snippets, always use `:_content-type: SNIPPET`. Place this attribute before the anchor ID, the conditional that contains the anchor ID, or the first line of body text. - -[NOTE] -==== -An anchor ID and human readable title are not required metadata. This type of component is text only and not intended to be published or cross referenced on its own. See <>. -==== - -Example: - ----- -// Text snippet included in the following assemblies: -// -// * installing/installing_aws/installing-aws-default.adoc -// * installing/installing_azure/installing-azure-default.adoc -// * installing/installing_gcp/installing-gcp-default.adoc - -:_content-type: SNIPPET -In {product-title} version {product-version}, you can install a cluster on {cloud-provider-first} ({cloud-provider}) that uses the default configuration options. ----- - -== Content type attributes - -Each `.adoc` file must contain a `:_content-type:` attribute in its metadata that indicates its file type. This information is used by some publication processes to sort and label files. - -Add the attribute from the following list that corresponds to your file type: - -* `:_content-type: ASSEMBLY` -* `:_content-type: CONCEPT` -* `:_content-type: PROCEDURE` -* `:_content-type: REFERENCE` -* `:_content-type: SNIPPET` - -Place the attribute in the file metadata. The following list describes the best attribute placement options: - -. Directly before the first anchor ID in a file -. If the first anchor ID is enclosed in a conditional, before the conditional -. 
Between the list of assemblies in which this module is included and the first line of body text - -The metadata examples contain sample placement for each file type, xref:assembly-file-metadata[assembly], xref:module-file-metadata[module], and xref:snippet-file-metadata[snippet]. - -[id="attribute-files"] -== Attribute files - -All attribute files must be placed in the `_attributes` directory. In most cases involving OpenShift Container Platform or OKD, add attributes to the `common-attributes.adoc` file instead of creating or using a separate attributes file. Before you add an attribute, review the contents of the `common-attributes.adoc` file to ensure that it is not already defined. - -[IMPORTANT] -==== -If you think that you need a separate attributes file, check with the docs team before you create it. -==== - -It is acceptable to group related attributes in the `common-attributes.adoc` file under a comment, as shown in the following example: - ----- -//gitops -:gitops-title: Red Hat OpenShift GitOps -:gitops-shortname: GitOps ----- - -It is also acceptable to enclose attributes in a xref:product-name-and-version[distro-based] conditional, but you must place attribute definitions for the `openshift-enterprise` distro first. The following example shows how to set a different value for the `:op-system-base:` attribute for OKD: - ----- -:op-system-base: RHEL -ifdef::openshift-origin[] -:op-system-base: Fedora -endif::[] ----- - -== Assembly/module file names - -Try to shorten the file name as much as possible _without_ abbreviating important terms that may cause confusion. For example, the `managing-authorization-policies.adoc` file name would be appropriate for an assembly titled "Managing Authorization Policies". - -== Directory names - -If you create a directory with a multiple-word name, separate each word with an underscore, for example `backup_and_restore`. - -[NOTE] -==== -Do not italicize user-replaced values. This guideline is an exception to the link:https://redhat-documentation.github.io/supplementary-style-guide/#user-replaced-values[_Red Hat supplementary style guide for product documentation_]. -==== - -Do not create or rename a top-level directory in the repository and topic map without checking with the docs program manager first. - -Avoid creating two levels of subdirectories because the link:https://github.com/openshift/openshift-docs/issues/52149[breadcrumb bar on docs.openshift.com breaks]. If you have a valid use case for two levels of subdirectories, talk with your DPM/CS (and, for aligned teams, the OpenShift DPM) for approval before creating it. - -When creating a new directory or subdirectory, you must create four symbolic links in it: - -* An `images` symbolic link to the top-level `images/` directory -* A `modules` symbolic link to the top-level `modules/` directory -* A `snippets` symbolic link to the top-level `snippets/` directory -* An `_attributes` symbolic link to the top-level `_attributes/` directory - -If the directory that contains an assembly does not have the `images` symbolic link, any images in that assembly or its modules will not be included properly when building the docs. - -[TIP] -==== -To create the symbolic links: - -. Navigate to the directory that you need to add the links in. -. 
Use the following command to create a symbolic link: -+ ----- -$ ln -s ----- -+ -For example, if you are creating the links in a directory that is two levels deep, such as `cli_reference/openshift_cli`, use the following commands: -+ ----- -$ ln -s ../../images/ images -$ ln -s ../../modules/ modules -$ ln -s ../../snippets/ snippets -$ ln -s ../../_attributes/ _attributes ----- -+ -Be sure to adjust the number of levels to back up (`../`) depending on how deep your directory is. - -If you accidentally create an incorrect link, you can remove that link by using `unlink `. -==== - -== Assembly/Module titles and section headings - -Use sentence case in all titles and section headings. See http://www.titlecase.com/ or https://convertcase.net/ for a conversion tool. - -Try to be as descriptive as possible with the title or section headings -without making them unnecessarily long. For assemblies and task modules, -use a gerund form in headings, such as: - -* Creating -* Managing -* Using - -Do not use "Overview" as a heading. - -Do not use backticks or other markup in assembly or module headings. - -Use only one level 1 heading (`=`) in any file. - -=== Discrete headings - -If you have a section heading that you do not want to appear in the TOC (like if you think that some section is not worth showing up or if there are already too many nested levels), you can use a discrete (or floating) heading: - -https://docs.asciidoctor.org/asciidoc/latest/blocks/discrete-headings/ - -A discrete heading also will not get a section number in the Customer Portal build of the doc. Previously, we would use plain bold mark-up around a heading like this, but discrete headings also allow you to ignore section nesting rules (like jumping from a `==` section level to a `====` level if you wanted for some style reason). - -To use a discrete heading, just add `[discrete]` to the line before your unique ID. For example: - ----- -[discrete] -[id="managing-authorization-policies_{context}"] -== Managing authorization policies ----- - -== Anchoring titles and section headings - -All titles and section headings must have an anchor ID. The anchor ID must be similar to the title or section heading. - -=== Anchoring in assembly files - -The following is an example anchor ID in an assembly file: - ----- -[id="configuring-alert-notifications"] -= Configuring alert notifications ----- - -[NOTE] -==== -Do not include line spaces between the anchor ID and the section title. -==== - -=== Anchoring in module files - -You must add the `{context}` variable to the end of each anchor ID in module files. When called, the `{context}` variable is resolved into the value declared in the `:context:` attribute in the corresponding assembly file. This enables cross-referencing to module IDs in context to a specific assembly and is useful when a module is included in multiple assemblies. - -[NOTE] -==== -The `{context}` variable must be preceded by an underscore (`_`) when declared in an anchor ID. 
-==== - -The following is an example of an anchor ID for a module file title: - ----- -[id="sending-notifications-to-external-systems_{context}"] -= Sending notifications to external systems ----- - -The following is an example of an anchor ID for a second level (`==`) heading: - ----- -[id="deployment-scaling-benefits_{context}"] -== Deployment and scaling benefits ----- - -=== Anchoring "Prerequisites", "Additional resources", and "Next steps" titles in assemblies - -Use unique IDs for "Prerequisites", "Additional resources", and "Next steps" titles in assemblies. You can add the prefixes `prerequisites_`, `additional-resources_`, or `next-steps_` to a unique string that describes the assembly topic. The unique string can match the value assigned to the `:context:` attribute in the assembly. - -[NOTE] -==== -The `prerequisites_`, `additional-resources_`, and `next-steps_` prefixes must end with an underscore (`_`) when declared in an anchor ID in an assembly. -==== - -The following examples include IDs that are unique to the "Configuring alert notifications" assembly: - -*Example unique ID for a "Prerequisites" title* - ----- -[id="prerequisites_configuring-alert-notifications"] -== Prerequisites ----- - -*Example unique ID for an "Additional resources" title* - ----- -[role="_additional-resources"] -[id="additional-resources_configuring-alert-notifications"] -== Additional resources ----- - -*Example unique ID for a "Next steps" title* - ----- -[id="next-steps_configuring-alert-notifications"] -== Next steps ----- - -== Writing assemblies -An _assembly_ is a collection of modules that describes how to accomplish a user story. - -Avoid link:https://redhat-documentation.github.io/modular-docs/#nesting-assemblies[nesting assemblies] in other assembly files. You can create more complicated document structures by modifying the link:https://github.com/openshift/openshift-docs/tree/main/_topic_maps[topic maps]. - -For more information about forming assemblies, see the -link:https://redhat-documentation.github.io/modular-docs/#forming-assemblies[_Red Hat modular docs reference guide_] and the link:https://raw.githubusercontent.com/redhat-documentation/modular-docs/master/modular-docs-manual/files/TEMPLATE_ASSEMBLY_a-collection-of-modules.adoc[assembly template]. - -[NOTE] -==== -When using the "Prerequisites", "Next steps", or "Additional resources" headings in an assembly, use `==` formatting, such as `== Prerequisites` or `== Additional resources`. Use of this heading syntax at the assembly level indicates that the sections relate to the whole assembly. - -Only use `.` formatting (`.Additional resources`) to follow a module in an assembly. Because you cannot use the xrefs in modules, this functions as a _trailing include_ at the assembly level, where the `.` formatting of the `include` statement indicates that the resource applies specifically to the module and not to the assembly. -==== - -== Writing concepts -A _concept_ contains information to support the tasks that users want to do and -must not include task information like commands or numbered steps. In most -cases, create your concepts as individual modules and include them in -appropriate assemblies. - -Avoid using gerunds in concept titles. "About " -is a common concept module title. 
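For example, the heading block of a concept module might look like the following, where the ID and title are placeholders:

----
:_content-type: CONCEPT
[id="about-alert-routing_{context}"]
= About alert routing
----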
- -For more information about creating concept modules, see the -link:https://redhat-documentation.github.io/modular-docs/#creating-concept-modules[_Red Hat modular docs reference guide_] and the link:https://raw.githubusercontent.com/redhat-documentation/modular-docs/master/modular-docs-manual/files/TEMPLATE_CONCEPT_concept-explanation.adoc[concept template]. - -== Writing procedures -A _procedure_ contains the steps that users follow to complete a process or task. Procedures contain ordered steps and explicit commands. In most cases, create your procedures as individual modules and include them in appropriate assemblies. - -Use a gerund in the procedure title, such as "Creating". - -For more information about writing procedures, see the -link:https://redhat-documentation.github.io/modular-docs/#creating-procedure-modules[_Red Hat modular docs reference guide_] and the link:https://raw.githubusercontent.com/redhat-documentation/modular-docs/master/modular-docs-manual/files/TEMPLATE_PROCEDURE_doing-one-procedure.adoc[procedure template]. - -[NOTE] -==== -When needed, use `.Prerequisites`, `.Next steps`, or `.Additional resources` syntax to suppress TOC formatting within a module. Do not use `==` syntax for these headings in modules. Because you cannot use the xrefs in modules, if you need to include a link under one of these headings, place the entire subsection in the assembly instead. -==== - -[id="writing-text-snippets"] -== Writing text snippets -A _text snippet_ is an optional component that lets you reuse content in multiple modules and assemblies. Text snippets are not a substitute for modules but instead are a more granular form of content reuse. While a module is content that a reader can understand on its own (like an article) or as part of a larger body of work (like an assembly), a text snippet is not self-contained and is not intended to be published or cross referenced on its own. - -In the context of modules and assemblies, text snippets do not include headings or anchor IDs. Text snippets also cannot contain xrefs. This type of component is text only. Examples include the following: - -* Admonitions that appear in multiple modules. -* An introductory paragraph that appears in multiple assemblies. -* The same series of steps that appear in multiple procedure modules. -* A deprecation statement that appears in multiple sets of release notes. - -Example: - -You could write the following paragraph once and include it in each assembly that explains how to install a cluster using the installer-provisioned default values: - -[source,text] ----- -In {product-title} version {product-version}, you can install a cluster on {cloud-provider-first} ({cloud-provider}) that uses the default configuration options. ----- - -[NOTE] -==== -In the example, `cloud-provider-first` and `cloud-provider` are not defined by the `common-attributes` module. If you use an attribute that is not common to OpenShift docs, make sure to define it locally in either the assembly or module, depending on where the text snippet is included. Because of this, consider adding all attributes that you add to snippets to the `common-attributes.adoc` file. -==== - -For more information about creating text snippets, see the -link:https://redhat-documentation.github.io/modular-docs/#using-text-snippets[_Red Hat modular docs reference guide_]. 
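To reuse a snippet, add an `include` statement at the point in the module or assembly where the text should appear, for example (the file name is hypothetical):

----
\include::snippets/install-default-configuration-intro.adoc[]
----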
- -[id="Auto-generated-content"] -== Auto-generated content - -The following content is auto-generated in each release and must not be manually edited: - -* The OpenShift CLI (`oc`) command references `modules/oc-by-example-content.adoc` and `modules/oc-adm-by-example-content.adoc`. -* The following API references content in the `rest_api` folder: the contents of all `_apis` subfolders and the `rest_api/objects/index.adoc` and `rest_api/index.adoc` assemblies. -* OpenShift Virtualization runbook modules: `modules/virt-runbook-.adoc`. - -[NOTE] -==== -If the content in these files needs to be updated, the update must be made in the applicable code repository where these files are generated from. The updates are reflected when the files are generated the next time, for example a future release. For help with where to make the updates, you can contact https://github.com/bergerhoffer[Andrea Hoffer] for the CLI docs, https://github.com/jboxman-rh[Jason Boxman] for the API docs, or https://github.com/apinnick[Avital Pinnick] for the OpenShift Virtualization runbooks. -==== - -[id="using-conscious-language"] -== Using conscious language - -To assist with the removal of the problematic word "master" from the documentation, use the following terminology when referring to OpenShift control plane nodes: - -[options="header"] -|=== -|Branch |Control plane node reference - -|`main`, `enterprise-4.9`, and later enterprise versions -|Control plane node - -|`enterprise-4.8` and earlier enterprise versions -|Control plane (also known as master) node - -|`enterprise-3.11` -|Master node - -|=== - -You can replace "node" in the preceding examples with "machine", "host", or another suitable description. - -In general text, use the term "control plane machine" in place of "master machine"; use the term "compute machine" in place of "worker machine". Be mindful of certain valid code entities, such as `master` role, `worker` role, and `infra` role. - -[NOTE] -==== -If you are cherry picking from `main` to `enterprise-4.8` or earlier, you must manually cherry pick to include the “(also known as master)” phrasing. This is required only if the phrase “control plane” is introduced for the first time in an assembly or module. -==== - -[id="adding-a-subsection-on-making-open-source-more-inclusive"] -=== Adding a subsection on making open source more inclusive - -If you create a release notes assembly for a sub-product within the `openshift/openshift-docs` repo, you might include a "Making open source more inclusive" statement. Instead of pasting the statement from the OpenShift Release Notes, use the following module, which is available in the `enterprise-4.8` branch and later: - -[source,text] ----- -\include::modules/making-open-source-more-inclusive.adoc[leveloffset=+1] ----- - -[id="product-name-and-version"] -== Product title and version - -When possible, generalize references to the product name and/or version by using -the `{product-title}` and/or `{product-version}` attributes. These attributes -are pulled by AsciiBinder from the OpenShift distribution, or _distro_, mapping definitions in the -https://github.com/openshift/openshift-docs/blob/main/_distro_map.yml[_distro_map.yml] -file. - -The `{product-title}` comes from the first `name:` field in a distro mapping, -while the associated `{product-version}` comes from the `name:` fields on any -`branches:` defined. - -How these attributes render is dependent on which distro and branch build you -are viewing. 
The following table shows the current distros and the -possible values for `{product-title}` and `{product-version}`, depending on the branch: - -[options="header"] -|=== -|Distro |`{product-title}` |`{product-version}` - -|`openshift-origin` -|OKD -a|* 3.6, 3.7, 3.9, 3.10, 3.11 -* 4.8, 4.9, 4.10, 4.11, 4.12, 4.13 -* 4 for the `latest/` build from the `main` branch - -|`openshift-enterprise` -|OpenShift Container Platform -a|* 3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.9, 3.10, 3.11 -* 4.1, 4.2, 4.3, 4.4, 4.5, 4.6, 4.7, 4.8, 4.9, 4.10, 4.11, 4.12, 4.13, 4.14 - -|`openshift-dedicated` -|OpenShift Dedicated -a|* No value set for the latest `dedicated/` build from the `enterprise-4.13` branch -* 3 for the `dedicated/3` build from the `enterprise-3.11` branch - -|`openshift-rosa` -|Red Hat OpenShift Service on AWS -|No value set for the `rosa/` build from the `enterprise-4.13` branch - -|`openshift-online` -|OpenShift Online -|Pro -|=== - -For example: - ----- -You can deploy applications on {product-title}. ----- - -This is a safe statement that could appear in probably any of the builds, so an -https://github.com/openshift/openshift-docs/blob/main/contributing_to_docs/contributing.adoc#conditional-text-between-products[ifdef/endif -statement] is not necessary. For example, if you were viewing a build for the -`openshift-enterprise` distro (for any of the distro-defined branches), this -would render as: - -> You can deploy applications on OpenShift Container Platform. - -And for the `openshift-origin` distro: - -> You can deploy applications on OKD. - -Considering that we use distinct branches to keep content for product versions separated, global use of `{product-version}` across all branches is probably less useful, but it is available if you come across a requirement for it. Just consider how it will render across any branches that the content appears in. - -If it makes more sense in context to refer to the major version of the product instead of a specific minor version (for example, if comparing how something in OpenShift Container Platform 4 differs from OpenShift Container Platform 3), just use the major version number. Do not prepend with a `v`, as in `v3` or `v4`. - -[NOTE] -==== -Other common attribute values are defined in the `_attributes/common-attributes.adoc` file. Where possible, generalize references to those values by using the common attributes. For example, use `{cluster-manager-first}` to refer to Red Hat OpenShift Cluster Manager. If you need to add an attribute to the `_attributes/common-attributes.adoc` file, open a pull request to add it to the attribute list. Do not create a separate attributes file without first consulting the docs team. -==== - -//CANARY -[id="conditional-content"] -== Conditional content - -You can use ifdef and ifeval statements to control the way content displays in different distributions and assemblies. - -NOTE: You can nest conditional statements that involve distribution and assembly context, but you must ensure that you close the if statements correctly. - -Because we maintain separate branches for each OpenShift Container Platform version, do not use if statements that are based on product version to vary content. - -[id="conditionals-for-distributions"] -=== Conditionals for distributions - -Use ifdef and ifndef statements to control content based on distribution, as described in the previous section. 
For example, the following content renders differently in OKD (`openshift-origin`) and OpenShift Container Platform (`openshift-enterprise`):
-
-----
-\ifdef::openshift-origin[]
-You can link:https://www.keycloak.org/docs/latest/server_admin/index.html#openshift[configure a Keycloak] server as an OpenID
-Connect identity provider for {product-title}.
-\endif::[]
-
-\ifdef::openshift-enterprise[]
-You can
-link:https://access.redhat.com/documentation/en-us/red_hat_single_sign-on/[configure Red Hat Single Sign-On]
-as an OpenID Connect identity provider for {product-title}.
-\endif::[]
-----
-
-In OKD, this section renders as the following text:
-
-> You can link:https://www.keycloak.org/docs/latest/server_admin/index.html#openshift[configure a Keycloak] server as an OpenID
-Connect identity provider for OKD.
-
-In OpenShift Container Platform, this section renders as the following text:
-
-> You can
-link:https://access.redhat.com/documentation/en-us/red_hat_single_sign-on/[configure Red Hat Single Sign-On]
-as an OpenID Connect identity provider for OpenShift Container Platform.
-
-
-[id="conditionals-for-assemblies"]
-=== Conditionals for different assemblies
-
-Use a combination of ifdef and ifeval statements to control content that needs to vary between assemblies. These conditional statements rely on the context attribute of each assembly and specific temporary attributes set within each module.
-
-The following shows a simple example. In the assembly that contains the `context` attribute `updating-restricted-network-cluster`, an extra paragraph is displayed.
-
-----
-\ifeval::["{context}" == "updating-restricted-network-cluster"]
-:restricted:
-\endif::[]
-
-...
-
-\ifdef::restricted[]
-If you are upgrading a cluster in a restricted network, install the `oc` version that you plan to upgrade to.
-\endif::restricted[]
-
-...
-
-\ifeval::["{context}" == "updating-restricted-network-cluster"]
-:!restricted:
-\endif::[]
-----
-
-Note that you must set and unset each temporary attribute that you introduce to an assembly. Use the temporary attributes in the applicable ifdef and ifndef statements to vary text between the assemblies. The preceding example uses `restricted` as the temporary attribute to display an additional paragraph for the assembly with the `updating-restricted-network-cluster` context attribute.
-
-== Node names
-
-Do not use internal company server names in commands or example output. Provide generic OpenShift Container Platform node name examples that are not provider-specific, unless required. Where possible, use the example.com domain name when providing fully qualified domain names (FQDNs).
-
-The following table includes example OpenShift Container Platform 4 node names and their corresponding role types:
-
-[options="header"]
-|===
-
-|Node name |Role type
-
-|*node-1.example.com*
-.3+.^|You can use this format for nodes that do not need role-specific node names.
-
-|*node-2.example.com*
-
-|*node-3.example.com*
-
-|*control-plane-1.example.com*
-.3+.^|You can use this format if you need to describe the control plane role type within a node name.
-
-|*control-plane-2.example.com*
-
-|*control-plane-3.example.com*
-
-|*compute-1.example.com*
-.2+.^|You can use this format if you need to describe the compute node role type within a node name.
-
-|*compute-2.example.com*
-
-|*bootstrap.example.com*
-|You can use this format if you need to describe the bootstrap node role type within a node name.
-|=== - -This example lists the status of cluster nodes that use the node name formatting guidelines: - -.... -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -compute-1.example.com Ready worker 33m v1.19.0+9f84db3 -control-plane-1.example.com Ready master 41m v1.19.0+9f84db3 -control-plane-2.example.com Ready master 45m v1.19.0+9f84db3 -compute-2.example.com Ready worker 38m v1.19.0+9f84db3 -compute-3.example.com Ready worker 33m v1.19.0+9f84db3 -control-plane-3.example.com Ready master 41m v1.19.0+9f84db3 ----- -.... - -[NOTE] -==== -Some provider-formatted hostnames include IPv4 addresses. An OpenShift Container Platform node name typically reflects the hostname of a node. If node names in your output need to be provider-specific and require this format, use private IPv4 addresses. For example, you could use `ip-10-0-48-9.example.com` as a node name that includes a private IPv4 address. -==== - -== IP addresses - -You may include IPv4 addresses from test clusters in examples in the documentation, as long as they are private. Private IPv4 addresses fall into one of the following ranges: - -* 10.0.0.0 to 10.255.255.255 (class A address block 10.0.0.0/8) -* 172.16.0.0 to 172.31.255.255 (class B address block 172.16.0.0/12) -* 192.168.0.0 to 192.168.255.255 (class C address block 192.168.0.0/16) - -Replace all public IP addresses with an address from the following blocks. These address blocks are reserved for documentation: - -* 192.0.2.0 to 192.0.2.255 (TEST-NET-1 address block 192.0.2.0/24) -* 198.51.100.0 to 198.51.100.255 (TEST-NET-2 address block 198.51.100.0/24) -* 203.0.113.0 to 203.0.113.255 (TEST-NET-3 address block 203.0.113.0/24) - -[NOTE] -==== -There might be advanced networking examples that require specific IP addresses, or cloud provider-specific examples that require a public IP address. Contact a subject matter expert if you need assistance with replacing IP addresses. -==== - -== Links, hyperlinks, and cross references -Links can be used to cross-reference internal assemblies or send readers to external information resources for further reading. - -In OpenShift docs: - -* All links to internal content is created using `xref` and **must have an anchor ID**. -* Only use `xref` in assemblies, not in modules. -* All links to external websites are created using `link`. - -[IMPORTANT] -==== -Do not split link paths across lines when wrapping text. This will cause issues with the doc builds. -==== - -=== Example URLs -To provide an example URL path that you do not want to render as a hyperlink, use this format: - -.... -`\https://www.example.com` -.... - -=== Internal cross-references - -Use the relative file path (from the file you are editing to the file you are linking to), even if you are linking to the same directory that you are writing in. This makes search and replace operations to fix broken links much easier. - -For example, if you are writing in `architecture/core_concepts/deployments.adoc` and you want to link to `architecture/core_concepts/routes.adoc`, then you must include the path back to the first level of the assembly directory: - ----- -xref:../../architecture/networking/routes.adoc#architecture-core-concepts-routes ----- - -[NOTE] -==== -In OpenShift docs, you can only use `xref` in assemblies, not in modules. -==== - -.Markup example of cross-referencing ----- -For more information, see xref:../dev_guide/application_lifecycle/new_app.adoc#dev-guide-new-app[Creating an application]. 
- -Rollbacks can be performed using the REST API or the xref:../cli_reference/openshift_cli/get_started_cli.adoc#installing-openshift-cli[OpenShift CLI]. ----- - -.Rendered output of cross-referencing -> For more information, see xref:../dev_guide/application_lifecycle/new_app.adoc#dev-guide-new-app[Creating an application]. -> -> Rollbacks can be performed using the REST API or the xref:../cli_reference/openshift_cli/get_started_cli.adoc#installing-openshift-cli[OpenShift CLI]. - -=== Links to external websites - -If you want to link to a different website, use: - ----- -link:http://othersite.com/otherpath[friendly reference text] ----- - -IMPORTANT: You must use `link:` before the start of the URL. - -IMPORTANT: You cannot link to a repository that is hosted on www.github.com. - -TIP: If you want to build a link from a URL _without_ changing the text from the actual URL, just print the URL without adding a `[friendly text]` block at the end; it will automatically be rendered as a link. - -=== Links to internal content -There are two scenarios for linking to other assemblies: - -1. Link to another file that exists in the same directory. -2. Link to another file that exists in a separate directory. - -The following examples use the example directory structure shown here: -.... -/ -/foo -/foo/bar.adoc -/baz -/baz/zig.adoc -/baz/zag.adoc -.... - -*Link to assembly in same directory* - ----- -xref:#anchor-id[friendly title] ----- - -You must use the `.adoc` file extension. The document processor will correctly link this to the resulting HTML file. - -For example, using the above syntax, if you are working on `zig.adoc` and want to link to `zag.adoc`, do it this way: - ----- -xref:../zag.adoc#baz-zag[comment] ----- - -where `baz-zag` is the anchor ID at the top of the file `zag.adoc`. - -*Link to assembly in different directory* - ----- -xref:../dir/.adoc#anchor-id[friendly title] ----- - -For example, if you are working on `bar.adoc` and you want to link to `zig.adoc`, do it this way: - ----- -For more information, see the xref:../baz/zig.adoc#baz-zig[ZIG manual]. ----- - -[NOTE] -==== -You must use the `.adoc` extension in order for the link to work correctly and you must specify an anchor ID. -==== - -== Embedding an external file - -You can embed content hosted outside the link:https://github.com/openshift/openshift-docs[openshift-docs] -GitHub repository by using the `include` directive to target the URI of a raw -file. This is helpful for cases where content frequently changes; you embed the raw -file and the content auto-updates based on the changes made to the content on its -host site. - -[IMPORTANT] -==== -You are restricted to only embed files from GitHub repositories managed by the -`openshift` GitHub user. You must also prefix your external file URI with `https`. -URIs beginning with `http` are forbidden for security reasons and will fail the -documentation build. -==== - -For example, if you want to embed the link:https://github.com/openshift/installer/blob/release-4.8/upi/azure/01_vnet.json[01_vnet.json] template, include the URI of its raw file version like this: - -``` -.`01_vnet.json` ARM template -[source,json] ----- -\include::https://raw.githubusercontent.com/openshift/installer/release-4.8/upi/azure/01_vnet.json[] ----- -``` - -[NOTE] -==== -Embedding external files is restricted for files that change frequently, like templates. You must ensure that embedded files are QE verified before they are updated on their host site. 
-==== - -[NOTE] -==== -You must get approval from the Engineering, QE, and Docs teams before embedding an external file. -==== - -== Embedding a local YAML file - -You can embed local YAML files in AsciiDoc modules. -Consider embedding a local YAML file when you have a complete and valid YAML file that you want to use. -This is useful when you want to include a complete YAML CR in the docs. -The YAML file that you include must be a local file maintained in the link:https://github.com/openshift/openshift-docs[openshift-docs] GitHub repository. -Use the `include` directive to target the local file. - -To use a local YAML file, add it to the `snippets/` folder, and include it in your module. For example: - -[source,yaml] ----- -\include::snippets/install-config.yaml[] ----- - -[NOTE] -==== -Do not include link:https://docs.asciidoctor.org/asciidoc/latest/directives/include-lines/[lines by content ranges]. This approach can lead to content errors when the included file is subsequently updated. -==== - -[IMPORTANT] -==== -If the YAML file you want to include is from a GitHub repository that is managed by the `openshift` GitHub user, link to the file directly rather than copying the file to the `/openshift-docs` folder. -==== - -[discrete] -=== Using AsciiDoc callouts in the YAML - -You can use AsciiDoc callouts in the YAML file. -Comment out the callout in the YAML file to ensure that file can still be parsed as valid YAML. -Asciidoctor recognises the commented callout and renders it correctly in the output. -For example: - -`apiVersion: v1 # <1>` - -[discrete] -=== Version and upgrade implications - -Carefully consider the version and upgrade implications of including the local YAML file in your content. Including a local YAML file can increase the maintenance overhead for the content. -If you have a doubt, talk to your content strategist or docs team lead. - -[discrete] -=== Validating the local YAML file - -Before you include the YAML file, use a YAML linter or the `oc` CLI to verify that the YAML is valid. -For example, to validate the `snippets/SiteConfig.yaml` file using `oc`, log in to a cluster and run the following command from a terminal opened in the `openshift-docs/` folder: - -[source,terminal] ----- -$ oc apply -f snippets/SiteConfig.yaml --dry-run=client ----- - -.Example output -[source,terminal] ----- -siteconfig.ran.openshift.io/example-sno created (dry run) ----- - -Running `oc` with the `--dry-run=client` switch does not succeed with an invalid YAML file. - -== Indicating Technology Preview features - -To indicate that a feature is in Technology Preview, include the `snippets/technology-preview.adoc` file in the feature's assembly or module to keep the supportability wording consistent across Technology Preview features. Provide a value for the `:FeatureName:` variable before you include this module. - -[source,text] ----- -:FeatureName: The XYZ plug-in -\include::snippets/technology-preview.adoc[] ----- - -== Indicating deprecated features - -To indicate that a feature is deprecated, include the `modules/deprecated-feature.adoc` file in the feature's assembly, or to each relevant assembly such as for a deprecated Operator, to keep the supportability wording consistent across deprecated features. Provide a value for the `:FeatureName:` variable before you include this module. - -For more information on how this is applied, see link:https://github.com/openshift/openshift-docs/pull/31776/files[this example PR]. 
- -== Verification of your content -All documentation changes must be verified by a QE team associate before merging. This includes executing all "Procedure" changes and confirming expected results. There are exceptions for typo-level changes, formatting-only changes, and other negotiated documentation sets and distributions. - -If a documentation change is due to a Bugzilla bug or Jira issue, the bug/issue should be put on ON_QA when you have a PR ready. After QE approval is given (either in the bug/issue or in the PR), the QE associate should move the bug/issue status to VERIFIED, at which point the associated PR can be merged. It is also ok for the assigned writer to change the status of the bug/issue to VERIFIED if approval for the changes has been provided in another forum (slack, PR, or email). The writer should indicate that the QE team approved the change as a comment in the bug/issue. - -== Images - -=== Image format - -Use `*.png` format images. - -=== Block images - -To include a block image (an image on its own line): - -1. Put the image file in the `images` folder. -+ -Ensure that the folder containing your assembly contains an `images` symbolic link to the top-level `images/` directory, otherwise the image will not be found when building the docs. - -2. In the `.adoc` content, use this format to link to the image: -+ ----- -image::[] ----- -+ -Note the double `::` instead of a single `:`, as seen in inline image usage. -You only have to specify `` itself and not the full file path; -the build mechanism automatically expands this appropriately. - -=== Inline images (icons) - -Inline images can be used to indicate graphic items in the web console, such as -buttons or menu icons. - -==== Inserting reusable images inline - -To simplify reuse, the following common SVGs (the OpenShift web console uses the -Font Awesome icon set) have already been added to the `images` folder with a -user-defined entity added to the `common-attributes.adoc` module: - -|=== -|Icon |Entity |Alt text |File name - -|Kebab -|`:kebab:` -|Options menu -|`ellipsis-v.svg` - -|=== - -When using inline, include the image after the UI element name. For example: - ----- -Click the *Options* menu {kebab}. ----- - -==== Inserting images inline without reuse - -If you are inserting an image that is not part of the `common-attributes.adoc` -module, then include the image using this formatting: - ----- -image:[title=""] ----- - -Note the single `:` instead of a double `::`, as seen in block image usage. - -For example: - ----- -image:manage-columns.png[title="Manage Columns icon"] ----- - -== Formatting - -For all of the system blocks including table delimiters, use four characters. For example: - -.... -|=== for tables ----- for code blocks -.... - -[NOTE] -==== -You can use backticks or other markup in the title for a block, such as a code block `.Example` or a table `.Description` title. -==== - -=== Code blocks, command syntax, and example output - -Code blocks are generally used to show examples of command syntax, example -screen output, and configuration files. - -The main distinction between showing command syntax and a command example is -that a command syntax shows readers how to use the command without real values. -An example command, however, shows the command with actual values with an -example output of that command, where applicable. - -For example: - -.... 
-In the following example, the `oc get` operation returns a complete list of services that are currently defined: - -[source,terminal] ----- -$ oc get se ----- - -.Example output -[source,terminal] ----- -NAME LABELS SELECTOR IP PORT -kubernetes component=apiserver,provider=kubernetes 172.30.17.96 443 -kubernetes-ro component=apiserver,provider=kubernetes 172.30.17.77 80 -docker-registry name=registrypod 172.30.17.158 5001 ----- -.... - -This renders as: - -> In the following example, the `oc get` operation returns a complete list of services that are currently defined: -> -> ---- -> $ oc get se -> ---- -> -> .Example output -> ---- -> NAME LABELS SELECTOR IP PORT -> kubernetes component=apiserver,provider=kubernetes 172.30.17.96 443 -> kubernetes-ro component=apiserver,provider=kubernetes 172.30.17.77 80 -> docker-registry name=registrypod 172.30.17.158 5001 -> ---- - -The following guidelines go into more detail about specific requirements and -recommendations when using code blocks: - -* If a step in a procedure is to run a command, make sure that the step -text includes an explicit instruction to "run" or "enter" the command. In most cases, -use one of the following patterns to introduce the code block: - -** by running the following command: -** by entering the following command: -** , run the following command: -** , enter the following command: - -* Do NOT use any markup in code blocks; code blocks generally do not accept any markup. - -* For all code blocks, you must include an empty line above a code block (unless -that line is introducing block metadata, such as `[source,terminal]` for syntax -highlighting). -+ -Acceptable: -+ -.... -Lorem ipsum - ----- -$ lorem.sh ----- -.... -+ -Not acceptable: -+ -.... -Lorem ipsum ----- -$ lorem.sh ----- -.... -+ -Without the line spaces, the content is likely to be not parsed correctly. - -* Use `[source,terminal]` for `oc` commands or any terminal commands to enable -syntax highlighting. Any `[source]` metadata must go on the line directly before -the code block. For example: -+ -.... -[source,terminal] ----- -$ oc get nodes ----- -.... -+ -If you are also showing a code block for the output of the command, use -`[source,terminal]` for that code block as well. - -* Use source tags for the programming language used in the code block to enable -syntax highlighting. For example: - -** `[source,yaml]` -** `[source,go]` -** `[source,javascript]` -** `[source,jsx]` - -* Do not use more than one command per code block. For example, the following must -be split up into three separate code blocks: -+ -.... -To create templates you can modify, run the following commands: - -[source,terminal] ----- -$ oc adm create-login-template > login.html ----- - -[source,terminal] ----- -$ oc adm create-provider-selection-template > providers.html ----- - -[source,terminal] ----- -$ oc adm create-error-template > errors.html ----- -.... - -* If your command contains multiple lines and uses callout annotations, you must comment out the callout(s) in the codeblock, as shown in the following example: -+ -.... -To scale based on the percent of CPU utilization, create a `HorizontalPodAutoscaler` object for an existing object: - -[source,terminal] ----- -$ oc autoscale / \// <1> - --min \// <2> - --max \// <3> - --cpu-percent= <4> ----- -<1> Specify the type and name of the object to autoscale. -<2> Optional: Specify the minimum number of replicas when scaling down. -<3> Specify the maximum number of replicas when scaling up. 
-<4> Specify the target average CPU utilization over all the pods, represented as a percent of requested CPU. -.... - -* Separate a command and its related example output into individual code blocks. -This allows the command to be easily copied using the button on -+++docs.openshift.com+++. -+ -In addition, prepend the code block for the output with the title `.Example output` -to make it consistently clear across the docs when this is being represented. A -lead-in sentence explaining the example output is optional. For example: -+ -.... -Use the `oc new-project` command to create a new project: - -[source,terminal] ----- -$ oc new-project my-project ----- - -The output verifies that a new project was created: - -.Example output -[source,terminal] ----- -Now using project "my-project" on server "https://openshift.example.com:6443". ----- -.... - -* To mark up command syntax, use the code block and wrap any replaceable values in angle brackets (`<>`) with the required command parameter, using underscores (`_`) between words as necessary for legibility. Do not italicize user-replaced values. For example: -+ -.... -To view a list of objects for the specified object type, enter the following command: - -[source,terminal] ----- -$ oc get ----- -.... -+ -This renders as: -+ --- -> To view a list of objects for the specified object type, enter the following command: -> -> ---- -> $ oc get -> ---- --- -+ -NOTE: Avoid using full command syntax inline with sentences. - -* When you specify link:https://kubernetes.io/docs/reference/kubectl/#resource-types[resource names] in `oc` commands, use the full name of the resource type by default. You can use the abbreviation of the resource type name if it improves readability, such as with very long commands, or to be consistent with existing content in the same assembly. -+ -For example, use `namespaces` instead of `ns` and `poddisruptionbudgets` instead of `pdb`. - -* When referring to a path to a location that the user has selected or created, treat the part of the path that the user chose as a replaceable value. For example: -+ -.... -Create a secret that contains the certificate and key in the `openshift-config` namespace: - -[source,terminal] ----- -$ oc create secret tls --cert=/cert.crt --key=/cert.key -n openshift-config ----- -.... -+ -This renders as: -+ --- -> Create a secret that contains the certificate and key in the `openshift-config` namespace: -> -> ---- -> $ oc create secret tls --cert=/cert.crt --key=/cert.key -n openshift-config -> ---- --- -+ -The following example shows a more complex use of user-chosen elements and prescriptive placement: -+ -.... -/providers/Microsoft.Compute/diskEncryptionSets/ -.... - -* If you must provide additional information on what a line of a code block -represents, use callouts (`<1>`, `<2>`, etc.) to provide that information. -+ -Use this format when embedding callouts into the code block: -+ -[subs=-callouts] -.... ----- -code example 1 <1> -code example 2 <2> ----- -<1> A note about the first example value. -<2> A note about the second example value. -.... - -* If you must provide additional information on what a line of a code block -represents and the use of callouts is impractical, you can use a description list -to provide information about the variables in the code block. Using callouts -might be impractical if a code block contains too many conditional statements to -easily use numbered callouts or if the same note applies to multiple lines of the codeblock. -+ -.... 
----- -code -code ----- -+ -where: - -:: Specifies the explanation of the first variable. -:: Specifies the explanation of the first variable. -.... -+ -Be sure to introduce the description list with "where:" and start each variable -description with "Specifies." - -* For long lines of code that you want to break up among multiple lines, use a -backslash to show the line break. For example: -+ ----- -$ oc get endpoints --all-namespaces --template \ - '{{ range .items }}{{ .metadata.namespace }}:{{ .metadata.name }} \ - {{ range .subsets }}{{ range .addresses }}{{ .ip }} \ - {{ end }}{{ end }}{{ "\n" }}{{ end }}' | awk '/ 172\.30\./ { print $1 }' ----- - -* If the user must run a command as root, use a number sign (`#`) at the start of the command instead of a dollar sign (`$`). For example: -+ ----- -# subscription-manager list ----- - -* For snippets or sections of a file, use an ellipsis (`...` or `# ...` for YAML) to show that the file continues before or after the quoted block. -+ ----- -apiVersion: v1 -kind: Pod -metadata: - labels: - test: liveness -# ... ----- -+ -or -+ ----- -Name: ci-ln-iyhx092-f76d1-nvdfm-worker-b-wln2l -Roles: worker -... -Taints: node-role.kubernetes.io/infra:NoSchedule -... ----- -+ -Do not use `[...]`, ``, or any other variant. - -* Do not use `jq` in commands (unless it is truly required), because this requires users to install the `jq` tool. Oftentimes, the same or similar result can be accomplished using `jsonpath` for `oc` commands. -+ -For example, this command that uses `jq`: -+ ----- -$ oc get clusterversion -o json|jq ".items[0].spec" ----- -+ -can be updated to use `jsonpath` instead: -+ ----- -$ oc get clusterversion -o jsonpath='{.items[0].spec}{"\n"}' ----- - -* For Bash "here" documents use `[source,terminal]`, such as the following example: -+ -.... -[source,terminal] ----- -$ cat < ` command: -+ -.... -[source,text] ----- -Name: node1.example.com -Roles: worker -Labels: kubernetes.io/arch=amd64 -... -Annotations: cluster.k8s.io/machine: openshift-machine-api/ahardin-worker-us-east-2a-q5dzc -... -CreationTimestamp: Wed, 13 Feb 2019 11:05:57 -0500 ----- -.... - -=== YAML formatting for Kubernetes and OpenShift API objects -The following formatting guidelines apply to YAML manifests, but do not apply to the installation configuration YAML specified by `install-config.yaml`. - -When possible, ensure that YAML is valid in a running cluster. You can validate YAML with `oc apply` with the following invocation: - ----- -$ oc apply -f test.yaml --dry-run=client ----- - -==== Required fields - -- Include the `apiVersion` and `kind` so that a user always knows the context of the YAML. -- Include the full hierarchy to a deeply nested key. -- For objects that are in the global scope, such as for `config.openshift.io` API group, always include the `metadata.name` for the object, which is usually `cluster`. - -.Example API object in the global scope ----- -apiVersion: config.openshift.io/v1 -kind: Scheduler -metadata: - name: cluster -# ... -spec: - defaultNodeSelector: node-role.kubernetes.io/app= -# ... ----- - -.Example deeply nested key with full context for `.ports` array ----- -apiVersion: v1 -kind: Pod -metadata: - name: pod1 - namespace: default -spec: - containers: - - name: web - image: nginx - ports: - - name: web - containerPort: 80 - protocol: TCP ----- - -==== Formatting -The following conventions govern the layout of YAML for API objects: - -- Begin YAML at the beginning of the left margin. -- Use two-space indentation. 
-- Indent arrays at the same depth as the parent field. -- Include a space immediately after the colon for keys. -- Use block style for complex strings, such as embedded JSON or text blocks. You can enable block style by specifying `|` or `|-` after a field and indenting the field content by two spaces, such as in the following example: -+ ----- -fieldName: |- - This is a string. - And it can be on multiple lines. ----- -- When truncating YAML, comment out the ellipsis (`# ...`) because three dots (`...`) in YAML is actually a link:https://yaml.org/spec/1.2.2/#22-structures[document end marker]. -- Use three hyphens (`---`) to separate YAML definitions in a single YAML file. - -.Example with array indentation flush with parent field ----- -apiVersion: v1 -kind: Pod -metadata: - name: pod1 - labels: - - key1: val1 - - key2: val2 -spec: -# ... ----- - -.Example with block string for annotation ----- -apiVersion: v1 -kind: Pod -metadata: - name: pod1 - annotations: - k8s.v1.cni.cncf.io/networks: |- - [ - { - "name": "net" - } - ] -spec: -# ... ----- - -=== Inline code or commands -Do NOT show full commands or command syntax inline within a sentence. The next section covers how to show commands and command syntax. - -Only use case for inline commands would be general commands and operations, without replaceables and command options. In this case an inline command is marked up using the back ticks: - -.... -Use the `GET` operation to do x. -.... - -This renders as: - -> Use the `GET` operation to do x. - -=== System messages - -System messages include error, warning, confirmation, and information messages that are presented to the user in places such as the GUI, CLI, or system logs. - -If a message is short enough to include inline, enclose it in back ticks: - -.... -Previously, image builds and pushes would fail with the `error reading blob from source` error message because the builder logic would compute the contents of new layers twice. -.... - -This renders as: - -> Previously, image builds and pushes would fail with the `error reading blob from source` error message because the builder logic would compute the contents of new layers twice. - -If a message is too long to include inline, put it inside a code block with `[source,text]` metadata: - -.... -Previously, the AWS Terraform provider that the installation program used occasionally caused a race condition with the S3 bucket, and the cluster installation failed with the following error message: - -[source,text] ----- -When applying changes to module.bootstrap.aws_s3_bucket.ignition, provider level=error msg="\"aws\" produced an unexpected new value for was present, but now absent. ----- - -Now, the installation program uses different AWS Terraform provider code, which now robustly handles S3 eventual consistency, and the installer-provisioned AWS cluster installation does not fail with that error message. -.... - -This renders as: - -> Previously, the AWS Terraform provider that the installation program used occasionally caused a race condition with the S3 bucket, and the cluster installation failed with the following error message: -> -> ---- -> When applying changes to module.bootstrap.aws_s3_bucket.ignition, provider level=error msg="\"aws\" produced an unexpected new value for was present, but now absent. 
-> ---- -> -> Now, the installation program uses different AWS Terraform provider code, which now robustly handles S3 eventual consistency, and the installer-provisioned AWS cluster installation does not fail with that error message. - -NOTE: Always refer to a message with the type of message it is, followed by the word "message". For example, refer to an error message as an "error message", and not simply as an "error". - -=== Lists -Lists are created as shown in this example: - -.... -. Item 1 (2 spaces between the period and the first character) - -. Item 2 - -. Item 3 -.... - -This renders as: - -> . Item 1 -> . Item 2 -> . Item 3 - -If you must add any text, admonitions, or code blocks you have to add the continuous +, as shown in the example: - -.... -. Item 1 -+ ----- -some code block ----- - -. Item 2 - -. Item 3 -.... - -This renders as: - -> . Item 1 -> + -> ---- -> some code block -> ---- -> . Item 2 -> . Item 3 - -=== Footnotes - -Avoid footnotes when possible. - -If you reference a footnote from only a single location, use the following syntax: - -.Footnote -.... -footnote:[This is the footnote text.] -.... - -If you reference a footnote from multiple locations, set an attribute with the footnote text. As a consequence, this will duplicate the footnote text at bottom of the page. - -.Footnote with text set by an attribute -.... -:note-text: This is a footnote. - -This text has a footnote qualifier attached footnote:[{note-text}]. - -But this other text uses the same qualifier elsewhere footnote:[{note-text}]. -.... - -Avoid using `footnoteref`. - -[IMPORTANT] -==== -The `footnoteref` directive is deprecated in asciidoctor and causes a build warning when `ascii_binder` is run. -==== - -.Footnote with reference -.... -footnoteref:[ref-string, This is the footnote text.] -.... - -==== Alternative footnote styling in tables - -For footnotes in tables, use the following syntax to mimic Asciidoctor's -styling: - -.... -[cols="3",options="header"] -|=== -|Header 1 -|Header 2 -|Header 3 - -|Item A ^[1]^ -|Item B -|Item C ^[2]^ - -|Item D -|Item E ^[3]^ -|Item F ^[3]^ -|=== -[.small] --- -1. A description. -2. Another description. -3. Two items relate to this description. --- -.... - -The notes are kept immediately after the table, instead of moved to the bottom of the rendered assembly. This manual method also allows you to reuse the same footnote number for multiple references as needed. - -Note the following: - -* Add a space before the superscripted numbers with square brackets. -* To match the table cell's font size, start the ordered list with a `[.small]` -style and wrap it in a `--` block. - -[id="collapsible-content"] -=== Collapsible content -You can collapse sections of content by using the `collapsible` option, which converts the Asciidoctor markup to HTML `details` and `summary` sections. The `collapsible` option is used at the writer's discretion and is appropriate for considerably long code blocks, lists, or other such content that significantly increases the length of a module or assembly. - -[NOTE] -==== -You must set a title for the `summary` section. If a title is not set, the default title is "Details." -==== - -Collapsible content is formatted as shown: - -.... -.Title of the `summary` dropdown -[%collapsible] -==== -This is content within the `details` section. -==== -.... - -This renders as a dropdown with collapsed content: - -.Title of the `summary` dropdown -[%collapsible] -==== -This is content within the `details` section. 
-==== - -If your collapsible content includes an admonition such as a note or warning, the admonition must be nested: - -.... -.Collapsible content that includes an admonition -[%collapsible] -==== -This content includes an admonition. - -[source,terminal] ----- -$ oc whoami ----- - -[NOTE] -===== -Nest admonitions when using the `collapsible` option. -===== -==== -.... - -This renders as: - -.Collapsible content that includes an admonition -[%collapsible] -==== -This content includes an admonition. - -[source,terminal] ----- -$ oc whoami ----- - -[NOTE] -===== -Nest admonitions when using the `collapsible` option. -===== -==== - -=== Quick reference - -.User accounts and info -[option="header"] -|=== -|Markup in command syntax |Description |Substitute value in Example block - -|`` -|Name of user account -|user@example.com - -|`` -|User password -|password -|=== - -[IMPORTANT] -==== -Do not use a password format that matches the format of a real password. Documenting such a password format can cause the following issues: - -* Indicates that Red Hat publicly exposes sensitive data in their documentation. -* Leads to additional security incidents that the Information Security, InfoSec, team must investigate. Such security incidents, although minor, can impact the InfoSec team's resources and potentially delay them from focusing on actual security incidents. -==== - -.Projects and applications -[option="header"] -|=== -|Markup in command syntax |Description |Substitute value in Example block - -|`` -|Name of project -|myproject - -|`` -|Name of an application -|myapp -|=== - -=== Additional resources sections - -The following guidelines apply to all "Additional resources" sections: - -* You must include the `[role="_additional-resources"]` attribute declaration before the section heading. -* You must not include paragraphs in the section. Use an unordered list. -* The links and xrefs in the unordered list must contain human-readable text between the square brackets. -* Each item in the unordered list must contain a minimum of text besides the link or xref. - -Additionally, in an assembly, use `==` formatting for the section heading (`== Additional resources`). Use of this heading syntax at the assembly level indicates that the sections relate to the whole assembly. For example: - ----- -[role="_additional-resources"] -[id="additional-resources_configuring-alert-notifications"] -== Additional resources -* link:some-url.com[Human readable label] -* xref:some_xref[Human readable label] -* xref:some_other_xref[Human readable label] ----- - -Only use `.` formatting (`.Additional resources`) in a module or to follow a module in an assembly. Because you cannot use the xrefs in modules, this functions as a _trailing include_ at the assembly level, where the `.` formatting of the `include` statement indicates that the resource applies specifically to the module and not to the assembly. For example: - ----- -[role="_additional-resources"] -.Additional resources -* link:some-url.com[Human readable label] -* xref:some_xref[Human readable label] -* xref:some_other_xref[Human readable label] ----- - -== Admonitions -Admonitions such as notes and warnings are formatted as shown: - -.... -[ADMONITION] -==== -Text for admonition -==== -.... - -See the link:https://redhat-documentation.github.io/supplementary-style-guide/#admonitions[Red Hat Supplementary style guide] for the valid admonition types and their definitions. 
- -[id="api-object-formatting"] -== API object formatting - -For terms that are API objects, the way they are written depends on whether the term is a general reference or an actual reference to the object. - -[id="api-object-general-references"] -=== General references - -A general reference is any time you are speaking conceptually, or generally, about these components in a cluster. - -When referring to API object terms in general usage, use lowercase and separate multi-word API objects. *Default to following this guidance unless you are specifically interacting with/referring to the API object (see xref:api-object-object-references[Object references]).* - -For example: - -* pod -* node -* daemon set -* config map -* deployment -* image stream -* persistent volume claim - -.Examples of general references -.... -Kubernetes runs your workload by placing containers into pods to run on nodes. - -You must have at least one secret, config map, or service account. - -The total number of persistent volume claims in a project. -.... - -Note that if an object uses an acronym or other special capitalization, then its general reference should honor that. For example, general references to `APIService` should be written as "API service", not "api service". Any other exceptions or special guidance are noted in the xref:../contributing_to_docs/term_glossary.adoc[glossary]. - -[id="api-object-object-references"] -=== Object references - -An object reference is when you are referring to the actual instance of an API object, where the object name is important. - -When referring to actual instances of API objects, use link:https://en.wikipedia.org/wiki/Camel_case#Variations_and_synonyms[PascalCase] and mark it up as monospace in backticks (````). - -[NOTE] -==== -Do not use backticks or other markup in assembly or module headings. You can use backticks or other markup in the title for a block, such as a code block `.Example` or a table `.Description` title. -==== - -Be sure to match the proper object type (or `kind` in Kubernetes terms); for example, do not add an "s" to make it plural. *Only follow this guidance if you are explicitly referring to the API object (for example, when editing an object in the CLI or viewing an object in the web console).* - -For example: - -* `Pod` -* `Node` -* `DaemonSet` -* `ConfigMap` -* `Deployment` -* `ImageStream` -* `PersistentVolumeClaim` - -.Examples of API object references -.... -After you create a `Node` object, or the kubelet on a node self-registers, the control plane checks whether the new `Node` object is valid. - -The default amount of CPU that a container can use if not specified in the `Pod` spec. - -Create a file, `pvc.yaml`, with the `PersistentVolumeClaim` object definition. -.... - -[NOTE] -==== -Use "object", "resource", "custom resource", "spec", etc. as appropriate after the object reference. This helps with clarity and readability. - -Another situation where this is necessary is when referring to the plural version of objects. Do not add an "s" to the end of an object name reference to make it plural. Use only the official `kind` of object (for example, seen when you run `oc api-resources`). - -For example, the object `kind` for a node is `Node`, not `Nodes`. So do not write "You can create `Nodes` using `kubectl`." Instead, rewrite to something like "You can create `Node` objects using `kubectl`." -==== - -[id="operator-name-capitalization"] -=== Operator capitalization - -The term "Operator" is always capitalized. 
For example: - ----- -= Support policy for unmanaged Operators - -Individual Operators have a `managementState` parameter in their configuration. ----- - -An Operator's full name must be a proper noun, with each word initially -capitalized. If it includes a product name, defer the product's capitalization -style guidelines. For example: - -- Red Hat OpenShift Logging Operator -- Prometheus Operator -- etcd Operator -- Node Tuning Operator -- Cluster Version Operator - -[NOTE] -==== -Red Hat Brand and Legal guidance for Operator names will likely differ. For marketing materials, they prefer lowercase names for anything that is not a Red Hat product. - -However, the Brand team recognizes that there are different standards for marketing materials versus technical content. For this reason, the title case capitalization for Operator names in technical product documentation and OperatorHub is acceptable. - -The "Naming" page by Red Hat Brand on the Source provides an overview of naming slide deck that also confirms this difference. -==== - -== Declarative config examples - -Many of our procedures provide imperative `oc` commands (which cannot be stored in a Git repo). Due to efforts around improving the experience for GitOps users, we sometimes also want to provide a declarative YAML example that achieves the same configuration. This allows users to store these YAML configurations in a Git repo and follow GitOps practices to configure OpenShift. - -[IMPORTANT] -==== -When adding declarative examples to procedures, do not completely replace the imperative command with the declarative YAML example. Some users might still prefer the imperative option. -==== - -To add a declarative YAML example to a procedure step with an existing imperative command, add it in a "TIP" admonition by following the template in the example below. This example uses an imperative command (`oc create configmap`) to create a config map, and then provides the declarative YAML example of the `ConfigMap` object afterward. - -.... -* Define a `ConfigMap` object containing the certificate authority by using the following command: -+ -[source,terminal] ----- -$ oc create configmap ca-config-map --from-file=ca.crt=/path/to/ca -n openshift-config ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to create the config map: - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: ca-config-map - namespace: openshift-config -type: Opaque -data: - ca.crt: ----- -==== -.... - -This renders as: - -> * Define a `ConfigMap` object containing the certificate authority by using the following command: -> + -> [source,terminal] -> ---- -> $ oc create configmap ca-config-map --from-file=ca.crt=/path/to/ca -n openshift-config -> ---- -> + -> [TIP] -> ==== -> You can alternatively apply the following YAML to create the config map: -> -> [source,yaml] -> ---- -> apiVersion: v1 -> kind: ConfigMap -> metadata: -> name: ca-config-map -> namespace: openshift-config -> type: Opaque -> data: -> ca.crt: -> ---- -> ==== - -[NOTE] -==== -If you are adding a particularly long YAML block, you can optionally use the xref:collapsible-content[`%collapsible`] feature to allow users to collapse the code block. -==== - -== Quick markup reference - -|=== -|Convention |Markup |Example rendered output - -|Code blocks - -a| -.... -Use the following syntax for the `oc` command: - ----- -$ oc ----- -.... 
- -a| -> Use the following syntax for the `oc` command: -> -> ---- -> $ oc -> ---- - -a|Use backticks for all non-GUI "system items", including: - -* Inline commands, operations, literal values, variables, parameters, settings, -flags, environment variables, user input -* System term/item, user names, unique or example names for individual API -objects/resources (e.g., a pod named `mypod`), daemon, service, or software -package -* RPM packages -* File names or directory paths - -a| -.... -`oc get` - -Set the `upgrade` variable to `true`. - -Use the `--amend` flag. - -Answer by typing `Yes` or `No` when prompted. - -`user_name` - -`service_name` - -`package_name` - -`filename` -.... - -a| -> Use the `oc get services` command to get a list of services that are currently defined. -> ->   -> -> Use the `--amend` flag. -> ->   -> -> Set the `upgrade` variable to `true`. -> ->   -> -> Answer by typing `Yes` or `No` when prompted. -> ->   -> -> `cluster-admin` user -> ->   -> -> `firewalld` service -> ->   -> -> `rubygems` RPM package -> ->   -> -> The `express.conf` configuration file is located in the `/usr/share` directory. - -|System or software variable to be replaced by the user -a| -.... -`` - -`` - -`` -.... - -a| -> Use the following command to roll back a Deployment, specifying the Deployment name: -> -> `oc rollback ` -> ->   -> -> Apply the new configuration file: -> -> `oc apply -f /.yaml` - -|Use single asterisks for web console / GUI items (menus, buttons, page titles, etc.). -Use two characters to form the arrow in a series of menu items (`$$->$$`). - -a| -.... -Choose *Cluster Console* from the list. - -Navigate to the *Operators* -> *Catalog Sources* page. - -Click *Create Subscription*. -.... - -a| -> Choose *Cluster Console* from the list. -> ->   -> -> Navigate to the *Operators* -> *Catalog Sources* page. -> ->   -> -> Click *Create Subscription*. - -|Use underscores to emphasize the first appearance of a new term. - -a| -.... -An _Operator_ is a method of packaging, deploying, -and managing a Kubernetes application. -.... - -a| -> An _Operator_ is a method of packaging, deploying, and managing a Kubernetes application. - -|Use of underscores for general emphasis is allowed but should only be used -very sparingly. Let the writing, instead of font usage, create the emphasis -wherever possible. - -a| -.... -Do _not_ delete the file. -.... - -a| -> Do _not_ delete the file. - -|Footnotes - -|A footnote is created with the footnote macro. If you plan to reference a footnote more than once, use the ID footnoteref macro. The Customer Portal does not support spaces in the footnoteref. For example, "dynamic PV" should be "dynamicPV". - -|For footnote and footnoteref syntax, see link:http://asciidoctor.org/docs/user-manual/#user-footnotes[AsciiDoctor documentation]. 
- -|=== diff --git a/contributing_to_docs/docs_production_deployment.adoc b/contributing_to_docs/docs_production_deployment.adoc deleted file mode 100644 index b3ba9c39f34c..000000000000 --- a/contributing_to_docs/docs_production_deployment.adoc +++ /dev/null @@ -1,51 +0,0 @@ -[id="contributing-to-docs-docs-production-deployment"] -= Production deployment of the OpenShift documentation -:icons: -:toc: macro -:toc-title: -:toclevels: 1 -:linkattrs: -:description: How to deploy the entire set of documentation - -toc::[] - -== Source-to-image pipeline -OpenShift documentation is built and deployed on an https://cloud.redhat.com/products/dedicated/[OpenShift Dedicated cluster] -using a https://github.com/openshift/source-to-image[source-to-image] build pipeline. - -The source-to-image builder image is built from a https://github.com/openshift-cs/docs-builder/[community project in GitHub] -and published to https://quay.io/repository/openshift-cs/docs-builder. - -== Documentation deployment -Deploying the OpenShift documentation is simplified by using a -https://github.com/openshift-cs/docs-builder/blob/main/template.yaml[pre-built OpenShift template YAML]. - -You can use the following command to deploy the OpenShift Container Platform (commercial) documentation: - -[source,terminal] ----- -oc new-app https://raw.githubusercontent.com/openshift-cs/docs-builder/main/template.yaml \ - -p NAME=docs-openshift-com \ - -p PACKAGE=commercial \ - -p APPLICATION_DOMAIN=docs.openshift.com \ - -p BUILD_REPO=https://github.com/openshift/openshift-docs.git \ - -p BUILD_BRANCH=main ----- - -You can use the following command to deploy the OKD (community) documentation - -[source,terminal] ----- -oc new-app https://raw.githubusercontent.com/openshift-cs/docs-builder/main/template.yaml \ - -p NAME=docs-openshift-com \ - -p PACKAGE=community \ - -p APPLICATION_DOMAIN=docs.openshift.com \ - -p BUILD_REPO=https://github.com/openshift/openshift-docs.git \ - -p BUILD_BRANCH=main ----- - -== Deployment customization -It's possible to change the documentation source repository to another repository for development by changing the -`BUILD_REPO` parameter in the `oc new-app` command. - -To change the builder image, provide the `BUILDER_IMAGE` parameter in the `oc new-app` command. 
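-
-For example, a development deployment might override both parameters as follows. This is a sketch only; the application name, domain, fork URL, and image tag are placeholders:
-
-[source,terminal]
-----
-oc new-app https://raw.githubusercontent.com/openshift-cs/docs-builder/main/template.yaml \
-    -p NAME=docs-openshift-com-dev \
-    -p PACKAGE=commercial \
-    -p APPLICATION_DOMAIN=docs-dev.example.com \
-    -p BUILD_REPO=https://github.com/<your_github_user>/openshift-docs.git \
-    -p BUILD_BRANCH=main \
-    -p BUILDER_IMAGE=quay.io/openshift-cs/docs-builder:latest
-----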
diff --git a/contributing_to_docs/images b/contributing_to_docs/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/contributing_to_docs/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/contributing_to_docs/modules b/contributing_to_docs/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/contributing_to_docs/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/contributing_to_docs/snippets b/contributing_to_docs/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/contributing_to_docs/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/contributing_to_docs/term_glossary.adoc b/contributing_to_docs/term_glossary.adoc deleted file mode 100644 index 94538e00962e..000000000000 --- a/contributing_to_docs/term_glossary.adoc +++ /dev/null @@ -1,745 +0,0 @@ -[id="contributing-to-docs-term-glossary"] -= OpenShift glossary of terms -{product-author} -{product-version} -:data-uri: -:icons: -:experimental: -:toc: macro -:toc-title: - -toc::[] - -== Usage of OpenShift terms - -This topic provides guidelines for referring to the various components of -OpenShift 4 and objects of a running OpenShift system in our documentation. The -goal is to standardize terminology across OpenShift content and be consistent in -the usage of our terminology when referring to OpenShift components or -architecture. - -For terms that are also API objects, there is different guidance for general usage of the term versus referencing the actual API object. This glossary mainly defines the general usage guideline (lowercase, separating words), but be sure to use the object formatting (PascalCase, in monospace) when referring to the actual object. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more information. - -[NOTE] -==== -If you want to add terms or other content to this document, or if anything must -be fixed, send an email to openshift-docs@redhat.com or submit a PR -on GitHub. -==== - -== A - -'''' -=== action - -Usage: action - -An action consists of _project_, _verb_, and _resource_: - -* *Project* is the project containing the resource that is to be acted upon. -* *Verb* is a get, list, create, or update operation. -* *Resource* is the API endpoint being accessed. This is distinct from the -referenced resource itself, which can be a pod, deployment, build, etc. - -'''' -=== API server - -Usage: API server(s) - -A REST API endpoint for interacting with the system. New deployments and -configurations can be created with this endpoint, and the state of the system -can be interrogated through this endpoint as well. - -'''' -=== API service - -Usage: API service(s) - -When referencing the actual object, write as `APIService`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -'''' -=== app - -Usage: app(s) - -Acceptable when referring to a mobile or web xref:application[application]. - -'''' -[id="application"] -=== application - -Usage: application(s) - -Although the term application is no longer an official noun in OpenShift, -customers still create and host applications on OpenShift, and using the term -within certain contexts is acceptable. For example, the term application might -refer to some combination of an image, a Git repository, or a replication -controller, and this application might be running PHP, MySQL, Ruby, JBoss, or -something else. 
- -.Examples of correct usage -==== -OpenShift runs your applications. - -The `new-app` command creates a new application from the components you specify. - -My application has two Ruby web services connected to a database back end and a RabbitMQ message queue, as well as a python worker framework. - -You can check the health of your application by adding probes to the various parts. - -You can host a WordPress application on OpenShift. -==== - -'''' -=== Assisted Installer - -Usage: Assisted Installer - -In Red Hat OpenShift, the Assisted Installer is an installation solution that is offered on the Red Hat Hybrid Cloud Console to provide Software-as-a-Service functionality for cluster installations. - -Do not use: AI, assisted installer - -'''' -=== authorization - -Usage: authorization - -An authorization determines whether an _identity_ is allowed to perform any -action. It consists of _identity_ and _action_. - -== B - -'''' -=== boot image - -Usage: boot image(s) - -* A boot image is a disk image that contains a bootable operating system (OS) and all the configuration settings for the OS, such as drivers. - -'''' -=== build - -Usage: build(s), or when speaking generally about `Build` objects. - -* A build is the process of transforming input parameters into a resulting object. -* A `Build` object encapsulates the inputs needed to produce a new deployable image, as well as the status of the execution and a reference to the pod that executed the build. - -When referencing the actual object, write as "``Build`` object" as appropriate. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -'''' -=== build configuration - -Usage: build configuration(s) when speaking generally about `BuildConfig` objects. - -A `BuildConfig` object is the definition of the entire build process. A build configuration describes a single build definition and a set of triggers for when a new build is created. - -When referencing the actual object, write as "``BuildConfig`` object" as appropriate. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -== C - -'''' -=== cluster - -Usage: cluster - -The collection of controllers, pods, and services and related DNS and networking -routing configuration that are defined on the system. - -'''' -=== cluster service version - -Usage: cluster service version - -Operator Lifecycle Manager (OLM), part of the Operator Framework, uses a cluster service version (CSV) to define the metadata that accompanies an Operator container image and assist in running the Operator in a cluster. This metadata is defined in a `ClusterServiceVersion` API object used to populate user interfaces with information such as its logo, description, and version. It is also a source of technical information that is required to run the Operator, like the RBAC rules it requires and which custom resources (CRs) it manages or depends on. - -This is commonly abbreviated as a CSV. - -'''' -=== config map - -Usage: config map(s) - -Config maps hold configuration data for pods to consume. - -When referencing the actual object, write as `ConfigMap`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -Do not use: configuration map(s) - -'''' -=== container - -Usage: container(s) - -'''' -=== containerize - -Usage: containerize(d) - -Use "containerized" as an adjective when referring to applications made up of -multiple services that are distributed in containers. 
"Containerized" can be -used interchangeably with "container-based." - -'''' -=== container group - -Usage: container group - -'''' -=== control plane - -Usage: control plane - -The control plane, which is composed of control plane machines, manages the {product-title} cluster. The control plane machines manage workloads on the compute machines, which are also known as worker machines. - -Note that the OpenShift "control plane" was previously known as "master" and could still be in the code. - -'''' -=== custom resource - -Usage: custom resource (CR) - -A resource implemented through the Kubernetes `CustomResourceDefinition` API. A custom resource is distinct from the built-in Kubernetes resources, such as the pod and service resources. Every CR is part of an API group. - -Do not capitalize. - -'''' -=== custom resource definition (CRD) - -Usage: custom resource definition (CRD) for the first time reference; CRD thereafter. - -Create a custom resource definition to define a new custom resource. - -This is commonly abbreviated as a CRD. - -== D - -'''' -=== deployment - -Usage: deployment(s) when speaking generally about `Deployment` or `DeploymentConfig` objects - -* A `Deployment` is a Kubernetes-native object that provides declarative updates for pods and -replica sets. -* A `DeploymentConfig` is an OpenShift-specific object that defines the template for a pod and manages -deploying new images or configuration changes. Uses replication controllers. Predates Kubernetes `Deployment` objects. - -When referencing the actual object, write as `Deployment` or `DeploymentConfig` as appropriate. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -To avoid further confusion, do not refer to an overall OpenShift installation / -instance / cluster as an "OpenShift deployment". - -Do not use: deployment configuration(s), deployment config(s) - -'''' -=== disconnected - -Usage: disconnected environment, disconnected installation - -Use "disconnected" when discussing installing a cluster in an environment that does not have an active connection to the internet. Use "disconnected" regardless of whether the restriction is physical or logical. - -"Disconnected" is the preferred term over "restricted", "air-gapped", or "offline". - -'''' -=== Dockerfile - -Usage: Dockerfile; wrapped with [filename] markup. See -link:doc_guidelines.adoc[Documentation Guidelines] for markup information. - -Docker can build images automatically by reading the instructions from a -Dockerfile. A Dockerfile is a text document that contains all the commands you -would normally execute manually to build a docker image. - -Source: https://docs.docker.com/reference/builder/ - -.Examples of correct usage -==== -Open the [filename]#Dockerfile# and make the following changes. - -Create a [filename]#Dockerfile# at the root of your repository. -==== - -== E - -'''' -=== event - -Usage: event(s) - -An event is a data record expressing an occurrence and its context, based on the CNCF CloudEvents specification. -Events contain two types of information: the event data representing the occurrence, and the context metadata providing contextual information about the occurrence. -Events are routed from an event producer, or source, to connected event consumers. - -Routing can be performed based on information contained in the event, but an event will not identify a specific routing destination. 
-Events can be delivered through various industry standard protocols such as HTTP, AMQP, MQTT, or SMTP, or through messaging and broker systems, such as Kafka, NATS, AWS Kinesis, or Azure Event Grid. - -When referencing the actual object, write as `Event`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -// NOTE: This is inconsistently used, e.g. https://docs.openshift.com/container-platform/4.5/rest_api/metadata_apis/event-core-v1.html -See: link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#event-v1-core[Event v1 core API], link:https://github.com/cloudevents/spec/blob/master/primer.md#cloudevents-concepts[CloudEvents concepts], and link:https://github.com/cloudevents/spec/blob/master/spec.md#event[CloudEvents specification]. - -== F - -== G - -'''' -=== group/version/kind (GVK) - -Usage: group/version/kind (GVK) for the first time reference; GVK thereafter. - -A unique identifier for a Kubernetes API, specifying its _group_ (a collection of related APIs), _version_ (defines the release and level of stability), and _kind_ (an individual API type or name). - -While "GroupVersionKind" does appear in the API guide, typically there should not be a reason to mark up in reference to a specific object. Favor simply "GVK", or "GVKs" for pluralization, after the first time reference as much as possible. Avoid pluralizing the long form (e.g., group/version/kinds or groups/versions/kinds). - -== H - -== I - -'''' -=== identity - -Usage: identity or identities - -Both the user name and list of groups the user belongs to. - -'''' -=== image - -Usage: image(s) - -'''' -=== image stream - -Usage: image stream(s) - -Image streams provide a means of creating and updating container images in an ongoing way. - -'''' -=== Ignition config - -Usage: Ignition config file or Ignition config files - -The file that Ignition uses to configure Red Hat Enterprise Linux CoreOS (RHCOS) during -operating system initialization. The installation program generates different -Ignition config files to initialize bootstrap, control plane, and worker nodes. - -'''' - -=== Ingress - -Usage: Ingress - -API object that allows developers to expose services through an HTTP(S) aware -load balancing and proxy layer via a public DNS entry. The Ingress resource may -further specify TLS options and a certificate, or specify a public CNAME that -the OpenShift Ingress Controller should also accept for HTTP and HTTPS traffic. -An administrator typically configures their Ingress Controller to be visible -outside the cluster firewall, and might also add additional security, caching, or -traffic controls on the service content. - -'''' - -=== Ingress Controller - -Usage: Ingress Controller(s) - -A resource that forwards traffic to endpoints of services. The Ingress Controller -replaces router from {product-title} 3 and earlier. - -'''' -=== installer-provisioned infrastructure - -Usage: installer-provisioned infrastructure - -If the installation program deploys and configures the infrastructure that the -cluster runs on, it is an installer-provisioned infrastructure installation. - -Do not use: IPI - -== J - -== K - -'''' -=== kubelet - -Usage: kubelet(s) - -The agent that controls a Kubernetes node. Each node runs a kubelet, which -handles starting and stopping containers on a node, based on the desired state -defined by the control plane (also known as master). 
- -'''' -=== Kubernetes API server - -Usage: Kubernetes API server - -== L - -== M - -'''' -=== MetalLB - -Usage: MetalLB, MetalLB Operator, MetalLB project - -MetalLB is an open source project that provides a way to add services of type `LoadBalancer` to clusters that are not installed on infrastructure from a cloud provider. MetalLB primarily targets on-premise, bare-metal clusters, but any infrastructure that does not include a native load-balancing capability is a candidate. - -"MetalLB" always has the first letter and last two letters capitalized in general text. Do not use "Metallb." - -'''' -=== minion - -Usage: Deprecated. Use link:#node[node] instead. - -== N - -'''' -=== node - -Usage: node(s) - -A -http://docs.openshift.org/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node[node] -provides the runtime environments for containers. - -'''' -=== namespace - -Usage: namespace - -Typically synonymous with link:#project[project] in OpenShift parlance, which is -preferred. - -== O - -'''' -=== OpenShift - -Usage: OpenShift Container Platform, OpenShift Online, OpenShift Dedicated, -OpenShift Container Engine - -The OpenShift product name should be paired with its product distribution / -variant name whenever possible. Previously, the upstream distribution was called -OpenShift Origin, however it is now called OKD; use of the OpenShift Origin name -is deprecated. - -Avoid using the name "OpenShift" on its own when referring to something that -applies to all distributions, as OKD does not have OpenShift in its name. -However, the following components currently use "OpenShift" in the name and are -allowed for use across all distribution documentation: - -- OpenShift Pipeline -- OpenShift SDN -- OpenShift Ansible Broker (deprecated in 4.2 / removed in 4.4) - -'''' -=== OpenShift API server - -Usage: OpenShift API server - -'''' -=== OpenShift CLI - -Usage: OpenShift CLI (`oc`) - -The `oc` tool is the command-line interface of OpenShift 3 and 4. - -When referencing as a prerequisite for a procedure module, use the following -construction: Install the OpenShift CLI (`oc`). - -'''' -=== Operator - -Usage: Operator(s) - -An Operator is a method of packaging, deploying and managing a Kubernetes -application. A Kubernetes application is an application that is both deployed on -a Kubernetes cluster (including OpenShift clusters) and managed using the -Kubernetes APIs and `kubectl` or `oc` tooling. - -The term "Operator" is always captalized. - -While "containerized" is allowed, do not use "Operatorize" to refer to building an -Operator that packages an application. - -.Examples of correct usage -==== -Install the etcd Operator. - -Build an Operator using the Operator SDK. -==== - -See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for -more on Operator naming. - -'''' -=== OperatorHub - -Usage: OperatorHub - -'''' -=== Operator Lifecycle Manager (OLM) -Usage: Operator Lifecycle Manager, OLM - -Refer to this component without a preceding article ("the"). - -.Examples of correct usage -==== -You can use OpenShift Lifecycle Manager (OLM) to manually or automatically upgrade an Operator. -==== - -'''' -=== Options menu - -Usage: Options menu; use sparingly; not to be confused with Actions menu, which -signifies a specific menu seen in the web console. - -This describes a menu type commonly called a "kebab", "hamburger", or "overflow" -menu that does not have hover text or a given name or label in the web console. 
- -'''' - -== P - -'''' -=== persistent volume (PV) - -Usage: persistent volume - -Developers can use a persistent volume claim (PVC) to request a persistent volume (PV) resource without having specific knowledge of the underlying storage infrastructure. - -'''' -=== persistent volume claim (PVC) - -Usage: persistent volume claim - -Developers can use a persistent volume claim (PVC) to request a persistent volume (PV) resource without having specific knowledge of the underlying storage infrastructure. - -'''' -=== pod - -Usage: pod(s) - -Kubernetes object that groups related Docker containers that have to share -network, file system, or memory together for placement on a node. Multiple -instances of a pod can run to provide scaling and redundancy. - -When referencing the actual object, write as `Pod`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -'''' -=== project - -Usage: project(s) - -A project allows a community of users to organize and manage their content in -isolation from other communities. It is an extension of the `Namespace` object -from Kubernetes. - -When referencing the actual object, write as `Project`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -== Q - -'''' -=== quick start - -Usage: quick start(s) - -There are two types of quick starts in OpenShift: - -* quick starts that are guided tutorials in the web console -* quick start templates that allow users to quickly get started creating a new application - -Be sure to provide context about which type of quick start you are referring to. - -== R - -'''' -=== replica set - -Usage: replica set(s) - -Similar to a replication controller, a replica set is a native Kubernetes API -object that ensures a specified number of pod replicas are running at any given -time. Used by `Deployment` objects. - -When referencing the actual object, write as `ReplicaSet`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -See link:https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/[ReplicaSet - Kubernetes]. - -'''' -=== replication controller - -Usage: replication controller(s) - -Kubernetes object that ensures N (as specified by the user) instances of a given -pod are running at all times. Used by deployment configs. - -'''' -=== route - -Usage: route(s) - -OpenShift-specific API object that allows developers to expose services through -an HTTP(S) aware load balancing and proxy layer via a public DNS entry. The -route might further specify TLS options and a certificate, or specify a public -CNAME that the OpenShift Ingress Controller should also accept for HTTP and -HTTPS traffic. An administrator typically configures their Ingress Controller to -be visible outside the cluster firewall, and might also add additional security, -caching, or traffic controls on the service content. - -== S - -'''' -=== scheduler - -Usage: scheduler(s) - -Component of the Kubernetes control plane or OpenShift control plane that manages the state of -the system, places pods on nodes, and ensures that all containers that are -expected to be running are actually running. - -'''' -=== secret - -Usage: secret(s) - -Kubernetes API object that holds secret data of a certain type. - -See link:https://kubernetes.io/docs/concepts/configuration/secret/[Secrets - Kubernetes]. 
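For orientation, a minimal and purely hypothetical manifest of the object that this term names. Note the PascalCase `kind` (the API object, written as `Secret` in monospace) versus the lowercase term "secret" used in running text:

[source,yaml]
----
apiVersion: v1
kind: Secret             # reference the API object as `Secret`
metadata:
  name: example-secret   # hypothetical name, for illustration only
type: Opaque
stringData:
  password: changeme     # placeholder value
----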
- -'''' -=== security context constraints (SCC) - -Usage: security context constraints - -Security context constraints govern the ability to make requests that affect the security context that will be applied to a container. - -When referencing the actual object, write as `SecurityContextConstraints`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -This is commonly abbreviated as SCC. - -'''' -=== service - -Usage: service(s) - -Kubernetes native API object that serves as an internal load balancer. It -identifies a set of replicated pods to proxy the connections it -receives to them. Backing pods can be added to or removed from a service -arbitrarily while the service remains consistently available, enabling anything -that depends on the service to refer to it at a consistent address. - -A service is a named abstraction of software service (for example, `mysql`) -consisting of local port (for example `3306`) that the proxy listens on, and the -selector that determines which pods will answer requests sent through the proxy. - -Do not confuse with link:https://www.openservicebrokerapi.org/[Open Service Broker API related objects]. -See -link:https://docs.openshift.com/container-platform/3.11/architecture/service_catalog/index.html#service-catalog-concepts-terminology[Service Catalog Concepts and Terminology]. - -'''' -=== service account - -Usage: service account(s) - -A service account binds together: - -* a name, understood by users, and perhaps by peripheral systems, for an identity -* a principal that can be authenticated and authorized -* a set of secrets - -'''' -=== single-node OpenShift - -Usage: single-node OpenShift - -Single-node OpenShift (or {product-title} on a single-node cluster) is a deployment footprint that provides control plane and worker node capabilities in a single server for deployments in constrained environments. - -Do not use: Single Node Openshift (SNO). - -'''' -=== three-node OpenShift - -Usage: three-node OpenShift - -Three-node OpenShift is a compact cluster deployment footprint on three nodes for deployments in constrained environments. It provides three control plane nodes that you configure as schedulable for workloads. - -Do not use: Three Node Openshift. - -'''' -=== SkyDNS - -Usage: SkyDNS - -Component of the Kubernetes control plane or OpenShift control plane that provides -cluster-wide DNS resolution of internal hostnames for services and pods. - -'''' -=== Source-to-Image (S2I) - -Usage: Source-to-Image for the first time reference; S2I thereafter. - -Deprecated abbreviation (do not use): STI - -'''' -=== spec - -Usage: spec(s) - -In addition to "spec file" being allowed related to RPM spec files, general -usage of "spec" is allowed when describing Kubernetes or OpenShift object specs -/ manifests / definitions. - -.Examples of correct usage -==== -Update the `Pod` spec to reflect the changes. -==== - -'''' -=== storage class - -Usage: storage class(es) - -Kubernetes API object that describes the parameters for a class of storage for -which persistent volumes can be dynamically provisioned. storage classes are -non-namespaced; the name of the storage class according to etcd is in -`ObjectMeta.Name`. - -When referencing the actual object, write as `StorageClass`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -See link:https://kubernetes.io/docs/concepts/storage/storage-classes/[Storage Classes - Kubernetes]. 
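For orientation, a minimal and purely hypothetical `StorageClass` manifest, again showing the PascalCase object `kind` versus the lowercase term used in running text:

[source,yaml]
----
apiVersion: storage.k8s.io/v1
kind: StorageClass                          # reference the API object as `StorageClass`
metadata:
  name: example-storage-class               # hypothetical name, for illustration only
provisioner: kubernetes.io/no-provisioner   # placeholder provisioner
volumeBindingMode: WaitForFirstConsumer
----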
- -== T - -== U - -'''' -=== update - -Usage: update - -Use "update" when referring to updating the cluster to a new version. Although "upgrade" is sometimes used interchangeably, "update" is the preferred term to use, for consistency. - - -'''' -=== user-provisioned infrastructure - -Usage: user-provisioned infrastructure - -If the user must deploy and configure separate virtual or physical hosts as part of -the cluster deployment process, it is a user-provisioned infrastructure -installation. - -Do not use: UPI - -'''' - -== V - -== W - -== X - -== Y - -== Z diff --git a/contributing_to_docs/tools_and_setup.adoc b/contributing_to_docs/tools_and_setup.adoc deleted file mode 100644 index 9f4bc22452c8..000000000000 --- a/contributing_to_docs/tools_and_setup.adoc +++ /dev/null @@ -1,185 +0,0 @@ -[id="contributing-to-docs-tools-and-setup"] -= Install and set up the tools and software -:icons: -:toc: macro -:toc-title: -:toclevels: 1 -:linkattrs: -:description: How to set up and install the tools to contribute - -toc::[] - -== Create a GitHub account -Before you can contribute to OpenShift documentation, you must -https://www.github.com/join[sign up for a GitHub account]. - -== Set up authentication -When you have your account set up, follow the instructions to -https://help.github.com/articles/generating-ssh-keys/[generate and set up SSH -keys on GitHub] for proper authentication between your workstation and GitHub. - -Confirm authentication is working correctly with the following command: - ----- -$ ssh -T git@github.com ----- - -== Fork and clone the OpenShift documentation repository -You must fork and set up the OpenShift documentation repository on your -workstation so that you can create PRs and contribute. These steps must only -be performed during initial setup. - -. Fork the https://github.com/openshift/openshift-docs repository into your -GitHub account from the GitHub UI. You can do this by clicking on *Fork* in the -upper right-hand corner. - -. In the terminal on your workstation, change into the directory where you want -to clone the forked repository. - -. Clone the forked repository onto your workstation with the following -command, replacing __ with your actual GitHub username. -+ ----- -$ git clone git@github.com:/openshift-docs.git ----- - -. Change into the directory for the local repository you just cloned. -+ ----- -$ cd openshift-docs ----- - -. Add an upstream pointer back to the OpenShift's remote repository, in this -case _openshift-docs_. -+ ----- -$ git remote add upstream git@github.com:openshift/openshift-docs.git ----- - -This ensures that you are tracking the remote repository to keep your local -repository in sync with it. - -== Install AsciiBinder and dependencies -When you have the documentation repository cloned and set up, you are ready to -install the software and tools you will use to create the content. All OpenShift -documentation is created in AsciiDoc, and is processed with https://github.com/redhataccess/ascii_binder[AsciiBinder], -which is an http://asciidoctor.org/[AsciiDoctor]-based docs management system. 
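If you have not written AsciiDoc before, the following minimal, hypothetical module shows what the source looks like; real modules must follow the conventions in link:doc_guidelines.adoc[Documentation Guidelines]:

[source,asciidoc]
----
[id="example-concept-module"]
= Example concept heading

A short paragraph that explains a single concept.

* AsciiDoc uses `*` for unordered list items.
* AsciiBinder assembles modules like this one into the published documentation.
----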
- - -=== What you require -The following are minimum requirements: - -* A bash shell environment (Linux and OS X include a bash shell environment out -of the box, but if you are on Windows you can use http://cygwin.com/[Cygwin]) -* https://www.ruby-lang.org/en/[Ruby] -* http://www.git-scm.com/[Git] -* A web browser (Firefox, Chrome, or Safari) -* An editor that can strip trailing whitespace, such as -link:https://code.visualstudio.com/[Visual Studio Code]. - -=== Install the required software dependencies on a Linux system -The following instructions describe how to install all the required tools to do -live content editing on a Fedora Linux system. - -1. Install the _RubyGems_ package with `yum install rubygems` -+ -[NOTE] -==== -On certain systems, `yum` installs an older version of RubyGems that can cause issues. As an alternative, you can install RubyGems by using RVM. The following example is referenced from the link:https://rvm.io/rvm/install[RVM site]: - -[source,terminal] ----- -$ curl -sSL https://get.rvm.io | bash -s stable --ruby ----- -==== - -2. Install _Ruby_ development packages with `yum install ruby-devel` -3. Install _gcc_ with `yum install gcc-c++` -4. Install _redhat-rpm-config_ with `yum install redhat-rpm-config` -5. Install _make_ with `yum install make` -6. Install _asciidoctor-diagram_ with `gem install asciidoctor-diagram` -7. Install the _ascii_binder_ gem with `gem install ascii_binder` - -NOTE: If you already have AsciiBinder installed, you might be due for an update. -These directions assume that you are using AsciiBinder 0.2.0 or newer. To check -and update if necessary, simply run `gem update ascii_binder`. Note that you might require root permissions. - -=== Install the required software dependencies in a toolbox container on Linux - -You can use link:https://containertoolbx.org/[`toolbx`] to create a Fedora-based container for our tools on most Linux distributions, including RHEL. By using Fedora as the base, you have access to relatively recent versions of required software packages. - -.Prerequisites - -* Your distro has link:https://podman.io/[Podman] 1.4.0 or greater. - -.Procedure - -. If you don't already have `toolbx`, link:https://containertoolbx.org/install/[install it]. - -. To create a Fedora 37 container, on a command line, enter: -+ -[source,terminal] ----- -$ toolbox create --distro fedora --release f37 ----- -+ -where: - -:: Specifies the name that you want to give your toolbox container. - -. Enter the container. From the command line, run: -+ -[source,terminal] ----- -$ toolbox enter ----- - -. Install dependencies for our tools. Within the toolbox that you entered, run: -+ -[source,terminal] ----- -[toolbox] $ sudo dnf install ruby-devel gcc-c++ redhat-rpm-config make ----- - -. Install the required Ruby gems: -+ -[source,terminal] ----- -[toolbox] $ gem install ascii_binder asciidoctor-diagram ----- - -You now have a toolbox container that you can use to build our documentation no matter which distribution you use. - -NOTE: Press *Ctrl + D* or enter `exit` to exit the container. To use AsciiBinder or update the software in the container, remember to `toolbox enter ` first. - -=== Building the collection -With the initial setup complete, you are ready to build the collection. - -1. From the `openshift-docs` directory, run an initial build: -+ ----- -$ cd openshift-docs -$ asciibinder build ----- -2. Open the generated HTML file in your web browser. 
This will be located in the -`openshift-docs/_preview//` directory, with the same path and -filename as the original `.adoc` file you edited, only it will be with the -`.html` extension. - -== Clean up -The `.gitignore` file is set up to prevent anything under the `_preview` and -`_package` directories from being committed. However, you can reset the -environment manually by running: - ----- -$ asciibinder clean ----- - -== Next steps -With the repository and tools set up on your workstation, you can now either -edit existing content or create assemblies and modules. - -* link:doc_guidelines.adoc[Review the documentation guidelines] to understand -some basic guidelines to keep things consistent across our content. -* link:create_or_edit_content.adoc[Create a local working branch] on your -workstation to edit existing content or create content. diff --git a/distr_tracing/_attributes b/distr_tracing/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/distr_tracing/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_arch/_attributes b/distr_tracing/distr_tracing_arch/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/distr_tracing/distr_tracing_arch/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_arch/distr-tracing-architecture.adoc b/distr_tracing/distr_tracing_arch/distr-tracing-architecture.adoc deleted file mode 100644 index ff905a8f4c22..000000000000 --- a/distr_tracing/distr_tracing_arch/distr-tracing-architecture.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: ASSEMBLY -[id="distr-tracing-architecture"] -= Distributed tracing architecture -include::_attributes/common-attributes.adoc[] -:context: distributed-tracing-architecture - -toc::[] - -Every time a user takes an action in an application, a request is executed by the architecture that may require dozens of different services to participate to produce a response. -{DTProductName} lets you perform distributed tracing, which records the path of a request through various microservices that make up an application. - -_Distributed tracing_ is a technique that is used to tie the information about different units of work together — usually executed in different processes or hosts — to understand a whole chain of events in a distributed transaction. -Developers can visualize call flows in large microservice architectures with distributed tracing. -It is valuable for understanding serialization, parallelism, and sources of latency. - -{DTProductName} records the execution of individual requests across the whole stack of microservices, and presents them as traces. A _trace_ is a data/execution path through the system. An end-to-end trace is comprised of one or more spans. - -A _span_ represents a logical unit of work in {DTProductName} that has an operation name, the start time of the operation, and the duration, as well as potentially tags and logs. Spans may be nested and ordered to model causal relationships. 
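As a purely illustrative sketch (not the output of any particular tool, and with hypothetical service names), a single request might produce a trace whose spans nest as follows, with each span carrying an operation name, a start time, and a duration:

----
trace: GET /checkout (total 240 ms)
  span: frontend    operation="GET /checkout"   start=0 ms    duration=240 ms
    span: cart      operation="get-cart"        start=10 ms   duration=50 ms
    span: payment   operation="authorize"       start=70 ms   duration=150 ms
      span: db      operation="query"           start=80 ms   duration=40 ms
----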
- -include::modules/distr-tracing-product-overview.adoc[leveloffset=+1] - -include::modules/distr-tracing-features.adoc[leveloffset=+1] - -include::modules/distr-tracing-architecture.adoc[leveloffset=+1] diff --git a/distr_tracing/distr_tracing_arch/images b/distr_tracing/distr_tracing_arch/images deleted file mode 120000 index e4c5bd02a10a..000000000000 --- a/distr_tracing/distr_tracing_arch/images +++ /dev/null @@ -1 +0,0 @@ -../images/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_arch/modules b/distr_tracing/distr_tracing_arch/modules deleted file mode 120000 index 43aab75b53c9..000000000000 --- a/distr_tracing/distr_tracing_arch/modules +++ /dev/null @@ -1 +0,0 @@ -../modules/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_arch/snippets b/distr_tracing/distr_tracing_arch/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/distr_tracing/distr_tracing_arch/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_config/_attributes b/distr_tracing/distr_tracing_config/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/distr_tracing/distr_tracing_config/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_config/images b/distr_tracing/distr_tracing_config/images deleted file mode 120000 index e4c5bd02a10a..000000000000 --- a/distr_tracing/distr_tracing_config/images +++ /dev/null @@ -1 +0,0 @@ -../images/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_config/modules b/distr_tracing/distr_tracing_config/modules deleted file mode 120000 index 43aab75b53c9..000000000000 --- a/distr_tracing/distr_tracing_config/modules +++ /dev/null @@ -1 +0,0 @@ -../modules/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_config/snippets b/distr_tracing/distr_tracing_config/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/distr_tracing/distr_tracing_config/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_install/_attributes b/distr_tracing/distr_tracing_install/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/distr_tracing/distr_tracing_install/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_install/distr-tracing-deploying-jaeger.adoc b/distr_tracing/distr_tracing_install/distr-tracing-deploying-jaeger.adoc deleted file mode 100644 index 4f5de2decc7c..000000000000 --- a/distr_tracing/distr_tracing_install/distr-tracing-deploying-jaeger.adoc +++ /dev/null @@ -1,94 +0,0 @@ -:_content-type: ASSEMBLY -[id="distr-tracing-deploying"] -= Configuring and deploying distributed tracing -include::_attributes/common-attributes.adoc[] -:context: deploying-distr-tracing-platform - -toc::[] - -The {JaegerName} Operator uses a custom resource definition (CRD) file that defines the architecture and configuration settings to be used when creating and deploying the {JaegerShortName} resources. You can either install the default configuration or modify the file to better suit your business requirements. - -{JaegerName} has predefined deployment strategies. You specify a deployment strategy in the custom resource file. When you create a {JaegerShortName} instance the Operator uses this configuration file to create the objects necessary for the deployment. 
- -.Jaeger custom resource file showing deployment strategy -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: MyConfigFile -spec: - strategy: production <1> ----- - -<1> The {JaegerName} Operator currently supports the following deployment strategies: - -* *allInOne* (Default) - This strategy is intended for development, testing, and demo purposes; it is not intended for production use. The main backend components, Agent, Collector, and Query service, are all packaged into a single executable that is configured, by default, to use in-memory storage. -+ -[NOTE] -==== -In-memory storage is not persistent, which means that if the {JaegerShortName} instance shuts down, restarts, or is replaced, your trace data is lost. In-memory storage also cannot be scaled, because each pod has its own memory. For persistent storage, you must use the `production` or `streaming` strategies, which use Elasticsearch as the default storage. -==== - -* *production* - The production strategy is intended for production environments, where long-term storage of trace data is important and a more scalable, highly available architecture is required. Each of the backend components is therefore deployed separately. The Agent can be injected as a sidecar on the instrumented application. The Query and Collector services are configured with a supported storage type, currently Elasticsearch. Multiple instances of each of these components can be provisioned as required for performance and resilience purposes. - -* *streaming* - The streaming strategy is designed to augment the production strategy by providing a streaming capability that effectively sits between the Collector and the Elasticsearch backend storage. This reduces the pressure on the backend storage under high-load situations and enables other trace post-processing capabilities to tap into the real-time span data directly from the streaming platform (https://access.redhat.com/documentation/en-us/red_hat_amq/7.6/html/using_amq_streams_on_openshift/index[AMQ Streams]/ https://kafka.apache.org/documentation/[Kafka]). -+ -[NOTE] -==== -The streaming strategy requires an additional Red Hat subscription for AMQ Streams. -==== - -[NOTE] -==== -The streaming deployment strategy is currently unsupported on {ibmzProductName}. -==== - -[NOTE] -==== -There are two ways to install and use {DTProductName}: as part of a service mesh or as a standalone component. If you have installed {DTShortName} as part of {SMProductName}, you can perform basic configuration as part of the xref:../../service_mesh/v2x/installing-ossm.adoc#installing-ossm[ServiceMeshControlPlane], but for complete control you should configure a Jaeger CR and then xref:../../service_mesh/v2x/ossm-observability.adoc#ossm-config-external-jaeger_observability[reference your distributed tracing configuration file in the ServiceMeshControlPlane].
- -==== - -include::modules/distr-tracing-deploy-default.adoc[leveloffset=+1] - -include::modules/distr-tracing-deploy-production-es.adoc[leveloffset=+1] - -include::modules/distr-tracing-deploy-streaming.adoc[leveloffset=+1] - -[id="validating-your-jaeger-deployment"] -== Validating your deployment - -include::modules/distr-tracing-accessing-jaeger-console.adoc[leveloffset=+2] - -[id="customizing-your-deployment"] -== Customizing your deployment - -include::modules/distr-tracing-deployment-best-practices.adoc[leveloffset=+2] - -ifdef::openshift-enterprise,openshift-dedicated[] -For information about configuring persistent storage, see xref:../../storage/understanding-persistent-storage.adoc[Understanding persistent storage] and the appropriate configuration topic for your chosen storage option. -endif::[] - -include::modules/distr-tracing-config-default.adoc[leveloffset=+2] - -include::modules/distr-tracing-config-jaeger-collector.adoc[leveloffset=+2] - -//include::modules/distr-tracing-config-otel-collector.adoc[leveloffset=+2] - -include::modules/distr-tracing-config-sampling.adoc[leveloffset=+2] - -include::modules/distr-tracing-config-storage.adoc[leveloffset=+2] - -include::modules/distr-tracing-config-query.adoc[leveloffset=+2] - -include::modules/distr-tracing-config-ingester.adoc[leveloffset=+2] - -[id="injecting-sidecars"] -== Injecting sidecars - -{JaegerName} relies on a proxy sidecar within the application's pod to provide the agent. The {JaegerName} Operator can inject Agent sidecars into Deployment workloads. You can enable automatic sidecar injection or manage it manually. - -include::modules/distr-tracing-sidecar-automatic.adoc[leveloffset=+2] - -include::modules/distr-tracing-sidecar-manual.adoc[leveloffset=+2] diff --git a/distr_tracing/distr_tracing_install/distr-tracing-deploying-otel.adoc b/distr_tracing/distr_tracing_install/distr-tracing-deploying-otel.adoc deleted file mode 100644 index e1e19f61dc1e..000000000000 --- a/distr_tracing/distr_tracing_install/distr-tracing-deploying-otel.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="distr-tracing-deploying-otel"] -= Configuring and deploying distributed tracing data collection -include::_attributes/common-attributes.adoc[] -:context: deploying-distr-tracing-data-collection - -toc::[] - -The {OTELName} Operator uses a custom resource definition (CRD) file that defines the architecture and configuration settings to be used when creating and deploying the {OTELName} resources. You can either install the default configuration or modify the file to better suit your business requirements. - -include::modules/distr-tracing-config-otel-collector.adoc[leveloffset=+1] diff --git a/distr_tracing/distr_tracing_install/distr-tracing-installing.adoc b/distr_tracing/distr_tracing_install/distr-tracing-installing.adoc deleted file mode 100644 index 9a64b8c418e9..000000000000 --- a/distr_tracing/distr_tracing_install/distr-tracing-installing.adoc +++ /dev/null @@ -1,43 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-distributed-tracing"] -= Installing distributed tracing -include::_attributes/common-attributes.adoc[] -:context: install-distributed-tracing - -toc::[] - -You can install {DTProductName} on {product-title} in either of two ways: - -* You can install {DTProductName} as part of {SMProductName}. Distributed tracing is included by default in the Service Mesh installation. 
To install {DTProductName} as part of a service mesh, follow the xref:../../service_mesh/v2x/preparing-ossm-installation.adoc#preparing-ossm-installation[Red Hat Service Mesh Installation] instructions. You must install {DTProductName} in the same namespace as your service mesh, that is, the `ServiceMeshControlPlane` and the {DTProductName} resources must be in the same namespace. - -* If you do not want to install a service mesh, you can use the {DTProductName} Operators to install {DTShortName} by itself. To install {DTProductName} without a service mesh, use the following instructions. - -== Prerequisites - -Before you can install {DTProductName}, review the installation activities, and ensure that you meet the prerequisites: - -* Possess an active {product-title} subscription on your Red Hat account. If you do not have a subscription, contact your sales representative for more information. - -* Review the xref:../../architecture/architecture-installation.adoc#installation-overview_architecture-installation[{product-title} {product-version} overview]. -* Install {product-title} {product-version}. - -** xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[Install {product-title} {product-version} on AWS] -** xref:../../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[Install {product-title} {product-version} on user-provisioned AWS] -** xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[Install {product-title} {product-version} on bare metal] -** xref:../../installing/installing_vsphere/installing-vsphere.adoc#installing-vsphere[Install {product-title} {product-version} on vSphere] -* Install the version of the OpenShift CLI (`oc`) that matches your {product-title} version and add it to your path. - -* An account with the `cluster-admin` role. - -include::modules/distr-tracing-install-overview.adoc[leveloffset=+1] - -include::modules/distr-tracing-install-elasticsearch.adoc[leveloffset=+1] - -include::modules/distr-tracing-install-jaeger-operator.adoc[leveloffset=+1] - -include::modules/distr-tracing-install-otel-operator.adoc[leveloffset=+1] - -//// -== Next steps -* xref:../../distr_tracing/distr_tracing_install/distr-tracing-deploying.adoc#deploying-distributed-tracing[Deploy {DTProductName}]. -//// diff --git a/distr_tracing/distr_tracing_install/distr-tracing-removing.adoc b/distr_tracing/distr_tracing_install/distr-tracing-removing.adoc deleted file mode 100644 index f4d9b3d26524..000000000000 --- a/distr_tracing/distr_tracing_install/distr-tracing-removing.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -[id="removing-distributed-tracing"] -= Removing distributed tracing -include::_attributes/common-attributes.adoc[] -:context: removing-distributed-tracing - -toc::[] - -The steps for removing {DTProductName} from an {product-title} cluster are as follows: - -. Shut down any {DTProductName} pods. -. Remove any {DTProductName} instances. -. Remove the {JaegerName} Operator. -. Remove the {OTELName} Operator. - -include::modules/distr-tracing-removing-instance.adoc[leveloffset=+1] - -include::modules/distr-tracing-removing-instance-cli.adoc[leveloffset=+1] - - -== Removing the {DTProductName} Operators - -.Procedure - -. Follow the instructions for xref:../../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster[Deleting Operators from a cluster]. - -* Remove the {JaegerName} Operator. 
- -//* Remove the {OTELName} Operator. - -* After the {JaegerName} Operator has been removed, if appropriate, remove the OpenShift Elasticsearch Operator. diff --git a/distr_tracing/distr_tracing_install/distr-tracing-updating.adoc b/distr_tracing/distr_tracing_install/distr-tracing-updating.adoc deleted file mode 100644 index 9de6057bcebf..000000000000 --- a/distr_tracing/distr_tracing_install/distr-tracing-updating.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: ASSEMBLY -[id="upgrading-distributed-tracing"] -= Upgrading distributed tracing -include::_attributes/common-attributes.adoc[] -:context: upgrading-distributed-tracing - -toc::[] - -Operator Lifecycle Manager (OLM) controls the installation, upgrade, and role-based access control (RBAC) of Operators in a cluster. OLM runs by default in {product-title}. -OLM queries for available Operators as well as for upgrades to installed Operators. -For more information about how {product-title} handles upgrades, see the xref:../../operators/understanding/olm/olm-understanding-olm.adoc#olm-understanding-olm[Operator Lifecycle Manager] documentation. - -During an update, the {DTProductName} Operators upgrade the managed {DTShortName} instances to the version associated with the Operator. Whenever a new version of the {JaegerName} Operator is installed, all the {JaegerShortName} application instances managed by the Operator are upgraded to the Operator's version. For example, after upgrading the Operator from 1.10 to 1.11, the Operator scans for running {JaegerShortName} instances and upgrades them to 1.11 as well. - -For specific instructions on how to update the OpenShift Elasticsearch Operator, see xref:../../logging/cluster-logging-upgrading.adoc#cluster-logging-upgrading_cluster-logging-upgrading[Updating OpenShift Logging]. - -include::modules/distr-tracing-change-operator-20.adoc[leveloffset=+1] - -[IMPORTANT] -==== -If you have not already updated your OpenShift Elasticsearch Operator as described in xref:../../logging/cluster-logging-upgrading.adoc[Updating OpenShift Logging], complete that update before updating your {JaegerName} Operator. -==== - -For instructions on how to update the Operator channel, see xref:../../operators/admin/olm-upgrading-operators.adoc[Updating installed Operators].
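As a rough sketch only, you can inspect and change the channel on the Operator's `Subscription` object with the OpenShift CLI (`oc`). The subscription name (`jaeger-product`), namespace (`openshift-operators`), and channel (`stable`) shown here are assumptions; check the actual values in your cluster before patching:

[source,terminal]
----
$ oc get subscriptions -n openshift-operators
----

[source,terminal]
----
$ oc patch subscription jaeger-product -n openshift-operators \
    --type merge --patch '{"spec":{"channel":"stable"}}'
----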
diff --git a/distr_tracing/distr_tracing_install/images b/distr_tracing/distr_tracing_install/images deleted file mode 120000 index e4c5bd02a10a..000000000000 --- a/distr_tracing/distr_tracing_install/images +++ /dev/null @@ -1 +0,0 @@ -../images/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_install/modules b/distr_tracing/distr_tracing_install/modules deleted file mode 120000 index 43aab75b53c9..000000000000 --- a/distr_tracing/distr_tracing_install/modules +++ /dev/null @@ -1 +0,0 @@ -../modules/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_install/snippets b/distr_tracing/distr_tracing_install/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/distr_tracing/distr_tracing_install/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/distr_tracing/distributed-tracing-release-notes.adoc b/distr_tracing/distributed-tracing-release-notes.adoc deleted file mode 100644 index fcac0d5f7275..000000000000 --- a/distr_tracing/distributed-tracing-release-notes.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_content-type: ASSEMBLY -[id="distr-tracing-release-notes"] -= Distributed tracing release notes -include::_attributes/common-attributes.adoc[] -:context: distributed-tracing-release-notes - -toc::[] - -include::modules/distr-tracing-product-overview.adoc[leveloffset=+1] - -include::modules/making-open-source-more-inclusive.adoc[leveloffset=+1] - -include::modules/support.adoc[leveloffset=+1] - -include::modules/distr-tracing-rn-new-features.adoc[leveloffset=+1] - -include::modules/distr-tracing-rn-technology-preview.adoc[leveloffset=+1] - -include::modules/distr-tracing-rn-known-issues.adoc[leveloffset=+1] - -include::modules/distr-tracing-rn-fixed-issues.adoc[leveloffset=+1] diff --git a/distr_tracing/images b/distr_tracing/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/distr_tracing/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/distr_tracing/modules b/distr_tracing/modules deleted file mode 120000 index 43aab75b53c9..000000000000 --- a/distr_tracing/modules +++ /dev/null @@ -1 +0,0 @@ -../modules/ \ No newline at end of file diff --git a/distr_tracing/snippets b/distr_tracing/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/distr_tracing/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/getting_started/_attributes b/getting_started/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/getting_started/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/getting_started/accessing-your-services.adoc b/getting_started/accessing-your-services.adoc deleted file mode 100644 index 93c9fb6080fb..000000000000 --- a/getting_started/accessing-your-services.adoc +++ /dev/null @@ -1,30 +0,0 @@ -:_content-type: ASSEMBLY -[id="accessing-your-services"] -= Accessing your services -include::_attributes/common-attributes.adoc[] -:context: access - -toc::[] - -Once you have an {product-title} subscription, you can access your services. - -include::modules/dedicated-creating-your-cluster.adoc[leveloffset=+1] - -include::modules/dedicated-accessing-your-cluster.adoc[leveloffset=+1] - -//// - -== Receiving status updates - -Access the status portal at link:https://status-dedicated.openshift.com[]. You -can also subscribe to notifications via email, SMS, or RSS by changing your -preferences in the status portal. 
- -//// - -== Requesting support - -If you have questions about your environment or must open a support ticket, -you can open or view a support case in the -link:https://access.redhat.com/support/cases/#/case/list[Red Hat Customer -Portal]. diff --git a/getting_started/dedicated-networking.adoc b/getting_started/dedicated-networking.adoc deleted file mode 100644 index 5f3e4a324521..000000000000 --- a/getting_started/dedicated-networking.adoc +++ /dev/null @@ -1,10 +0,0 @@ -:_content-type: ASSEMBLY -[id="dedicated-networking"] -= Networking -include::_attributes/common-attributes.adoc[] -:context: access - -toc::[] - -include::modules/dedicated-configuring-your-application-routes.adoc[leveloffset=+1] -include::modules/dedicated-exposing-TCP-services.adoc[leveloffset=+1] diff --git a/getting_started/deleting-your-cluster.adoc b/getting_started/deleting-your-cluster.adoc deleted file mode 100644 index f06749fd3a62..000000000000 --- a/getting_started/deleting-your-cluster.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -[id="deleting-your-cluster"] -= Deleting your cluster -include::_attributes/common-attributes.adoc[] -:context: access - -To delete your {product-title} cluster: - -. From link:https://console.redhat.com/openshift[console.redhat.com/openshift], click - on the cluster you want to delete. - -. Click the *Actions* button, then *Delete Cluster*. - -. Type the name of the cluster highlighted in bold, then click *Delete*. - -Cluster deletion occurs automatically. diff --git a/getting_started/images b/getting_started/images deleted file mode 120000 index e4c5bd02a10a..000000000000 --- a/getting_started/images +++ /dev/null @@ -1 +0,0 @@ -../images/ \ No newline at end of file diff --git a/getting_started/kubernetes-overview.adoc b/getting_started/kubernetes-overview.adoc deleted file mode 100644 index 54d4654afabd..000000000000 --- a/getting_started/kubernetes-overview.adoc +++ /dev/null @@ -1,35 +0,0 @@ -:_content-type: ASSEMBLY -[id="kubernetes-overview"] -= Kubernetes overview -include::_attributes/common-attributes.adoc[] -:context: kubernetes-overview - -toc::[] - -Kubernetes is an open source container orchestration tool developed by Google. You can run and manage container-based workloads by using Kubernetes. The most common Kubernetes use case is to deploy an array of interconnected microservices, building an application in a cloud-native way. You can create Kubernetes clusters that can span hosts across on-premise, public, private, or hybrid clouds. - -Traditionally, applications were deployed on top of a single operating system. With virtualization, you can split the physical host into several virtual hosts. Working on virtual instances on shared resources is not optimal for efficiency and scalability. Because a virtual machine (VM) consumes as many resources as a physical machine, providing resources to a VM such as CPU, RAM, and storage can be expensive. Also, you might see your application degrading in performance due to virtual instance usage on shared resources. - -.Evolution of container technologies for classical deployments -image::247-OpenShift-Kubernetes-Overview.png[] - -To solve this problem, you can use containerization technologies that segregate applications in a containerized environment. Similar to a VM, a container has its own filesystem, vCPU, memory, process space, dependencies, and more. Containers are decoupled from the underlying infrastructure, and are portable across clouds and OS distributions.
Containers are inherently much lighter than a fully-featured OS, and are lightweight isolated processes that run on the operating system kernel. VMs are slower to boot, and are an abstraction of physical hardware. VMs run on a single machine with the help of a hypervisor. - -You can perform the following actions by using Kubernetes: - -* Sharing resources -* Orchestrating containers across multiple hosts -* Installing new hardware configurations -* Running health checks and self-healing applications -* Scaling containerized applications - -include::modules/kubernetes-components.adoc[leveloffset=+1] - -include::modules/kubernetes-resources.adoc[leveloffset=+1] - -.Architecture of Kubernetes -image::247_OpenShift_Kubernetes_Overview-2.png[] - -A cluster is a single computational unit consisting of multiple nodes in a cloud environment. A Kubernetes cluster includes a control plane and worker nodes. You can run Kubernetes containers across various machines and environments. The control plane node controls and maintains the state of a cluster. You can run the Kubernetes application by using worker nodes. You can use the Kubernetes namespace to differentiate cluster resources in a cluster. Namespace scoping is applicable for resource objects, such as deployment, service, and pods. You cannot use namespace for cluster-wide resource objects such as storage class, nodes, and persistent volumes. - -include::modules/kubernetes-conceptual-guidelines.adoc[leveloffset=+1] diff --git a/getting_started/modules b/getting_started/modules deleted file mode 120000 index 43aab75b53c9..000000000000 --- a/getting_started/modules +++ /dev/null @@ -1 +0,0 @@ -../modules/ \ No newline at end of file diff --git a/getting_started/openshift-cli.adoc b/getting_started/openshift-cli.adoc deleted file mode 100644 index 3852a6598f34..000000000000 --- a/getting_started/openshift-cli.adoc +++ /dev/null @@ -1,84 +0,0 @@ -:_content-type: ASSEMBLY -[id="openshift-cli"] -= Creating and building an application using the CLI -include::_attributes/common-attributes.adoc[] -:context: openshift-cli - -toc::[] - -[id="openshift-cli-before-you-begin"] - -== Before you begin - -* Review xref:../cli_reference/openshift_cli/getting-started-cli.adoc#cli-about-cli_cli-developer-commands[About the OpenShift CLI]. -* You must be able to access a running instance of {product-title}. If you do not have access, contact your cluster administrator. -* You must have the OpenShift CLI (`oc`) xref:../cli_reference/openshift_cli/getting-started-cli.adoc#installing-openshift-cli[downloaded and installed]. 
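For example, the following commands are a quick sanity check that the CLI is installed and that you can authenticate. The API server URL and user name are placeholders; substitute the values for your own cluster:

[source,terminal]
----
$ oc version --client
----

[source,terminal]
----
$ oc login <api_server_url> --username=<username>
----

[source,terminal]
----
$ oc whoami
----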
- -include::modules/getting-started-cli-login.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-login[oc login] -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-logout[oc logout] - -include::modules/getting-started-cli-creating-new-project.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-new-project[oc new-project] - -include::modules/getting-started-cli-granting-permissions.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../authentication/understanding-authentication.adoc#understanding-authentication[Understanding authentication] -* xref:../authentication/using-rbac.adoc#authorization-overview_using-rbac[RBAC overview] -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-policy-add-role-to-user[oc policy add-role-to-user] - -include::modules/getting-started-cli-deploying-first-image.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-new-app[oc new-app] - - -include::modules/getting-started-cli-creating-route.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-create-route-edge[oc create route edge] -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-get[oc get] - - -include::modules/getting-started-cli-examining-pod.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-describe[oc describe] -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-get[oc get] -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-label[oc label] -* xref:../cli_reference/openshift_cli/getting-started-cli.adoc#viewing-pods[Viewing pods] -* xref:../cli_reference/openshift_cli/getting-started-cli.adoc#viewing-pod-logs[Viewing pod logs] - -include::modules/getting-started-cli-scaling-app.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-scale[oc scale] - -include::modules/getting-started-cli-deploying-python-app.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-new-app[oc new-app] - -include::modules/getting-started-cli-connecting-a-database.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-new-project[oc new-project] - -include::modules/getting-started-cli-creating-secret.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-create-secret-generic[oc create secret generic] -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-set-env[oc set env] -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-rollout-status[oc rollout status] - -include::modules/getting-started-cli-load-data-output.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-exec[oc exec] -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-label[oc 
label] -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-get[oc get] diff --git a/getting_started/openshift-overview.adoc b/getting_started/openshift-overview.adoc deleted file mode 100644 index 82aebc6bbe42..000000000000 --- a/getting_started/openshift-overview.adoc +++ /dev/null @@ -1,115 +0,0 @@ -:_content-type: ASSEMBLY -[id="openshift-overview"] -= {product-title} overview -include::_attributes/common-attributes.adoc[] -:context: openshift-overview - -toc::[] - -{product-title} is a cloud-based Kubernetes container platform. The foundation of {product-title} is based on Kubernetes and therefore shares the same technology. It is designed to allow applications and the data centers that support them to expand from just a few machines and applications to thousands of machines that serve millions of clients. - -{product-title} enables you to do the following: - -* Provide developers and IT organizations with cloud application platforms that can be used for deploying applications on secure and scalable resources. -* Require minimal configuration and management overhead. -* Bring the Kubernetes platform to customer data centers and cloud. -* Meet security, privacy, compliance, and governance requirements. - -With its foundation in Kubernetes, {product-title} incorporates the same technology that serves as the engine for massive telecommunications, streaming video, gaming, banking, and other applications. Its implementation in open Red Hat technologies lets you extend your containerized applications beyond a single cloud to on-premise and multi-cloud environments. - -include::modules/getting-started-openshift-common-terms.adoc[leveloffset=+1] -include::modules/understanding-openshift.adoc[leveloffset=+1] - - -[id="openshift-overview-install-openshift"] -== Installing {product-title} - -The {product-title} installation program offers you flexibility. You can use the installation program to deploy a cluster on infrastructure that the installation program provisions and the cluster maintains or deploy a cluster on infrastructure that you prepare and maintain. - -For more information about the installation process, the supported platforms, and choosing a method of installing and preparing your cluster, see the following: - -* xref:../installing/index.adoc#installation-overview_ocp-installation-overview[OpenShift Container Platform installation overview] -* xref:../installing/index.adoc#installation-process_ocp-installation-overview[Installation process] -* xref:../installing/index.adoc#supported-platforms-for-openshift-clusters_ocp-installation-overview[Supported platforms for OpenShift Container Platform clusters] -* xref:../installing/installing-preparing.adoc#installing-preparing-selecting-cluster-type[Selecting a cluster installation type] - -include::modules/installation-openshift-local.adoc[leveloffset=+2] - -[id="openshift-next-steps"] -== Next Steps -=== For developers -Develop and deploy containerized applications with {product-title}. {product-title} is a platform for developing and deploying containerized applications. {product-title} documentation helps you: - -* **xref:../architecture/understanding-development.adoc#understanding-development[Understand {product-title} development]**: Learn the different types of containerized applications, from simple containers to advanced Kubernetes deployments and Operators. 
- -* **xref:../applications/projects/working-with-projects.adoc#working-with-projects[Work with projects]**: Create projects from the {product-title} web console or OpenShift CLI (`oc`) to organize and share the software you develop. - -* **xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Work with applications]**: - -Use xref:../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[the *Developer* perspective] in the {product-title} web console to -xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[create and deploy applications]. - -Use the -xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-viewing-application-composition-using-topology-view[*Topology* view] -to see your applications, monitor status, connect and group components, and modify your code base. - -* **xref:../cli_reference/odo-important-update.adoc#odo-important_update[Use the developer CLI tool (`odo`)]**: -The `odo` CLI tool lets developers create single or multi-component applications and automates deployment, build, and service route configurations. It abstracts complex Kubernetes and {product-title} concepts, allowing you to focus on developing your applications. - -* **xref:../cicd/pipelines/understanding-openshift-pipelines.adoc#op-key-features[Create CI/CD Pipelines]**: Pipelines are serverless, cloud-native, continuous integration, and continuous deployment systems that run in isolated containers. -They use standard Tekton custom resources to automate deployments and are designed for decentralized teams working on microservices-based architecture. - -* **Deploy Helm charts**: -xref:../applications/working_with_helm_charts/understanding-helm.adoc#understanding-helm[Helm 3] -is a package manager that helps developers define, install, and update -application packages on Kubernetes. A Helm chart is a packaging format that -describes an application that can be deployed using the Helm CLI. - -* **xref:../cicd/builds/understanding-image-builds.adoc#understanding-image-builds[Understand image builds]**: Choose from different build strategies (Docker, S2I, custom, and pipeline) that can include different kinds of source materials (Git repositories, local binary inputs, and external artifacts). Then, follow examples of build types from basic builds to advanced builds. - -* **xref:../openshift_images/create-images.adoc#create-images[Create container images]**: A container image is the most basic building block in {product-title} (and Kubernetes) applications. Defining image streams lets you gather multiple versions of an image in one place as you continue its development. S2I containers let you insert your source code into a base container that is set up to run code of a particular type, such as Ruby, Node.js, or Python. - -* **xref:../applications/deployments/what-deployments-are.adoc#what-deployments-are[Create deployments]**: Use `Deployment` and `DeploymentConfig` objects to exert fine-grained management over applications. - xref:../applications/deployments/managing-deployment-processes.adoc#deployment-operations[Manage deployments] using the *Workloads* page or OpenShift CLI (`oc`). Learn xref:../applications/deployments/deployment-strategies.adoc#deployment-strategies[rolling, recreate, and custom] deployment strategies.
- -* **xref:../openshift_images/using-templates.adoc#using-templates[Create templates]**: Use existing templates or create your own templates that describe how an application is built or deployed. A template can combine images with descriptions, parameters, replicas, exposed ports and other content that defines how an application can be run or built. - -* **xref:../operators/understanding/olm-what-operators-are.adoc#olm-what-operators-are[Understand Operators]**: Operators are the preferred method for creating on-cluster applications for {product-title} {product-version}. Learn about the Operator Framework and how to deploy applications using installed Operators into your projects. - -* **xref:../operators/operator_sdk/osdk-about.adoc#osdk-about[Develop Operators]**: Operators are the preferred method for creating on-cluster applications for {product-title} {product-version}. Learn the workflow for building, testing, and deploying Operators. Then, create your own Operators based on xref:../operators/operator_sdk/ansible/osdk-ansible-support.adoc#osdk-ansible-support[Ansible] or -xref:../operators/operator_sdk/helm/osdk-helm-support.adoc#osdk-helm-support[Helm], or configure xref:../operators/operator_sdk/osdk-monitoring-prometheus.adoc#osdk-monitoring-prometheus[built-in Prometheus monitoring] using the Operator SDK. - -* **xref:../rest_api/index.adoc#api-index[REST API reference]**: Learn about {product-title} application programming interface endpoints. - -=== For administrators -* **xref:../architecture/architecture.adoc#architecture-overview-architecture[Understand {product-title} management]**: Learn about components -of the {product-title} {product-version} control plane. See how {product-title} control plane and worker nodes are managed and updated through the xref:../machine_management/creating_machinesets/creating-machineset-aws.adoc#machine-api-overview_creating-machineset-aws[Machine API] and xref:../architecture/control-plane.adoc#operators-overview_control-plane[Operators]. - -* **xref:../authentication/understanding-authentication.adoc#understanding-authentication[Manage users and groups]**: Add users and groups with different levels of permissions to use or modify clusters. - -* **xref:../authentication/understanding-authentication.adoc#understanding-authentication[Manage authentication]**: Learn how user, group, and API authentication -works in {product-title}. {product-title} supports multiple identity providers. - -* **xref:../networking/understanding-networking.adoc#understanding-networking[Manage networking]**: The cluster network in {product-title} is managed by the xref:../networking/cluster-network-operator.adoc#cluster-network-operator[Cluster Network Operator] (CNO). The CNO uses iptables rules in xref:../networking/openshift_sdn/configuring-kube-proxy.adoc#configuring-kube-proxy[kube-proxy] to direct traffic between nodes and pods running on those nodes. The Multus Container Network Interface adds the capability to attach xref:../networking/multiple_networks/understanding-multiple-networks.adoc#understanding-multiple-networks[multiple network interfaces] to a pod. Using -xref:../networking/network_policy/about-network-policy.adoc#about-network-policy[network policy] features, you can isolate your pods or permit selected traffic. - -* **xref:../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Manage storage]**: {product-title} allows cluster administrators to configure persistent storage. 
- -* **xref:../operators/understanding/olm-understanding-operatorhub.adoc#olm-understanding-operatorhub[Manage Operators]**: Lists of Red Hat, ISV, and community Operators can -be reviewed by cluster administrators and xref:../operators/admin/olm-adding-operators-to-cluster.adoc#olm-adding-operators-to-a-cluster[installed on their clusters]. After you install them, you can xref:../operators/user/olm-creating-apps-from-installed-operators.adoc#olm-creating-apps-from-installed-operators[run], xref:../operators/admin/olm-upgrading-operators.adoc#olm-upgrading-operators[upgrade], back up, or otherwise manage the Operator on your cluster. - -* **xref:../operators/understanding/crds/crd-extending-api-with-crds.adoc#crd-extending-api-with-crds[Use custom resource definitions (CRDs) to modify the cluster]**: Cluster features implemented with Operators can be modified with CRDs. Learn to xref:../operators/understanding/crds/crd-extending-api-with-crds.adoc#crd-creating-custom-resources-definition_crd-extending-api-with-crds[create a CRD] and xref:../operators/understanding/crds/crd-managing-resources-from-crds.adoc#crd-managing-resources-from-crds[manage resources from CRDs]. - -* **xref:../applications/quotas/quotas-setting-per-project.adoc#quotas-setting-per-project[Set resource quotas]**: Choose from CPU, memory, and other system resources to xref:../applications/quotas/quotas-setting-per-project.adoc#quotas-setting-per-project[set quotas]. - -* **xref:../applications/pruning-objects.adoc#pruning-objects[Prune and reclaim resources]**: Reclaim space by pruning unneeded Operators, groups, deployments, builds, images, registries, and cron jobs. - -* **xref:../scalability_and_performance/recommended-performance-scale-practices/recommended-infrastructure-practices.adoc#scaling-cluster-monitoring-operator[Scale] and xref:../scalability_and_performance/using-node-tuning-operator.adoc#using-node-tuning-operator[tune] clusters**: Set cluster limits, tune nodes, scale cluster monitoring, and optimize networking, storage, and routes for your environment. - -* **xref:../updating/updating_a_cluster/updating_disconnected_cluster/disconnected-update-osus.adoc#update-service-overview_updating-restricted-network-cluster-osus[Using the OpenShift Update Service in a disconnected environment]**: Learn about installing and managing a local OpenShift Update Service for recommending {product-title} updates in disconnected environments. - -* **xref:../monitoring/monitoring-overview.adoc#monitoring-overview[Monitor clusters]**: -Learn to xref:../monitoring/configuring-the-monitoring-stack.adoc#configuring-the-monitoring-stack[configure the monitoring stack]. -After configuring monitoring, use the web console to access xref:../monitoring/reviewing-monitoring-dashboards.adoc#reviewing-monitoring-dashboards[monitoring dashboards]. In addition to infrastructure metrics, you can also scrape and view metrics for your own services. - -* **xref:../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring_about-remote-health-monitoring[Remote health monitoring]**: {product-title} collects anonymized aggregated information about your cluster. Using Telemetry and the Insights Operator, this data is received by Red Hat and used to improve {product-title}.
You can view the xref:../support/remote_health_monitoring/showing-data-collected-by-remote-health-monitoring.adoc#showing-data-collected-by-remote-health-monitoring_showing-data-collected-by-remote-health-monitoring[data collected by remote health monitoring]. diff --git a/getting_started/openshift-web-console.adoc b/getting_started/openshift-web-console.adoc deleted file mode 100644 index f34f48e3b1c9..000000000000 --- a/getting_started/openshift-web-console.adoc +++ /dev/null @@ -1,80 +0,0 @@ -:_content-type: ASSEMBLY -[id="openshift-web-console"] -= Creating and building an application using the web console -include::_attributes/common-attributes.adoc[] -:context: openshift-web-console - -toc::[] - -[id="openshift-web-console-before-you-begin"] - -== Before you begin -* Review xref:../web_console/web-console.adoc#web-console-overview[Accessing the web console]. -* You must be able to access a running instance of {product-title}. If you do not have access, contact your cluster administrator. - -include::modules/getting-started-web-console-login.adoc[leveloffset=+1] - -include::modules/getting-started-web-console-creating-new-project.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../authentication/using-rbac.adoc#default-roles_using-rbac[Default cluster roles] -* xref:../applications/projects/working-with-projects.adoc#viewing-a-project-using-the-web-console_projects[Viewing a project using the web console] -* xref:../applications/projects/working-with-projects.adoc#odc-providing-project-permissions-using-developer-perspective_projects[Providing access permissions to your project using the Developer perspective] -* xref:../applications/projects/working-with-projects.adoc#deleting-a-project-using-the-web-console_projects[Deleting a project using the web console] - -include::modules/getting-started-web-console-granting-permissions.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../authentication/understanding-authentication.adoc#rbac-users_understanding-authentication[Understanding authentication] -* xref:../authentication/using-rbac.adoc#authorization-overview_using-rbac[RBAC overview] - -include::modules/getting-started-web-console-deploying-first-image.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc[Creating applications using the Developer perspective] -* xref:../applications/projects/working-with-projects.adoc#viewing-a-project-using-the-web-console_projects[Viewing a project using the web console] -* xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-viewing-application-topology_viewing-application-composition-using-topology-view[Viewing the topology of your application] -* xref:../applications/projects/working-with-projects.adoc#deleting-a-project-using-the-web-console_projects[Deleting a project using the web console] - -include::modules/getting-started-web-console-examining-pod.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-interacting-with-applications-and-components_viewing-application-composition-using-topology-view[Interacting with applications and components] -* 
xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-scaling-application-pods-and-checking-builds-and-routes_viewing-application-composition-using-topology-view[Scaling application pods and checking builds and routes] -* xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-labels-and-annotations-used-for-topology-view_viewing-application-composition-using-topology-view[Labels and annotations used for the Topology view] - -include::modules/getting-started-web-console-scaling-app.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc#recommended-scale-practices_cluster-scaling[Recommended practices for scaling the cluster] -* xref:../nodes/pods/nodes-pods-autoscaling.adoc#nodes-pods-autoscaling-about_nodes-pods-autoscaling[Understanding horizontal pod autoscalers] -* xref:../nodes/pods/nodes-pods-vertical-autoscaler.adoc#nodes-pods-vertical-autoscaler-about_nodes-pods-vertical-autoscaler[About the Vertical Pod Autoscaler Operator] - -include::modules/getting-started-web-console-deploying-python-app.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-adding-services-to-your-application_viewing-application-composition-using-topology-view[Adding services to your application] -* xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-importing-codebase-from-git-to-create-application_odc-creating-applications-using-developer-perspective[Importing a codebase from Git to create an application] -* xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-viewing-application-topology_viewing-application-composition-using-topology-view[Viewing the topology of your application] -* xref:../applications/projects/working-with-projects.adoc#odc-providing-project-permissions-using-developer-perspective_projects[Providing access permissions to your project using the Developer perspective] -* xref:../applications/projects/working-with-projects.adoc#deleting-a-project-using-the-web-console_projects[Deleting a project using the web console] - -include::modules/getting-started-web-console-connecting-a-database.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-adding-services-to-your-application_viewing-application-composition-using-topology-view[Adding services to your application] -* xref:../applications/projects/working-with-projects.adoc#viewing-a-project-using-the-web-console_projects[Viewing a project using the web console] -* xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-viewing-application-topology_viewing-application-composition-using-topology-view[Viewing the topology of your application] -* xref:../applications/projects/working-with-projects.adoc#odc-providing-project-permissions-using-developer-perspective_projects[Providing access permissions to your project using the Developer perspective] -* xref:../applications/projects/working-with-projects.adoc#deleting-a-project-using-the-web-console_projects[Deleting a project using the web console] - -include::modules/getting-started-web-console-creating-secret.adoc[leveloffset=+2] -[role="_additional-resources"] 
-.Additional resources -* xref:../nodes/pods/nodes-pods-secrets.adoc#nodes-pods-secrets-about_nodes-pods-secrets[Understanding secrets] - -include::modules/getting-started-web-console-load-data-output.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../applications/projects/working-with-projects.adoc#odc-providing-project-permissions-using-developer-perspective_projects[Providing access permissions to your project using the Developer perspective] -* xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-labels-and-annotations-used-for-topology-view_viewing-application-composition-using-topology-view[Labels and annotations used for the Topology view] diff --git a/getting_started/scaling-your-cluster.adoc b/getting_started/scaling-your-cluster.adoc deleted file mode 100644 index 0468c0c63a78..000000000000 --- a/getting_started/scaling-your-cluster.adoc +++ /dev/null @@ -1,9 +0,0 @@ -:_content-type: ASSEMBLY -[id="scaling-your-cluster"] -= Scaling your cluster -include::_attributes/common-attributes.adoc[] -:context: access - -toc::[] - -include::modules/dedicated-scaling-your-cluster.adoc[leveloffset=+1] diff --git a/getting_started/snippets b/getting_started/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/getting_started/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/hardware_enablement/_attributes b/hardware_enablement/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/hardware_enablement/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/hardware_enablement/about-hardware-enablement.adoc b/hardware_enablement/about-hardware-enablement.adoc deleted file mode 100644 index 36cef34b0431..000000000000 --- a/hardware_enablement/about-hardware-enablement.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="about-hardware-enablement"] -= About specialized hardware and driver enablement -include::_attributes/common-attributes.adoc[] -:context: about-hardware-enablement - -toc::[] - -The Driver Toolkit (DTK) is a container image in the {product-title} payload which is meant to be used as a base image on which to build driver containers. The Driver Toolkit image contains the kernel packages commonly required as dependencies to build or install kernel modules as well as a few tools needed in driver containers. The version of these packages matches the kernel version running on the RHCOS nodes in the corresponding {product-title} release. - -Driver containers are container images used for building and deploying out-of-tree kernel modules and drivers on container operating systems such as {op-system-first}. Kernel modules and drivers are software libraries running with a high level of privilege in the operating system kernel. They extend the kernel functionalities or provide the hardware-specific code required to control new devices. Examples include hardware devices like field-programmable gate arrays (FPGA) or graphics processing units (GPU), and software-defined storage solutions, which all require kernel modules on client machines. Driver containers are the first layer of the software stack used to enable these technologies on {product-title} deployments.
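As an illustrative sketch only (this example is not part of the deleted module content), you can typically locate the Driver Toolkit image that corresponds to a given release payload by querying the release image with the OpenShift CLI (`oc`). The release image reference below is a placeholder, not a value taken from this documentation:

[source,terminal]
----
# Illustrative only: look up the Driver Toolkit image in a release payload.
# Substitute your cluster's actual release image for the placeholder shown here.
$ oc adm release info quay.io/openshift-release-dev/ocp-release:4.13.0-x86_64 --image-for=driver-toolkit
----

The returned pull spec is the image that a driver container build would typically use as its base image.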
\ No newline at end of file diff --git a/hardware_enablement/images b/hardware_enablement/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/hardware_enablement/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/hardware_enablement/kmm-kernel-module-management.adoc b/hardware_enablement/kmm-kernel-module-management.adoc deleted file mode 100644 index 6db1495f3d79..000000000000 --- a/hardware_enablement/kmm-kernel-module-management.adoc +++ /dev/null @@ -1,92 +0,0 @@ -:_content-type: ASSEMBLY -[id="kernel-module-management-operator"] -= Kernel Module Management Operator -include::_attributes/common-attributes.adoc[] -:context: kernel-module-management-operator - -toc::[] - -Learn about the Kernel Module Management (KMM) Operator and how you can use it to deploy out-of-tree kernel modules and device plugins on {product-title} clusters. - -:FeatureName: Kernel Module Management Operator - -include::modules/kmm-about-kmm.adoc[leveloffset=+1] -include::modules/kmm-installation.adoc[leveloffset=+1] -include::modules/kmm-installing-using-web-console.adoc[leveloffset=+2] -include::modules/kmm-installing-using-cli.adoc[leveloffset=+2] -include::modules/kmm-installing-older-versions.adoc[leveloffset=+2] -include::modules/kmm-deploying-modules.adoc[leveloffset=+1] -include::modules/kmm-creating-module-cr.adoc[leveloffset=+2] -include::modules/kmm-security.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../authentication/understanding-and-managing-pod-security-admission.adoc#understanding-and-managing-pod-security-admission[Understanding and managing pod security admission]. - -include::modules/kmm-example-module-cr.adoc[leveloffset=+2] -include::modules/kmm-creating-moduleloader-image.adoc[leveloffset=+1] -include::modules/kmm-running-depmod.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../hardware_enablement/psap-driver-toolkit.adoc#driver-toolkit[Driver Toolkit]. - -include::modules/kmm-building-in-cluster.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../cicd/builds/build-configuration.adoc#build-configuration[Build configuration resources]. - -include::modules/kmm-using-driver-toolkit.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../hardware_enablement/psap-driver-toolkit.adoc#driver-toolkit[Driver Toolkit]. - -//Deploying kernel modules (Might just leave this short intro in the assembly and put further module below it) -// * Running ModuleLoader images (CONCEPT, or could be included in the assembly with the intro) -// * Using the device plugin (CONCEPT, or could be included in the assembly with the intro) -// * Creating the Module Custom Resource (PROCEDURE? Seems like not a process the user does after reading it. 
Maybe a REFERENCE) -// * Security and permissions (CONCEPT or REFERENCE) -// * ServiceAccounts and SecurityContextConstraints (can include in Security and permissions) -// * Pod Security Standards (can include in Security and permissions) -// * Example Module CR (REFERENCE) - -// Added for TELCODOCS-1065 -include::modules/kmm-using-signing-with-kmm.adoc[leveloffset=+1] -include::modules/kmm-adding-the-keys-for-secureboot.adoc[leveloffset=+1] -include::modules/kmm-checking-the-keys.adoc[leveloffset=+2] -include::modules/kmm-signing-a-prebuilt-driver-container.adoc[leveloffset=+1] -include::modules/kmm-building-and-signing-a-moduleloader-container-image.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -For information about creating a service account, see link:https://docs.openshift.com/container-platform/4.12/authentication/understanding-and-creating-service-accounts.html#service-accounts-managing_understanding-service-accounts[Creating service accounts]. - -include::modules/kmm-debugging-and-troubleshooting.adoc[leveloffset=+1] - -// Added for TELCODOCS-1067 -include::modules/kmm-firmware-support.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../hardware_enablement/kmm-kernel-module-management.adoc#kmm-creating-moduleloader-image_kernel-module-management-operator[Creating a ModuleLoader image]. - -include::modules/kmm-configuring-the-lookup-path-on-nodes.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../post_installation_configuration/machine-configuration-tasks.adoc#understanding-the-machine-config-operator[Machine Config Operator]. - -include::modules/kmm-building-a-moduleloader-image.adoc[leveloffset=+2] -include::modules/kmm-tuning-the-module-resource.adoc[leveloffset=+2] - -// Added for TELCODOCS-1059 -include::modules/kmm-troubleshooting.adoc[leveloffset=+1] -include::modules/kmm-must-gather-tool.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../support/gathering-cluster-data.adoc#about-must-gather_gathering-cluster-data[About the must-gather tool] - -include::modules/kmm-gathering-data-for-kmm.adoc[leveloffset=+3] -include::modules/kmm-gathering-data-for-kmm-hub.adoc[leveloffset=+3] diff --git a/hardware_enablement/modules b/hardware_enablement/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/hardware_enablement/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/hardware_enablement/psap-driver-toolkit.adoc b/hardware_enablement/psap-driver-toolkit.adoc deleted file mode 100644 index 9817ddbde47f..000000000000 --- a/hardware_enablement/psap-driver-toolkit.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="driver-toolkit"] -= Driver Toolkit -include::_attributes/common-attributes.adoc[] -:context: driver-toolkit - -toc::[] - -Learn about the Driver Toolkit and how you can use it as a base image for driver containers that enable special software and hardware devices on {product-title} deployments.
- -:FeatureName: The Driver Toolkit - -include::modules/psap-driver-toolkit.adoc[leveloffset=+1] - -include::modules/psap-driver-toolkit-pulling.adoc[leveloffset=+1] - -include::modules/psap-driver-toolkit-using.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_driver-toolkkit-id"] -== Additional resources - -* For more information about configuring registry storage for your cluster, see xref:../registry/configuring-registry-operator.adoc#registry-removed_configuring-registry-operator[Image Registry Operator in OpenShift Container Platform]. diff --git a/hardware_enablement/psap-node-feature-discovery-operator.adoc b/hardware_enablement/psap-node-feature-discovery-operator.adoc deleted file mode 100644 index e35b19794cf8..000000000000 --- a/hardware_enablement/psap-node-feature-discovery-operator.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_content-type: ASSEMBLY -[id="node-feature-discovery-operator"] -= Node Feature Discovery Operator -include::_attributes/common-attributes.adoc[] -:context: node-feature-discovery-operator - -toc::[] - -Learn about the Node Feature Discovery (NFD) Operator and how you can use it to expose node-level information by orchestrating Node Feature Discovery, a Kubernetes add-on for detecting hardware features and system configuration. - -include::modules/psap-node-feature-discovery-operator.adoc[leveloffset=+1] - -include::modules/psap-installing-node-feature-discovery-operator.adoc[leveloffset=+1] - -include::modules/psap-using-node-feature-discovery-operator.adoc[leveloffset=+1] - -include::modules/psap-configuring-node-feature-discovery.adoc[leveloffset=+1] - -include::modules/psap-node-feature-discovery-using-topology-updater.adoc[leveloffset=+1] - -include::modules/psap-node-feature-discovery-topology-updater-command-reference.adoc[leveloffset=+2] diff --git a/hardware_enablement/snippets b/hardware_enablement/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/hardware_enablement/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/hosted_control_planes/_attributes b/hosted_control_planes/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/hosted_control_planes/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/hosted_control_planes/hcp-backup-restore-dr.adoc b/hosted_control_planes/hcp-backup-restore-dr.adoc deleted file mode 100644 index 4464b822f02c..000000000000 --- a/hosted_control_planes/hcp-backup-restore-dr.adoc +++ /dev/null @@ -1,159 +0,0 @@ -:_content-type: ASSEMBLY -[id="hcp-backup-restore-dr"] -= Backup, restore, and disaster recovery for hosted control planes -include::_attributes/common-attributes.adoc[] -:context: hcp-backup-restore-dr - -toc::[] - -If you need to back up and restore etcd on a hosted cluster or provide disaster recovery for a hosted cluster, see the following procedures. - -:FeatureName: Hosted control planes -include::snippets/technology-preview.adoc[] - -[id="hcp-backup-restore"] -== Backing up and restoring etcd on a hosted cluster - -If you use hosted control planes on {product-title}, the process to back up and restore etcd is different from xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[the usual etcd backup process]. 
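As a hedged sketch (not taken from the included modules), the key difference is that the backup targets the etcd pods that run in the hosted control plane namespace on the management cluster, rather than etcd on control plane nodes. The namespace, pod name, label, and TLS file paths below are illustrative assumptions; the included procedure modules describe the supported steps:

[source,terminal]
----
# Illustrative only: the hosted control plane namespace is typically <hc-namespace>-<hc-name>,
# for example clusters-example. Pod names, labels, and certificate paths are assumptions;
# verify them against the pod spec in your environment.
$ oc get pods -l app=etcd -n clusters-example

$ oc exec -it etcd-0 -n clusters-example -- env ETCDCTL_API=3 /usr/bin/etcdctl \
    --cacert /etc/etcd/tls/etcd-ca/ca.crt \
    --cert /etc/etcd/tls/client/etcd-client.crt \
    --key /etc/etcd/tls/client/etcd-client.key \
    snapshot save /var/lib/snapshot.db
----

The resulting snapshot file can then be uploaded to external storage, such as an S3 bucket, as described in the backup module.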
- -// Backing up etcd on a hosted cluster -include::modules/backup-etcd-hosted-cluster.adoc[leveloffset=+2] - -// Restoring an etcd snapshot on a hosted cluster -include::modules/restoring-etcd-snapshot-hosted-cluster.adoc[leveloffset=+2] - -[id="hcp-dr-aws"] -== Disaster recovery for a hosted cluster within an AWS region - -If you need disaster recovery (DR) for a hosted cluster, you can recover the hosted cluster to the same region within AWS. For example, you need DR when the upgrade of a management cluster fails and the hosted cluster is in a read-only state. - -:FeatureName: Hosted control planes -include::snippets/technology-preview.adoc[] - -The DR process involves three main steps: - -. Backing up the hosted cluster on the source management cluster -. Restoring the hosted cluster on a destination management cluster -. Deleting the hosted cluster from the source management cluster - -Your workloads remain running during the process. The Cluster API might be unavailable for a period, but that will not affect the services that are running on the worker nodes. - -[IMPORTANT] -==== -Both the source management cluster and the destination management cluster must have the `--external-dns` flags to maintain the API server URL, as shown in this example: - -.Example: External DNS flags -[source,terminal] ----- ---external-dns-provider=aws \ ---external-dns-credentials= \ ---external-dns-domain-filter= ----- - -That way, the server URL ends with `https://api-sample-hosted.sample-hosted.aws.openshift.com`. - -If you do not include the `--external-dns` flags to maintain the API server URL, the hosted cluster cannot be migrated. -==== - -[id="dr-hosted-cluster-env-context"] -=== Example environment and context - -Consider a scenario where you have three clusters to restore. Two are management clusters, and one is a hosted cluster. You can restore either the control plane only or the control plane and the nodes. Before you begin, you need the following information: - -* Source MGMT Namespace: The source management namespace -* Source MGMT ClusterName: The source management cluster name -* Source MGMT Kubeconfig: The source management `kubeconfig` file -* Destination MGMT Kubeconfig: The destination management `kubeconfig` file -* HC Kubeconfig: The hosted cluster `kubeconfig` file -* SSH key file: The SSH public key -* Pull secret: The pull secret file to access the release images -* AWS credentials -* AWS region -* Base domain: The DNS base domain to use as an external DNS -* S3 bucket name: The bucket in the AWS region where you plan to upload the etcd backup - -This information is shown in the following example environment variables.
- -.Example environment variables -[source,terminal] ----- -SSH_KEY_FILE=${HOME}/.ssh/id_rsa.pub -BASE_PATH=${HOME}/hypershift -BASE_DOMAIN="aws.sample.com" -PULL_SECRET_FILE="${HOME}/pull_secret.json" -AWS_CREDS="${HOME}/.aws/credentials" -AWS_ZONE_ID="Z02718293M33QHDEQBROL" - -CONTROL_PLANE_AVAILABILITY_POLICY=SingleReplica -HYPERSHIFT_PATH=${BASE_PATH}/src/hypershift -HYPERSHIFT_CLI=${HYPERSHIFT_PATH}/bin/hypershift -HYPERSHIFT_IMAGE=${HYPERSHIFT_IMAGE:-"quay.io/${USER}/hypershift:latest"} -NODE_POOL_REPLICAS=${NODE_POOL_REPLICAS:-2} - -# MGMT Context -MGMT_REGION=us-west-1 -MGMT_CLUSTER_NAME="${USER}-dev" -MGMT_CLUSTER_NS=${USER} -MGMT_CLUSTER_DIR="${BASE_PATH}/hosted_clusters/${MGMT_CLUSTER_NS}-${MGMT_CLUSTER_NAME}" -MGMT_KUBECONFIG="${MGMT_CLUSTER_DIR}/kubeconfig" - -# MGMT2 Context -MGMT2_CLUSTER_NAME="${USER}-dest" -MGMT2_CLUSTER_NS=${USER} -MGMT2_CLUSTER_DIR="${BASE_PATH}/hosted_clusters/${MGMT2_CLUSTER_NS}-${MGMT2_CLUSTER_NAME}" -MGMT2_KUBECONFIG="${MGMT2_CLUSTER_DIR}/kubeconfig" - -# Hosted Cluster Context -HC_CLUSTER_NS=clusters -HC_REGION=us-west-1 -HC_CLUSTER_NAME="${USER}-hosted" -HC_CLUSTER_DIR="${BASE_PATH}/hosted_clusters/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}" -HC_KUBECONFIG="${HC_CLUSTER_DIR}/kubeconfig" -BACKUP_DIR=${HC_CLUSTER_DIR}/backup - -BUCKET_NAME="${USER}-hosted-${MGMT_REGION}" - -# DNS -AWS_ZONE_ID="Z07342811SH9AA102K1AC" -EXTERNAL_DNS_DOMAIN="hc.jpdv.aws.kerbeross.com" ----- - -[id="dr-hosted-cluster-process"] -=== Overview of the backup and restore process - -The backup and restore process works as follows: - -. On management cluster 1, which you can think of as the source management cluster, the control plane and workers interact by using the external DNS API. The external DNS API is accessible, and a load balancer sits between the management clusters. -+ -image::298_OpenShift_Backup_Restore_0123_00.png[Diagram that shows the workers accessing the external DNS API and the external DNS API pointing to the control plane through a load balancer] - -. You take a snapshot of the hosted cluster, which includes etcd, the control plane, and the worker nodes. During this process, the worker nodes continue to try to access the external DNS API even if it is not accessible, the workloads are running, the control plane is saved in a local manifest file, and etcd is backed up to an S3 bucket. The data plane is active and the control plane is paused. -+ -image::298_OpenShift_Backup_Restore_0123_01.png[] - -. On management cluster 2, which you can think of as the destination management cluster, you restore etcd from the S3 bucket and restore the control plane from the local manifest file. During this process, the external DNS API is stopped, the hosted cluster API becomes inaccessible, and any workers that use the API are unable to update their manifest files, but the workloads are still running. -+ -image::298_OpenShift_Backup_Restore_0123_02.png[] - -. The external DNS API is accessible again, and the worker nodes use it to move to management cluster 2. The external DNS API can access the load balancer that points to the control plane. -+ -image::298_OpenShift_Backup_Restore_0123_03.png[] - -. On management cluster 2, the control plane and worker nodes interact by using the external DNS API. The resources are deleted from management cluster 1, except for the S3 backup of etcd. If you try to set up the hosted cluster again on management cluster 1, it will not work.
-+ -image::298_OpenShift_Backup_Restore_0123_04.png[] - -You can manually back up and restore your hosted cluster, or you can run a script to complete the process. For more information about the script, see "Running a script to back up and restore a hosted cluster". - -// Backing up the hosted cluster -include::modules/dr-hosted-cluster-within-aws-region-backup.adoc[leveloffset=+2] - -// Restoring the hosted cluster -include::modules/dr-hosted-cluster-within-aws-region-restore.adoc[leveloffset=+2] - -// Deleting the hosted cluster -include::modules/dr-hosted-cluster-within-aws-region-delete.adoc[leveloffset=+2] - -//Helper script -include::modules/dr-hosted-cluster-within-aws-region-script.adoc[leveloffset=+2] - diff --git a/hosted_control_planes/hcp-configuring.adoc b/hosted_control_planes/hcp-configuring.adoc deleted file mode 100644 index 123ff77ea567..000000000000 --- a/hosted_control_planes/hcp-configuring.adoc +++ /dev/null @@ -1,41 +0,0 @@ -:_content-type: ASSEMBLY -[id="hcp-configuring"] -= Configuring hosted control planes -include::_attributes/common-attributes.adoc[] -:context: hcp-configuring - -toc::[] - -To get started with hosted control planes for {product-title}, you first configure your hosted cluster on the provider that you want to use. Then, you complete a few management tasks. - -:FeatureName: Hosted control planes -include::snippets/technology-preview.adoc[] - -You can view the procedures by selecting from one of the following providers: - -[id="hcp-configuring-aws"] -== Amazon Web Services (AWS) - -* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hosting-cluster-aws-infra-reqs[AWS infrastructure requirements]: Review the infrastructure requirements to create a hosted cluster on AWS. -* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hosting-service-cluster-configure-aws[Configuring the hosting cluster on AWS (Technology Preview)]: The tasks to configure a hosted cluster on AWS include creating the AWS S3 OIDC secret, creating a routable public zone, enabling external DNS, enabling AWS PrivateLink, enabling the hosted control planes feature, and installing the hosted control planes CLI. -* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hosted-control-planes-manage-aws[Managing hosted control plane clusters on AWS (Technology Preview)]: Management tasks include creating, importing, accessing, or deleting a hosted cluster on AWS. - -[id="hcp-configuring-bm"] -== Bare metal - -* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#infrastructure-reqs-bare-metal[Bare metal infrastructure requirements]: Review the infrastructure requirements to create a hosted cluster on bare metal. -* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#configuring-hosting-service-cluster-configure-bm[Configuring the hosting cluster on bare metal (Technology Preview)]: Configure DNS before you create a hosted cluster. 
-* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hosted-control-planes-manage-bm[Managing hosted control plane clusters on bare metal (Technology Preview)]: Create a hosted cluster, create an `InfraEnv` resource, add agents, access the hosted cluster, scale the `NodePool` object, handle Ingress, enable node auto-scaling, or delete a hosted cluster. - -[id="hcp-configuring-virt"] -== {VirtProductName} - -* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hosted-control-planes-manage-kubevirt[Managing hosted control plane clusters on OpenShift Virtualization (Technology Preview)]: Create {product-title} clusters with worker nodes that are hosted by KubeVirt virtual machines. - -// To be added after ACM 2.9 goes live: - -//{ibmpowerProductName} - - - - diff --git a/hosted_control_planes/hcp-managing.adoc b/hosted_control_planes/hcp-managing.adoc deleted file mode 100644 index 98f9d275c43f..000000000000 --- a/hosted_control_planes/hcp-managing.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_content-type: ASSEMBLY -[id="hcp-managing"] -= Managing hosted control planes -include::_attributes/common-attributes.adoc[] -:context: hcp-managing - -toc::[] - -After you configure your environment for hosted control planes and create a hosted cluster, you can further manage your clusters and nodes. - -:FeatureName: Hosted control planes -include::snippets/technology-preview.adoc[] - -include::modules/updates-for-hosted-control-planes.adoc[leveloffset=+1] -include::modules/updating-node-pools-for-hcp.adoc[leveloffset=+1] -include::modules/configuring-node-pools-for-hcp.adoc[leveloffset=+1] -//restarting hosted control plane components -include::modules/hosted-control-planes-pause-reconciliation.adoc[leveloffset=+1] -//debugging why nodes have not joined the cluster -//using service-level DNS for control plane services -//configuring metrics sets -include::modules/node-tuning-hosted-cluster.adoc[leveloffset=+1] -include::modules/sriov-operator-hosted-control-planes.adoc[leveloffset=+1] -//automated machine management -include::modules/delete-hosted-cluster.adoc[leveloffset=+1] - diff --git a/hosted_control_planes/images b/hosted_control_planes/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/hosted_control_planes/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/hosted_control_planes/index.adoc b/hosted_control_planes/index.adoc deleted file mode 100644 index c045590ad6aa..000000000000 --- a/hosted_control_planes/index.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="hcp-overview"] -= Hosted control planes overview -include::_attributes/common-attributes.adoc[] -:context: hcp-overview - -You can deploy {product-title} clusters by using two different control plane configurations: standalone or hosted control planes. The standalone configuration uses dedicated virtual machines or physical machines to host the control plane. With hosted control planes for {product-title}, you create control planes as pods on a hosting cluster without the need for dedicated virtual or physical machines for each control plane. 
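As an illustrative sketch under stated assumptions (this example is not part of the original assembly), you can observe this difference on a management cluster that already hosts clusters: `HostedCluster` resources are commonly created in a namespace such as `clusters`, and each control plane runs as pods in a namespace derived from the hosted cluster name. The `clusters` namespace and the `example` cluster name below are assumptions:

[source,terminal]
----
# Illustrative only: list the hosted clusters that this management cluster hosts.
$ oc get hostedclusters -n clusters

# Control plane components (etcd, API server, and so on) run as pods in the
# hosted control plane namespace, typically <namespace>-<name>.
$ oc get pods -n clusters-example
----

In contrast, a standalone cluster runs the same components directly on dedicated control plane machines.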
- -toc::[] - -include::modules/hosted-control-planes-overview.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hypershift-addon-intro[HyperShift add-on (Technology Preview)] - -* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hosted-control-planes-intro[Hosted control planes (Technology Preview)] - -include::modules/hosted-control-planes-concepts-personas.adoc[leveloffset=+1] -include::modules/hosted-control-planes-version-support.adoc[leveloffset=+1] diff --git a/hosted_control_planes/modules b/hosted_control_planes/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/hosted_control_planes/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/hosted_control_planes/snippets b/hosted_control_planes/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/hosted_control_planes/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/identity_providers/_attributes b/identity_providers/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/identity_providers/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/identity_providers/images b/identity_providers/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/identity_providers/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/identity_providers/modules b/identity_providers/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/identity_providers/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/identity_providers/snippets b/identity_providers/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/identity_providers/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/images/135_OpenShift_Distributed_Unit_0121.svg b/images/135_OpenShift_Distributed_Unit_0121.svg deleted file mode 100644 index bac62a3ed04f..000000000000 --- a/images/135_OpenShift_Distributed_Unit_0121.svg +++ /dev/null @@ -1 +0,0 @@ -RAN boundaryCentral unit (CU),control planeCentral unit (CU),user plane135_OpenShift_0121Distributed unit (DU)Radio unit (RU) \ No newline at end of file diff --git a/images/150_OpenShift_VMware_on_AWS_0321_arch.svg b/images/150_OpenShift_VMware_on_AWS_0321_arch.svg deleted file mode 100644 index 963242149269..000000000000 --- a/images/150_OpenShift_VMware_on_AWS_0321_arch.svg +++ /dev/null @@ -1 +0,0 @@ -RWO PVCsVMware Cloud on AWSOpenShiftvSphere 7OpenShift SDNOpenShift integrated load balancer and ingressNSX-TVSANvCenterContainersAppAppAppControl plane VMInfra VMWorker VM150_OpenShift_0321 \ No newline at end of file diff --git a/images/150_OpenShift_VMware_on_AWS_1021_installer_FCOS.png b/images/150_OpenShift_VMware_on_AWS_1021_installer_FCOS.png deleted file mode 100644 index 846a46474508..000000000000 Binary files a/images/150_OpenShift_VMware_on_AWS_1021_installer_FCOS.png and /dev/null differ diff --git a/images/152_OpenShift_Config_NTP_0421.png b/images/152_OpenShift_Config_NTP_0421.png deleted file mode 100644 index d576af889b45..000000000000 Binary files a/images/152_OpenShift_Config_NTP_0421.png and /dev/null differ diff --git 
a/images/152_OpenShift_Config_NTP_0421.svg b/images/152_OpenShift_Config_NTP_0421.svg deleted file mode 100644 index d9e21af5f437..000000000000 --- a/images/152_OpenShift_Config_NTP_0421.svg +++ /dev/null @@ -1 +0,0 @@ -Baremetal networkInternet accessIngress VIPAPI VIPControl plane nodes x3Worker nodes xNNTP serverNTP clientOptionalOptional152_OpenShift_0421External NTP serverRouter \ No newline at end of file diff --git a/images/156_OpenShift_ROSA_Arch_0621_arch.svg b/images/156_OpenShift_ROSA_Arch_0621_arch.svg deleted file mode 100644 index bcbd1c06340e..000000000000 --- a/images/156_OpenShift_ROSA_Arch_0621_arch.svg +++ /dev/null @@ -1 +0,0 @@ -Control plane nodes (x3)apiserveretcdcontrollerWorker nodes (xN)Compute (xN)Persistent storagePublicnetworkPrivate networkInternal(API) NLBRed Hat(Console) ELBExternal/internalApp ELBRed Hat(API) ELBExternal/internal(API) NLBRoute53DNSInfra nodes (x2, x3)registryroutermonitoringAWS VPCAvailability zone(x1, x3)Availability zone(x1, x3)Availability zone(x1, x3)InternetRed HatManagementDeveloper156_OpenShift_0621 \ No newline at end of file diff --git a/images/156_OpenShift_ROSA_Arch_0621_privatelink.svg b/images/156_OpenShift_ROSA_Arch_0621_privatelink.svg deleted file mode 100644 index 37f9712650fb..000000000000 --- a/images/156_OpenShift_ROSA_Arch_0621_privatelink.svg +++ /dev/null @@ -1,258 +0,0 @@ - - - - - - Private network - - - - - - - - - - - Route53DNS - - - - - - - - - - - - - - - - - - - - - Developer - - - - - - - Red HatManagement - - - - - - - PrivateLink - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Control plane nodes (x3) - - apiserver - - etcd - - controller - - - - - - - - - - - - Worker nodes (xN) - - Compute (xN) - - Persistent storage - - API NLB - - - - - - - - - - - - - App ELB - - - - - - - - - - - - - - - - - - - - - - - - - - Infra nodes (x2, x3) - - registry - - router - - monitoring - - AWS VPC - - - - - - Availability zone(x1, x3) - Availability zone(x1, x3) - Availability zone(x1, x3) - - 156_OpenShift_1221 - - - diff --git a/images/161_OpenShift_Baremetal_IPI_Deployment_updates_0521.png b/images/161_OpenShift_Baremetal_IPI_Deployment_updates_0521.png deleted file mode 100644 index a9a171c35912..000000000000 Binary files a/images/161_OpenShift_Baremetal_IPI_Deployment_updates_0521.png and /dev/null differ diff --git a/images/161_OpenShift_Baremetal_IPI_Deployment_updates_0521.svg b/images/161_OpenShift_Baremetal_IPI_Deployment_updates_0521.svg deleted file mode 100644 index 4a5508c8c975..000000000000 --- a/images/161_OpenShift_Baremetal_IPI_Deployment_updates_0521.svg +++ /dev/null @@ -1 +0,0 @@ -Out-of-Band Managementnetwork (optional)RouterControl plane nodes x3Baremetal networkProvisioning network (optional)RouterDHCP serverDNS server161_OpenShift_0521Internet accessProvisioning nodeBootstrap VMWorker nodes xNProvisioning bridgeeno1Baremetal bridgeeno2Ingress VIPAPI VIP \ No newline at end of file diff --git a/images/175_OpenShift_ACM_0821_1.png b/images/175_OpenShift_ACM_0821_1.png deleted file mode 100644 index 61743c591d6f..000000000000 Binary files a/images/175_OpenShift_ACM_0821_1.png and /dev/null differ diff --git a/images/175_OpenShift_ACM_0821_2.png b/images/175_OpenShift_ACM_0821_2.png deleted file mode 100644 index 7f517282613e..000000000000 Binary files a/images/175_OpenShift_ACM_0821_2.png and /dev/null differ diff --git a/images/176_OpenShift_zero_touch_provisioning_0821.png b/images/176_OpenShift_zero_touch_provisioning_0821.png deleted file mode 100644 index 
766887b66ada..000000000000 Binary files a/images/176_OpenShift_zero_touch_provisioning_0821.png and /dev/null differ diff --git a/images/177_OpenShift_cluster_provisioning_0821.png b/images/177_OpenShift_cluster_provisioning_0821.png deleted file mode 100644 index eff4a6d28fc7..000000000000 Binary files a/images/177_OpenShift_cluster_provisioning_0821.png and /dev/null differ diff --git a/images/179_OpenShift_NBDE_implementation_0821_1.png b/images/179_OpenShift_NBDE_implementation_0821_1.png deleted file mode 100644 index 7007bcbeb721..000000000000 Binary files a/images/179_OpenShift_NBDE_implementation_0821_1.png and /dev/null differ diff --git a/images/179_OpenShift_NBDE_implementation_0821_2.png b/images/179_OpenShift_NBDE_implementation_0821_2.png deleted file mode 100644 index 2fe21e0832db..000000000000 Binary files a/images/179_OpenShift_NBDE_implementation_0821_2.png and /dev/null differ diff --git a/images/179_OpenShift_NBDE_implementation_0821_3.png b/images/179_OpenShift_NBDE_implementation_0821_3.png deleted file mode 100644 index f70ddba3da14..000000000000 Binary files a/images/179_OpenShift_NBDE_implementation_0821_3.png and /dev/null differ diff --git a/images/179_OpenShift_NBDE_implementation_0821_4.png b/images/179_OpenShift_NBDE_implementation_0821_4.png deleted file mode 100644 index 67d574b2c50a..000000000000 Binary files a/images/179_OpenShift_NBDE_implementation_0821_4.png and /dev/null differ diff --git a/images/183_OpenShift_ZTP_0921.png b/images/183_OpenShift_ZTP_0921.png deleted file mode 100644 index 6ec2ceac46ce..000000000000 Binary files a/images/183_OpenShift_ZTP_0921.png and /dev/null differ diff --git a/images/193_OpenShift_Cluster_Install_updates_1021_oVirt.png b/images/193_OpenShift_Cluster_Install_updates_1021_oVirt.png deleted file mode 100644 index a6afcf0b6404..000000000000 Binary files a/images/193_OpenShift_Cluster_Install_updates_1021_oVirt.png and /dev/null differ diff --git a/images/202_OpenShift_Ingress_0222_load_balancer.png b/images/202_OpenShift_Ingress_0222_load_balancer.png deleted file mode 100644 index 3d74aa80574e..000000000000 Binary files a/images/202_OpenShift_Ingress_0222_load_balancer.png and /dev/null differ diff --git a/images/202_OpenShift_Ingress_0222_node_port.png b/images/202_OpenShift_Ingress_0222_node_port.png deleted file mode 100644 index 6474dd2be45b..000000000000 Binary files a/images/202_OpenShift_Ingress_0222_node_port.png and /dev/null differ diff --git a/images/209_OpenShift_BGP_0122.png b/images/209_OpenShift_BGP_0122.png deleted file mode 100644 index 551f452b43b4..000000000000 Binary files a/images/209_OpenShift_BGP_0122.png and /dev/null differ diff --git a/images/210_OpenShift_Baremetal_IPI_Deployment_updates_0122_1.png b/images/210_OpenShift_Baremetal_IPI_Deployment_updates_0122_1.png deleted file mode 100644 index 2aa781eb2fe7..000000000000 Binary files a/images/210_OpenShift_Baremetal_IPI_Deployment_updates_0122_1.png and /dev/null differ diff --git a/images/210_OpenShift_Baremetal_IPI_Deployment_updates_0122_2.png b/images/210_OpenShift_Baremetal_IPI_Deployment_updates_0122_2.png deleted file mode 100644 index 59ea76d55f95..000000000000 Binary files a/images/210_OpenShift_Baremetal_IPI_Deployment_updates_0122_2.png and /dev/null differ diff --git a/images/211_OpenShift_Redfish_dataflow_0222.png b/images/211_OpenShift_Redfish_dataflow_0222.png deleted file mode 100644 index 6aa8ce8cbe42..000000000000 Binary files a/images/211_OpenShift_Redfish_dataflow_0222.png and /dev/null differ diff --git 
a/images/211_OpenShift_Redfish_dataflow_0822.png b/images/211_OpenShift_Redfish_dataflow_0822.png deleted file mode 100644 index b115a9e66190..000000000000 Binary files a/images/211_OpenShift_Redfish_dataflow_0822.png and /dev/null differ diff --git a/images/216_OpenShift_Topology-aware_Scheduling_0222.png b/images/216_OpenShift_Topology-aware_Scheduling_0222.png deleted file mode 100644 index 510a625a0204..000000000000 Binary files a/images/216_OpenShift_Topology-aware_Scheduling_0222.png and /dev/null differ diff --git a/images/217_OpenShift_Zero_Touch_Provisioning_updates_0222_1.png b/images/217_OpenShift_Zero_Touch_Provisioning_updates_0222_1.png deleted file mode 100644 index 112ac405fb5c..000000000000 Binary files a/images/217_OpenShift_Zero_Touch_Provisioning_updates_0222_1.png and /dev/null differ diff --git a/images/217_OpenShift_Zero_Touch_Provisioning_updates_0222_2.png b/images/217_OpenShift_Zero_Touch_Provisioning_updates_0222_2.png deleted file mode 100644 index d2f623fb0fc2..000000000000 Binary files a/images/217_OpenShift_Zero_Touch_Provisioning_updates_0222_2.png and /dev/null differ diff --git a/images/217_OpenShift_Zero_Touch_Provisioning_updates_0222_3.png b/images/217_OpenShift_Zero_Touch_Provisioning_updates_0222_3.png deleted file mode 100644 index 13b98bb110cb..000000000000 Binary files a/images/217_OpenShift_Zero_Touch_Provisioning_updates_0222_3.png and /dev/null differ diff --git a/images/217_OpenShift_Zero_Touch_Provisioning_updates_1022_1.png b/images/217_OpenShift_Zero_Touch_Provisioning_updates_1022_1.png deleted file mode 100644 index b435ca39da53..000000000000 Binary files a/images/217_OpenShift_Zero_Touch_Provisioning_updates_1022_1.png and /dev/null differ diff --git a/images/217_OpenShift_Zero_Touch_Provisioning_updates_1022_2.png b/images/217_OpenShift_Zero_Touch_Provisioning_updates_1022_2.png deleted file mode 100644 index ef7f4f36756b..000000000000 Binary files a/images/217_OpenShift_Zero_Touch_Provisioning_updates_1022_2.png and /dev/null differ diff --git a/images/217_OpenShift_Zero_Touch_Provisioning_updates_1022_3.png b/images/217_OpenShift_Zero_Touch_Provisioning_updates_1022_3.png deleted file mode 100644 index c1d3cfc56f50..000000000000 Binary files a/images/217_OpenShift_Zero_Touch_Provisioning_updates_1022_3.png and /dev/null differ diff --git a/images/218_OpenShift_PTP_events_0222.png b/images/218_OpenShift_PTP_events_0222.png deleted file mode 100644 index 6d0d0025a89b..000000000000 Binary files a/images/218_OpenShift_PTP_events_0222.png and /dev/null differ diff --git a/images/247-OpenShift-Kubernetes-Overview.png b/images/247-OpenShift-Kubernetes-Overview.png deleted file mode 100644 index 7ba598c3064f..000000000000 Binary files a/images/247-OpenShift-Kubernetes-Overview.png and /dev/null differ diff --git a/images/247_OpenShift_Kubernetes_Overview-1.png b/images/247_OpenShift_Kubernetes_Overview-1.png deleted file mode 100644 index 927cf769907c..000000000000 Binary files a/images/247_OpenShift_Kubernetes_Overview-1.png and /dev/null differ diff --git a/images/247_OpenShift_Kubernetes_Overview-2.png b/images/247_OpenShift_Kubernetes_Overview-2.png deleted file mode 100644 index e645a2335a53..000000000000 Binary files a/images/247_OpenShift_Kubernetes_Overview-2.png and /dev/null differ diff --git a/images/261_OpenShift_DPDK_0722.png b/images/261_OpenShift_DPDK_0722.png deleted file mode 100644 index 3eaf53747c80..000000000000 Binary files a/images/261_OpenShift_DPDK_0722.png and /dev/null differ diff --git 
a/images/264_OpenShift_CNI_plugin_chain_0622.png b/images/264_OpenShift_CNI_plugin_chain_0622.png deleted file mode 100644 index 06bb772db181..000000000000 Binary files a/images/264_OpenShift_CNI_plugin_chain_0622.png and /dev/null differ diff --git a/images/264_OpenShift_CNI_plugin_chain_0722.png b/images/264_OpenShift_CNI_plugin_chain_0722.png deleted file mode 100644 index 47a35e5bdfa4..000000000000 Binary files a/images/264_OpenShift_CNI_plugin_chain_0722.png and /dev/null differ diff --git a/images/267_OpenShift_on_AWS_Access_Networking_1222.png b/images/267_OpenShift_on_AWS_Access_Networking_1222.png deleted file mode 100644 index f62d20794e5c..000000000000 Binary files a/images/267_OpenShift_on_AWS_Access_Networking_1222.png and /dev/null differ diff --git a/images/291_OpenShift_on_AWS_Intro_1122_docs.png b/images/291_OpenShift_on_AWS_Intro_1122_docs.png deleted file mode 100644 index 51ffbb751dac..000000000000 Binary files a/images/291_OpenShift_on_AWS_Intro_1122_docs.png and /dev/null differ diff --git a/images/292_OpenShift_Configuring_multi-network_policy_1122.png b/images/292_OpenShift_Configuring_multi-network_policy_1122.png deleted file mode 100644 index e1bc9d767e77..000000000000 Binary files a/images/292_OpenShift_Configuring_multi-network_policy_1122.png and /dev/null differ diff --git a/images/292_OpenShift_Configuring_multiple-network_policy_1122.png b/images/292_OpenShift_Configuring_multiple-network_policy_1122.png deleted file mode 100644 index 7f3b2d116535..000000000000 Binary files a/images/292_OpenShift_Configuring_multiple-network_policy_1122.png and /dev/null differ diff --git a/images/295_OpenShift_Nodes_Overview_1222.png b/images/295_OpenShift_Nodes_Overview_1222.png deleted file mode 100644 index 36eb557368f5..000000000000 Binary files a/images/295_OpenShift_Nodes_Overview_1222.png and /dev/null differ diff --git a/images/298_OpenShift_Backup_Restore_0123_00.png b/images/298_OpenShift_Backup_Restore_0123_00.png deleted file mode 100644 index f0b2c2c1a93e..000000000000 Binary files a/images/298_OpenShift_Backup_Restore_0123_00.png and /dev/null differ diff --git a/images/298_OpenShift_Backup_Restore_0123_01.png b/images/298_OpenShift_Backup_Restore_0123_01.png deleted file mode 100644 index fde99c740f0a..000000000000 Binary files a/images/298_OpenShift_Backup_Restore_0123_01.png and /dev/null differ diff --git a/images/298_OpenShift_Backup_Restore_0123_02.png b/images/298_OpenShift_Backup_Restore_0123_02.png deleted file mode 100644 index 627b468cdfdc..000000000000 Binary files a/images/298_OpenShift_Backup_Restore_0123_02.png and /dev/null differ diff --git a/images/298_OpenShift_Backup_Restore_0123_03.png b/images/298_OpenShift_Backup_Restore_0123_03.png deleted file mode 100644 index d055d8fe1884..000000000000 Binary files a/images/298_OpenShift_Backup_Restore_0123_03.png and /dev/null differ diff --git a/images/298_OpenShift_Backup_Restore_0123_04.png b/images/298_OpenShift_Backup_Restore_0123_04.png deleted file mode 100644 index b28819019140..000000000000 Binary files a/images/298_OpenShift_Backup_Restore_0123_04.png and /dev/null differ diff --git a/images/299_OpenShift_OVN-Kubernetes_arch_0123_2.png b/images/299_OpenShift_OVN-Kubernetes_arch_0123_2.png deleted file mode 100644 index 456a330217f3..000000000000 Binary files a/images/299_OpenShift_OVN-Kubernetes_arch_0123_2.png and /dev/null differ diff --git a/images/299_OpenShift_OVN-Kubernetes_arch_0223_1.png b/images/299_OpenShift_OVN-Kubernetes_arch_0223_1.png deleted file mode 100644 index 
7bda732b17e0..000000000000 Binary files a/images/299_OpenShift_OVN-Kubernetes_arch_0223_1.png and /dev/null differ diff --git a/images/302_OpenShift_Bare_Metal_Operator_0223.png b/images/302_OpenShift_Bare_Metal_Operator_0223.png deleted file mode 100644 index d6c5a973cdd6..000000000000 Binary files a/images/302_OpenShift_Bare_Metal_Operator_0223.png and /dev/null differ diff --git a/images/310_OpenShift_machine_deletion_hooks_0223.png b/images/310_OpenShift_machine_deletion_hooks_0223.png deleted file mode 100644 index 4acaf0f3d13c..000000000000 Binary files a/images/310_OpenShift_machine_deletion_hooks_0223.png and /dev/null differ diff --git a/images/311_RHDevice_Edge_Overview_0223_1.png b/images/311_RHDevice_Edge_Overview_0223_1.png deleted file mode 100644 index b30a00edcc6c..000000000000 Binary files a/images/311_RHDevice_Edge_Overview_0223_1.png and /dev/null differ diff --git a/images/311_RHDevice_Edge_Overview_0223_2.png b/images/311_RHDevice_Edge_Overview_0223_2.png deleted file mode 100644 index fecd5e4bc039..000000000000 Binary files a/images/311_RHDevice_Edge_Overview_0223_2.png and /dev/null differ diff --git a/images/317_RHbM_OVN_topology_0323.png b/images/317_RHbM_OVN_topology_0323.png deleted file mode 100644 index 84d45a07986e..000000000000 Binary files a/images/317_RHbM_OVN_topology_0323.png and /dev/null differ diff --git a/images/319_OpenShift_PTP_bare-metal_OCP_nodes_0323_4.13.png b/images/319_OpenShift_PTP_bare-metal_OCP_nodes_0323_4.13.png deleted file mode 100644 index a62c45c03c38..000000000000 Binary files a/images/319_OpenShift_PTP_bare-metal_OCP_nodes_0323_4.13.png and /dev/null differ diff --git a/images/319_OpenShift_redfish_bare-metal_OCP_nodes_0323.png b/images/319_OpenShift_redfish_bare-metal_OCP_nodes_0323.png deleted file mode 100644 index bb7e4449b29f..000000000000 Binary files a/images/319_OpenShift_redfish_bare-metal_OCP_nodes_0323.png and /dev/null differ diff --git a/images/324_RHbM_Certificate_Rotation_0323_long-term.png b/images/324_RHbM_Certificate_Rotation_0323_long-term.png deleted file mode 100644 index c57664f62d06..000000000000 Binary files a/images/324_RHbM_Certificate_Rotation_0323_long-term.png and /dev/null differ diff --git a/images/324_RHbM_Certificate_Rotation_0323_short-term.png b/images/324_RHbM_Certificate_Rotation_0323_short-term.png deleted file mode 100644 index 9c8fcbcd8358..000000000000 Binary files a/images/324_RHbM_Certificate_Rotation_0323_short-term.png and /dev/null differ diff --git a/images/325_OpenShift_vSphere_Deployment_updates_0323.png b/images/325_OpenShift_vSphere_Deployment_updates_0323.png deleted file mode 100644 index 5f8b2e26aa7a..000000000000 Binary files a/images/325_OpenShift_vSphere_Deployment_updates_0323.png and /dev/null differ diff --git a/images/334_OpenShift_cluster_updating_and_CCO_workflows_0523_4.11_A.png b/images/334_OpenShift_cluster_updating_and_CCO_workflows_0523_4.11_A.png deleted file mode 100644 index ac969c08eb96..000000000000 Binary files a/images/334_OpenShift_cluster_updating_and_CCO_workflows_0523_4.11_A.png and /dev/null differ diff --git a/images/334_OpenShift_cluster_updating_and_CCO_workflows_0523_4.11_B.png b/images/334_OpenShift_cluster_updating_and_CCO_workflows_0523_4.11_B.png deleted file mode 100644 index 054e7b078181..000000000000 Binary files a/images/334_OpenShift_cluster_updating_and_CCO_workflows_0523_4.11_B.png and /dev/null differ diff --git a/images/347_OpenShift_credentials_with_STS_updates_0623_AWS.png 
b/images/347_OpenShift_credentials_with_STS_updates_0623_AWS.png deleted file mode 100644 index 85dba21558bf..000000000000 Binary files a/images/347_OpenShift_credentials_with_STS_updates_0623_AWS.png and /dev/null differ diff --git a/images/347_OpenShift_credentials_with_STS_updates_0623_GCP.png b/images/347_OpenShift_credentials_with_STS_updates_0623_GCP.png deleted file mode 100644 index 968b0285f989..000000000000 Binary files a/images/347_OpenShift_credentials_with_STS_updates_0623_GCP.png and /dev/null differ diff --git a/images/4.4-71_OpenShift_Baremetal_IPI_Depoyment_0320_2.png b/images/4.4-71_OpenShift_Baremetal_IPI_Depoyment_0320_2.png deleted file mode 100644 index 9ddf2df1f1ff..000000000000 Binary files a/images/4.4-71_OpenShift_Baremetal_IPI_Depoyment_0320_2.png and /dev/null differ diff --git a/images/71_OpenShift_4.6_Baremetal_IPI_Deployment_1020_1.svg b/images/71_OpenShift_4.6_Baremetal_IPI_Deployment_1020_1.svg deleted file mode 100644 index 04effb30efd1..000000000000 --- a/images/71_OpenShift_4.6_Baremetal_IPI_Deployment_1020_1.svg +++ /dev/null @@ -1 +0,0 @@ -Baremetal networkProvisioning network (optional)71_OpenShift_1020Internet accessBootstrap VMProvisioning nodeIngress VIPAPI VIPOut-of-Band Managementnetwork (optional)RouterControl plane nodes x3Worker nodes xNRouterDHCP serverDNS serverProvisioning bridgeeno1Baremetal bridgeeno2 \ No newline at end of file diff --git a/images/71_OpenShift_4.6_Baremetal_IPI_Deployment_1020_2.svg b/images/71_OpenShift_4.6_Baremetal_IPI_Deployment_1020_2.svg deleted file mode 100644 index 3f07dcb5af48..000000000000 --- a/images/71_OpenShift_4.6_Baremetal_IPI_Deployment_1020_2.svg +++ /dev/null @@ -1 +0,0 @@ -Out-of-Band Managementnetwork (optional)RouterControl plane nodes x3Baremetal networkProvisioning network (optional)RouterDHCP serverDNS server71_OpenShift_1020Internet accessProvisioning nodeBootstrap VMWorker nodes xNProvisioning bridgeeno1Baremetal bridgeeno2Ingress VIPAPI VIP \ No newline at end of file diff --git a/images/92_OpenShift_Cluster_Install_RHV_0520.png b/images/92_OpenShift_Cluster_Install_RHV_0520.png deleted file mode 100644 index 885d338abe38..000000000000 Binary files a/images/92_OpenShift_Cluster_Install_RHV_0520.png and /dev/null differ diff --git a/images/CLI-list.png b/images/CLI-list.png deleted file mode 100644 index 0e4c462d0e16..000000000000 Binary files a/images/CLI-list.png and /dev/null differ diff --git a/images/Git-access-token-secret.png b/images/Git-access-token-secret.png deleted file mode 100644 index 6c732263b879..000000000000 Binary files a/images/Git-access-token-secret.png and /dev/null differ diff --git a/images/Git-access-token.png b/images/Git-access-token.png deleted file mode 100644 index 7d533fc1c7f8..000000000000 Binary files a/images/Git-access-token.png and /dev/null differ diff --git a/images/Github-app-details.png b/images/Github-app-details.png deleted file mode 100644 index d047e1a4e279..000000000000 Binary files a/images/Github-app-details.png and /dev/null differ diff --git a/images/HPAflow.png b/images/HPAflow.png deleted file mode 100644 index 0d7e1335e860..000000000000 Binary files a/images/HPAflow.png and /dev/null differ diff --git a/images/OCP_3_to_4_App_migration.png b/images/OCP_3_to_4_App_migration.png deleted file mode 100644 index a1f14cd4dd89..000000000000 Binary files a/images/OCP_3_to_4_App_migration.png and /dev/null differ diff --git a/images/Operator_Icon-OpenShift_Virtualization-5.png b/images/Operator_Icon-OpenShift_Virtualization-5.png deleted file mode 
100644 index 7fd768c9155b..000000000000 Binary files a/images/Operator_Icon-OpenShift_Virtualization-5.png and /dev/null differ diff --git a/images/VPC-Diagram.png b/images/VPC-Diagram.png deleted file mode 100644 index 862355d985f6..000000000000 Binary files a/images/VPC-Diagram.png and /dev/null differ diff --git a/images/add-serverless-app-dev.png b/images/add-serverless-app-dev.png deleted file mode 100644 index 1e69d3206d9c..000000000000 Binary files a/images/add-serverless-app-dev.png and /dev/null differ diff --git a/images/admin-console-create-binding-event-source-1.png b/images/admin-console-create-binding-event-source-1.png deleted file mode 100644 index 0f30f0cb4399..000000000000 Binary files a/images/admin-console-create-binding-event-source-1.png and /dev/null differ diff --git a/images/admin-console-create-binding-event-source-2.png b/images/admin-console-create-binding-event-source-2.png deleted file mode 100644 index 5b9b512c7baf..000000000000 Binary files a/images/admin-console-create-binding-event-source-2.png and /dev/null differ diff --git a/images/admin-console-create-role-event-source.png b/images/admin-console-create-role-event-source.png deleted file mode 100644 index 621b1b555223..000000000000 Binary files a/images/admin-console-create-role-event-source.png and /dev/null differ diff --git a/images/admin-console-create-sa-event-source.png b/images/admin-console-create-sa-event-source.png deleted file mode 100644 index a1501d594768..000000000000 Binary files a/images/admin-console-create-sa-event-source.png and /dev/null differ diff --git a/images/after-k8s-mount-propagation.png b/images/after-k8s-mount-propagation.png deleted file mode 100644 index c06e4d7b91a3..000000000000 Binary files a/images/after-k8s-mount-propagation.png and /dev/null differ diff --git a/images/agent-based-installer-workflow.png b/images/agent-based-installer-workflow.png deleted file mode 100644 index f3167a13b1d6..000000000000 Binary files a/images/agent-based-installer-workflow.png and /dev/null differ diff --git a/images/agent-tui-home.png b/images/agent-tui-home.png deleted file mode 100644 index f983aede0dda..000000000000 Binary files a/images/agent-tui-home.png and /dev/null differ diff --git a/images/api-admission-chain.png b/images/api-admission-chain.png deleted file mode 100644 index 8c483eab6d3c..000000000000 Binary files a/images/api-admission-chain.png and /dev/null differ diff --git a/images/app-launcher.png b/images/app-launcher.png deleted file mode 100644 index 6a5ac43a54c8..000000000000 Binary files a/images/app-launcher.png and /dev/null differ diff --git a/images/architecture_overview.png b/images/architecture_overview.png deleted file mode 100644 index 55f1e2cee849..000000000000 Binary files a/images/architecture_overview.png and /dev/null differ diff --git a/images/before-k8s-mount-propagation.png b/images/before-k8s-mount-propagation.png deleted file mode 100644 index 5e527ca799fb..000000000000 Binary files a/images/before-k8s-mount-propagation.png and /dev/null differ diff --git a/images/bringing_it_all_together.png b/images/bringing_it_all_together.png deleted file mode 100644 index 557d6196f208..000000000000 Binary files a/images/bringing_it_all_together.png and /dev/null differ diff --git a/images/build_process1.png b/images/build_process1.png deleted file mode 100644 index c721722629e3..000000000000 Binary files a/images/build_process1.png and /dev/null differ diff --git a/images/build_process2.png b/images/build_process2.png deleted file mode 100644 index 
8edbaf557cc9..000000000000 Binary files a/images/build_process2.png and /dev/null differ diff --git a/images/click-question-mark.png b/images/click-question-mark.png deleted file mode 100644 index bf34abb4a202..000000000000 Binary files a/images/click-question-mark.png and /dev/null differ diff --git a/images/cluster-configuration-general-tab.png b/images/cluster-configuration-general-tab.png deleted file mode 100644 index ff333d488818..000000000000 Binary files a/images/cluster-configuration-general-tab.png and /dev/null differ diff --git a/images/cluster-settings-console.png b/images/cluster-settings-console.png deleted file mode 100644 index 206559d5f787..000000000000 Binary files a/images/cluster-settings-console.png and /dev/null differ diff --git a/images/cnv_components_cdi-operator.png b/images/cnv_components_cdi-operator.png deleted file mode 100644 index 26e52340e409..000000000000 Binary files a/images/cnv_components_cdi-operator.png and /dev/null differ diff --git a/images/cnv_components_cluster-network-addons-operator.png b/images/cnv_components_cluster-network-addons-operator.png deleted file mode 100644 index ffd3e871fc78..000000000000 Binary files a/images/cnv_components_cluster-network-addons-operator.png and /dev/null differ diff --git a/images/cnv_components_hco-operator.png b/images/cnv_components_hco-operator.png deleted file mode 100644 index 36adf5bb9e7e..000000000000 Binary files a/images/cnv_components_hco-operator.png and /dev/null differ diff --git a/images/cnv_components_hpp-operator.png b/images/cnv_components_hpp-operator.png deleted file mode 100644 index 57f9a4ada49c..000000000000 Binary files a/images/cnv_components_hpp-operator.png and /dev/null differ diff --git a/images/cnv_components_main.png b/images/cnv_components_main.png deleted file mode 100644 index 45577eb8a51d..000000000000 Binary files a/images/cnv_components_main.png and /dev/null differ diff --git a/images/cnv_components_ssp-operator.png b/images/cnv_components_ssp-operator.png deleted file mode 100644 index 68166b451d7d..000000000000 Binary files a/images/cnv_components_ssp-operator.png and /dev/null differ diff --git a/images/cnv_components_tekton-tasks-operator.png b/images/cnv_components_tekton-tasks-operator.png deleted file mode 100644 index e817ff35e15d..000000000000 Binary files a/images/cnv_components_tekton-tasks-operator.png and /dev/null differ diff --git a/images/cnv_components_virt-operator.png b/images/cnv_components_virt-operator.png deleted file mode 100644 index 844fd378c8db..000000000000 Binary files a/images/cnv_components_virt-operator.png and /dev/null differ diff --git a/images/create-event-sink.png b/images/create-event-sink.png deleted file mode 100644 index 14e6cf205459..000000000000 Binary files a/images/create-event-sink.png and /dev/null differ diff --git a/images/create-eventing-namespace.png b/images/create-eventing-namespace.png deleted file mode 100644 index cd316ed23011..000000000000 Binary files a/images/create-eventing-namespace.png and /dev/null differ diff --git a/images/create-nodes-okd.png b/images/create-nodes-okd.png deleted file mode 100644 index 2abc14c89aca..000000000000 Binary files a/images/create-nodes-okd.png and /dev/null differ diff --git a/images/create-nodes.png b/images/create-nodes.png deleted file mode 100644 index 07d9ea6a5fbe..000000000000 Binary files a/images/create-nodes.png and /dev/null differ diff --git a/images/create-push-app.png b/images/create-push-app.png deleted file mode 100644 index a81d88d2fa1c..000000000000 Binary files 
a/images/create-push-app.png and /dev/null differ diff --git a/images/create-serving-namespace.png b/images/create-serving-namespace.png deleted file mode 100644 index 9d8ef2abb9ce..000000000000 Binary files a/images/create-serving-namespace.png and /dev/null differ diff --git a/images/create-silence.png b/images/create-silence.png deleted file mode 100644 index 4085666a23df..000000000000 Binary files a/images/create-silence.png and /dev/null differ diff --git a/images/create-sub-ODC.png b/images/create-sub-ODC.png deleted file mode 100644 index 54a81308a898..000000000000 Binary files a/images/create-sub-ODC.png and /dev/null differ diff --git a/images/csi-arch-rev1.png b/images/csi-arch-rev1.png deleted file mode 100644 index 5ebab3f41b7c..000000000000 Binary files a/images/csi-arch-rev1.png and /dev/null differ diff --git a/images/csi-arch.png b/images/csi-arch.png deleted file mode 100644 index ffaa509ae5ab..000000000000 Binary files a/images/csi-arch.png and /dev/null differ diff --git a/images/cso-namespace-vulnerable.png b/images/cso-namespace-vulnerable.png deleted file mode 100644 index 948a6dc81276..000000000000 Binary files a/images/cso-namespace-vulnerable.png and /dev/null differ diff --git a/images/cso-registry-vulnerable.png b/images/cso-registry-vulnerable.png deleted file mode 100644 index c9b147d11cf3..000000000000 Binary files a/images/cso-registry-vulnerable.png and /dev/null differ diff --git a/images/custom_4.5.png b/images/custom_4.5.png deleted file mode 100644 index 7a8d1607d2cd..000000000000 Binary files a/images/custom_4.5.png and /dev/null differ diff --git a/images/custom_4.5.svg b/images/custom_4.5.svg deleted file mode 100644 index 8116cb5e88eb..000000000000 --- a/images/custom_4.5.svg +++ /dev/null @@ -1 +0,0 @@ -namespace/openshift-authenticationsecrets/v4-0-config-system-router-certs11namespace/openshift-consoleconfigmaps/default-ingress-cert11configmaps/serviceacount-ca11namespace/openshift-kube-controller-managerconfigmaps/serviceacount-ca11namespace/openshift-kube-schedulerconfigmaps/proxy-ca10configmaps/my-certificatenamespace/openshift-ingressnamespace/openshift-ingress-operatornamespace/openshift-config-manageddeployments/router-default3secrets/my-certificate2ingresscontrollers/defaultReferenceContents are copied0namespace/openshift-config8proxies/cluster9User7cluster-scopedsecrets/router-certs4configmaps/default-ingress-cert5configmaps/trusted-ca-bundle6Requesting namespaces75_OpenShift_0520 \ No newline at end of file diff --git a/images/customizing-user-perspective.png b/images/customizing-user-perspective.png deleted file mode 100644 index ffa0c674ddce..000000000000 Binary files a/images/customizing-user-perspective.png and /dev/null differ diff --git a/images/darkcircle-0.png b/images/darkcircle-0.png deleted file mode 100644 index 5ab465076d8f..000000000000 Binary files a/images/darkcircle-0.png and /dev/null differ diff --git a/images/darkcircle-1.png b/images/darkcircle-1.png deleted file mode 100644 index 7b16d8ed9932..000000000000 Binary files a/images/darkcircle-1.png and /dev/null differ diff --git a/images/darkcircle-10.png b/images/darkcircle-10.png deleted file mode 100644 index dfdc4f8d4ea4..000000000000 Binary files a/images/darkcircle-10.png and /dev/null differ diff --git a/images/darkcircle-11.png b/images/darkcircle-11.png deleted file mode 100644 index 9bdcfea71d14..000000000000 Binary files a/images/darkcircle-11.png and /dev/null differ diff --git a/images/darkcircle-12.png b/images/darkcircle-12.png deleted file mode 100644 
index 303bcd41f52d..000000000000 Binary files a/images/darkcircle-12.png and /dev/null differ diff --git a/images/darkcircle-2.png b/images/darkcircle-2.png deleted file mode 100644 index a537be6f42c8..000000000000 Binary files a/images/darkcircle-2.png and /dev/null differ diff --git a/images/darkcircle-3.png b/images/darkcircle-3.png deleted file mode 100644 index a22625c683a8..000000000000 Binary files a/images/darkcircle-3.png and /dev/null differ diff --git a/images/darkcircle-4.png b/images/darkcircle-4.png deleted file mode 100644 index 27d03e8c1f30..000000000000 Binary files a/images/darkcircle-4.png and /dev/null differ diff --git a/images/darkcircle-5.png b/images/darkcircle-5.png deleted file mode 100644 index 8a59e47b1498..000000000000 Binary files a/images/darkcircle-5.png and /dev/null differ diff --git a/images/darkcircle-6.png b/images/darkcircle-6.png deleted file mode 100644 index c8b686908d54..000000000000 Binary files a/images/darkcircle-6.png and /dev/null differ diff --git a/images/darkcircle-7.png b/images/darkcircle-7.png deleted file mode 100644 index 2503523947d4..000000000000 Binary files a/images/darkcircle-7.png and /dev/null differ diff --git a/images/darkcircle-8.png b/images/darkcircle-8.png deleted file mode 100644 index b14edd088c2c..000000000000 Binary files a/images/darkcircle-8.png and /dev/null differ diff --git a/images/darkcircle-9.png b/images/darkcircle-9.png deleted file mode 100644 index 5cc237f70578..000000000000 Binary files a/images/darkcircle-9.png and /dev/null differ diff --git a/images/default_4.5.png b/images/default_4.5.png deleted file mode 100644 index 9aa577d33900..000000000000 Binary files a/images/default_4.5.png and /dev/null differ diff --git a/images/default_4.5.svg b/images/default_4.5.svg deleted file mode 100644 index 42ea9d8ad941..000000000000 --- a/images/default_4.5.svg +++ /dev/null @@ -1 +0,0 @@ -namespace/openshift-authenticationsecrets/v4-0-config-system-router-certs11configmaps/detault-ingress-cert11configmaps/serviceacount-ca11namespace/openshift-kube-controller-managerconfigmaps/serviceacount-ca11namespace/openshift-kube-schedulernamespace/openshift-ingressnamespace/openshift-ingress-operatornamespace/openshift-config-manageddeployments/router-default3secrets/router-certs-default2ingresscontrollers/defaultReferenceContents are copied0secrets/router-certs4configmaps/default-ingress-cert5secrets/router-ca1namespace/openshift-console75_OpenShift_0520 \ No newline at end of file diff --git a/images/delete-apiserversource-odc.png b/images/delete-apiserversource-odc.png deleted file mode 100644 index eec08d930913..000000000000 Binary files a/images/delete-apiserversource-odc.png and /dev/null differ diff --git a/images/delete.png b/images/delete.png deleted file mode 100644 index cec427f39142..000000000000 Binary files a/images/delete.png and /dev/null differ diff --git a/images/developer-catalog.png b/images/developer-catalog.png deleted file mode 100644 index e72996af91aa..000000000000 Binary files a/images/developer-catalog.png and /dev/null differ diff --git a/images/dpdk_line_rate.png b/images/dpdk_line_rate.png deleted file mode 100644 index b417377ded23..000000000000 Binary files a/images/dpdk_line_rate.png and /dev/null differ diff --git a/images/ellipsis-v.svg b/images/ellipsis-v.svg deleted file mode 100644 index c3074e62602a..000000000000 --- a/images/ellipsis-v.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/images/etcd-monitoring-working.png b/images/etcd-monitoring-working.png 
deleted file mode 100644 index 0c8d1ded9b5e..000000000000 Binary files a/images/etcd-monitoring-working.png and /dev/null differ diff --git a/images/etcd-no-certificate.png b/images/etcd-no-certificate.png deleted file mode 100644 index 7af701b81cf7..000000000000 Binary files a/images/etcd-no-certificate.png and /dev/null differ diff --git a/images/etcd-operator-overview.png b/images/etcd-operator-overview.png deleted file mode 100644 index 72ae3f7901b5..000000000000 Binary files a/images/etcd-operator-overview.png and /dev/null differ diff --git a/images/etcd-operator-resources.png b/images/etcd-operator-resources.png deleted file mode 100644 index dce86b5e8eec..000000000000 Binary files a/images/etcd-operator-resources.png and /dev/null differ diff --git a/images/event-sources-odc.png b/images/event-sources-odc.png deleted file mode 100644 index d3a13fea55a3..000000000000 Binary files a/images/event-sources-odc.png and /dev/null differ diff --git a/images/eventing-YAML-HA.png b/images/eventing-YAML-HA.png deleted file mode 100644 index c928146179ba..000000000000 Binary files a/images/eventing-YAML-HA.png and /dev/null differ diff --git a/images/eventing-conditions-true.png b/images/eventing-conditions-true.png deleted file mode 100644 index ba904f8fcf61..000000000000 Binary files a/images/eventing-conditions-true.png and /dev/null differ diff --git a/images/eventing-overview.png b/images/eventing-overview.png deleted file mode 100644 index 4264c5bc4d61..000000000000 Binary files a/images/eventing-overview.png and /dev/null differ diff --git a/images/export-application-dialog-box.png b/images/export-application-dialog-box.png deleted file mode 100644 index e069ae402239..000000000000 Binary files a/images/export-application-dialog-box.png and /dev/null differ diff --git a/images/filtered-messages.png b/images/filtered-messages.png deleted file mode 100644 index a2849401fe99..000000000000 Binary files a/images/filtered-messages.png and /dev/null differ diff --git a/images/flow1.png b/images/flow1.png deleted file mode 100644 index 0c1201620c33..000000000000 Binary files a/images/flow1.png and /dev/null differ diff --git a/images/flow2.png b/images/flow2.png deleted file mode 100644 index 105d2dbea3c2..000000000000 Binary files a/images/flow2.png and /dev/null differ diff --git a/images/flow3.png b/images/flow3.png deleted file mode 100644 index af0b1c94c442..000000000000 Binary files a/images/flow3.png and /dev/null differ diff --git a/images/flow4.png b/images/flow4.png deleted file mode 100644 index dcf737241600..000000000000 Binary files a/images/flow4.png and /dev/null differ diff --git a/images/getting-started-create-route-view-app.png b/images/getting-started-create-route-view-app.png deleted file mode 100644 index fe45d96de039..000000000000 Binary files a/images/getting-started-create-route-view-app.png and /dev/null differ diff --git a/images/getting-started-examine-pod.png b/images/getting-started-examine-pod.png deleted file mode 100644 index 18114693fa6b..000000000000 Binary files a/images/getting-started-examine-pod.png and /dev/null differ diff --git a/images/getting-started-map-national-parks.png b/images/getting-started-map-national-parks.png deleted file mode 100644 index 5c15da96beb5..000000000000 Binary files a/images/getting-started-map-national-parks.png and /dev/null differ diff --git a/images/getting-started-perspective-selector.png b/images/getting-started-perspective-selector.png deleted file mode 100644 index 492a3e5cec12..000000000000 Binary files 
a/images/getting-started-perspective-selector.png and /dev/null differ diff --git a/images/getting-started-scaling-pod.png b/images/getting-started-scaling-pod.png deleted file mode 100644 index 10078565c764..000000000000 Binary files a/images/getting-started-scaling-pod.png and /dev/null differ diff --git a/images/grid.png b/images/grid.png deleted file mode 100644 index 57998b4e58e7..000000000000 Binary files a/images/grid.png and /dev/null differ diff --git a/images/hosted-control-planes-diagram.png b/images/hosted-control-planes-diagram.png deleted file mode 100644 index aa2d465d8be8..000000000000 Binary files a/images/hosted-control-planes-diagram.png and /dev/null differ diff --git a/images/icon-link.png b/images/icon-link.png deleted file mode 100644 index 530de47f03c4..000000000000 Binary files a/images/icon-link.png and /dev/null differ diff --git a/images/icon-pencil.png b/images/icon-pencil.png deleted file mode 100644 index 3104d3b60ebc..000000000000 Binary files a/images/icon-pencil.png and /dev/null differ diff --git a/images/image_security.png b/images/image_security.png deleted file mode 100644 index a1254d2a273a..000000000000 Binary files a/images/image_security.png and /dev/null differ diff --git a/images/img_power.png b/images/img_power.png deleted file mode 100644 index 419fa79a55a1..000000000000 Binary files a/images/img_power.png and /dev/null differ diff --git a/images/ingress-certificates-workflow.png b/images/ingress-certificates-workflow.png deleted file mode 100644 index 77edf2652acf..000000000000 Binary files a/images/ingress-certificates-workflow.png and /dev/null differ diff --git a/images/installconfig.png b/images/installconfig.png deleted file mode 100644 index cf813b8d0e9c..000000000000 Binary files a/images/installconfig.png and /dev/null differ diff --git a/images/kafka-YAML-HA.png b/images/kafka-YAML-HA.png deleted file mode 100644 index 10252db282f2..000000000000 Binary files a/images/kafka-YAML-HA.png and /dev/null differ diff --git a/images/kebab.png b/images/kebab.png deleted file mode 100644 index 81893bd4ad10..000000000000 Binary files a/images/kebab.png and /dev/null differ diff --git a/images/knative-admin-health-status-dash.png b/images/knative-admin-health-status-dash.png deleted file mode 100644 index 90854acc3679..000000000000 Binary files a/images/knative-admin-health-status-dash.png and /dev/null differ diff --git a/images/knative-kafka-overview.png b/images/knative-kafka-overview.png deleted file mode 100644 index 2be315547a64..000000000000 Binary files a/images/knative-kafka-overview.png and /dev/null differ diff --git a/images/knative-service-architecture.png b/images/knative-service-architecture.png deleted file mode 100644 index 4d171664eea1..000000000000 Binary files a/images/knative-service-architecture.png and /dev/null differ diff --git a/images/knative-serving-created.png b/images/knative-serving-created.png deleted file mode 100644 index d6e4f6a47720..000000000000 Binary files a/images/knative-serving-created.png and /dev/null differ diff --git a/images/knative-serving-overview.png b/images/knative-serving-overview.png deleted file mode 100644 index 7980664456f3..000000000000 Binary files a/images/knative-serving-overview.png and /dev/null differ diff --git a/images/microshift-cert-rotation.png b/images/microshift-cert-rotation.png deleted file mode 100644 index 49497bbf5f5b..000000000000 Binary files a/images/microshift-cert-rotation.png and /dev/null differ diff --git a/images/microshift_ovn_topology.png 
b/images/microshift_ovn_topology.png deleted file mode 100644 index 55db68eeed96..000000000000 Binary files a/images/microshift_ovn_topology.png and /dev/null differ diff --git a/images/migration-PV-copy.png b/images/migration-PV-copy.png deleted file mode 100644 index a5b5baa39d98..000000000000 Binary files a/images/migration-PV-copy.png and /dev/null differ diff --git a/images/migration-PV-move.png b/images/migration-PV-move.png deleted file mode 100644 index 291fce220cf7..000000000000 Binary files a/images/migration-PV-move.png and /dev/null differ diff --git a/images/migration-architecture.png b/images/migration-architecture.png deleted file mode 100644 index 3ba8f035d15c..000000000000 Binary files a/images/migration-architecture.png and /dev/null differ diff --git a/images/mixed-windows-linux-workloads.png b/images/mixed-windows-linux-workloads.png deleted file mode 100644 index c4b9748df19c..000000000000 Binary files a/images/mixed-windows-linux-workloads.png and /dev/null differ diff --git a/images/monitoring-alert-overview.png b/images/monitoring-alert-overview.png deleted file mode 100644 index c12d2f088e41..000000000000 Binary files a/images/monitoring-alert-overview.png and /dev/null differ diff --git a/images/monitoring-alerting-rule-overview.png b/images/monitoring-alerting-rule-overview.png deleted file mode 100644 index 812668b53be5..000000000000 Binary files a/images/monitoring-alerting-rule-overview.png and /dev/null differ diff --git a/images/monitoring-alerting-rules-screen.png b/images/monitoring-alerting-rules-screen.png deleted file mode 100644 index 401365332e17..000000000000 Binary files a/images/monitoring-alerting-rules-screen.png and /dev/null differ diff --git a/images/monitoring-alerts-screen.png b/images/monitoring-alerts-screen.png deleted file mode 100644 index 0ed404546a33..000000000000 Binary files a/images/monitoring-alerts-screen.png and /dev/null differ diff --git a/images/monitoring-architecture.png b/images/monitoring-architecture.png deleted file mode 100644 index b3d15f0cbbeb..000000000000 Binary files a/images/monitoring-architecture.png and /dev/null differ diff --git a/images/monitoring-dashboard-administrator.png b/images/monitoring-dashboard-administrator.png deleted file mode 100644 index 1138a2fc4722..000000000000 Binary files a/images/monitoring-dashboard-administrator.png and /dev/null differ diff --git a/images/monitoring-dashboard-compute-resources.png b/images/monitoring-dashboard-compute-resources.png deleted file mode 100644 index ca1cdd7e209e..000000000000 Binary files a/images/monitoring-dashboard-compute-resources.png and /dev/null differ diff --git a/images/monitoring-dashboard-developer.png b/images/monitoring-dashboard-developer.png deleted file mode 100644 index f59847857a21..000000000000 Binary files a/images/monitoring-dashboard-developer.png and /dev/null differ diff --git a/images/monitoring-diagram.png b/images/monitoring-diagram.png deleted file mode 100644 index 3e76816e5ecf..000000000000 Binary files a/images/monitoring-diagram.png and /dev/null differ diff --git a/images/monitoring-metrics-developer.png b/images/monitoring-metrics-developer.png deleted file mode 100644 index cbd0abaa88c0..000000000000 Binary files a/images/monitoring-metrics-developer.png and /dev/null differ diff --git a/images/monitoring-metrics-screen.png b/images/monitoring-metrics-screen.png deleted file mode 100644 index 4058f7f196dd..000000000000 Binary files a/images/monitoring-metrics-screen.png and /dev/null differ diff --git 
a/images/monitoring-silences-screen.png b/images/monitoring-silences-screen.png deleted file mode 100644 index e46f5925cbad..000000000000 Binary files a/images/monitoring-silences-screen.png and /dev/null differ diff --git a/images/monitoring-yaml-screen.png b/images/monitoring-yaml-screen.png deleted file mode 100644 index f18af6b0cc02..000000000000 Binary files a/images/monitoring-yaml-screen.png and /dev/null differ diff --git a/images/node-add-hpa-action.png b/images/node-add-hpa-action.png deleted file mode 100644 index 01f7c73fd779..000000000000 Binary files a/images/node-add-hpa-action.png and /dev/null differ diff --git a/images/node-tuning-operator-workflow-revised.png b/images/node-tuning-operator-workflow-revised.png deleted file mode 100644 index a272e5854265..000000000000 Binary files a/images/node-tuning-operator-workflow-revised.png and /dev/null differ diff --git a/images/not-placetools.png b/images/not-placetools.png deleted file mode 100644 index cb70ff8c0325..000000000000 Binary files a/images/not-placetools.png and /dev/null differ diff --git a/images/nw-egress-ips-diagram.svg b/images/nw-egress-ips-diagram.svg deleted file mode 100644 index 3c996e5648dc..000000000000 --- a/images/nw-egress-ips-diagram.svg +++ /dev/null @@ -1 +0,0 @@ -192.168.126.102Node 3meta:name: node3labels:k8s.ovn.org/egress-assignable: ""Pod network10.128.0.0/14Infrastructure network192.168.126.0/18Externalservice192.168.126.10pod4pod2pod1Node 1meta:name: node1labels:k8s.ovn.org/egress-assignable: ""namespace1namespace2121_OpenShift_1020Node 2meta:name: node2pod3 \ No newline at end of file diff --git a/images/nw-ipsec-encryption.png b/images/nw-ipsec-encryption.png deleted file mode 100644 index 0d1fc46201b1..000000000000 Binary files a/images/nw-ipsec-encryption.png and /dev/null differ diff --git a/images/nw-metallb-layer2.png b/images/nw-metallb-layer2.png deleted file mode 100644 index 3cc343622e17..000000000000 Binary files a/images/nw-metallb-layer2.png and /dev/null differ diff --git a/images/nw-sharding-namespace-labels.png b/images/nw-sharding-namespace-labels.png deleted file mode 100644 index 64b4e5cc3125..000000000000 Binary files a/images/nw-sharding-namespace-labels.png and /dev/null differ diff --git a/images/nw-sharding-route-labels.png b/images/nw-sharding-route-labels.png deleted file mode 100644 index 995e2c137c0d..000000000000 Binary files a/images/nw-sharding-route-labels.png and /dev/null differ diff --git a/images/observe-dashboard-developer.png b/images/observe-dashboard-developer.png deleted file mode 100644 index d502fbf9c064..000000000000 Binary files a/images/observe-dashboard-developer.png and /dev/null differ diff --git a/images/odc-binding-connector.png b/images/odc-binding-connector.png deleted file mode 100644 index a4606e850ced..000000000000 Binary files a/images/odc-binding-connector.png and /dev/null differ diff --git a/images/odc-delete-service-binding.png b/images/odc-delete-service-binding.png deleted file mode 100644 index b8e40f63b61e..000000000000 Binary files a/images/odc-delete-service-binding.png and /dev/null differ diff --git a/images/odc-label-selector-sb-details.png b/images/odc-label-selector-sb-details.png deleted file mode 100644 index 110aa7dd7aff..000000000000 Binary files a/images/odc-label-selector-sb-details.png and /dev/null differ diff --git a/images/odc-label-selector-topology-side-panel.png b/images/odc-label-selector-topology-side-panel.png deleted file mode 100644 index 0d25dc9a5447..000000000000 Binary files 
a/images/odc-label-selector-topology-side-panel.png and /dev/null differ diff --git a/images/odc-recreate-update.png b/images/odc-recreate-update.png deleted file mode 100644 index a0fda1238410..000000000000 Binary files a/images/odc-recreate-update.png and /dev/null differ diff --git a/images/odc-rolling-update.png b/images/odc-rolling-update.png deleted file mode 100644 index 1fa736639419..000000000000 Binary files a/images/odc-rolling-update.png and /dev/null differ diff --git a/images/odc-sbc-modal.png b/images/odc-sbc-modal.png deleted file mode 100644 index c8fcf755fd75..000000000000 Binary files a/images/odc-sbc-modal.png and /dev/null differ diff --git a/images/odc-serverless-app.png b/images/odc-serverless-app.png deleted file mode 100644 index 8378acde156f..000000000000 Binary files a/images/odc-serverless-app.png and /dev/null differ diff --git a/images/odc-serverless-revisions.png b/images/odc-serverless-revisions.png deleted file mode 100644 index b373b4753bdc..000000000000 Binary files a/images/odc-serverless-revisions.png and /dev/null differ diff --git a/images/odc-view-broker.png b/images/odc-view-broker.png deleted file mode 100644 index d8ed44717f51..000000000000 Binary files a/images/odc-view-broker.png and /dev/null differ diff --git a/images/odc-wto-icon.png b/images/odc-wto-icon.png deleted file mode 100644 index 2c323700f609..000000000000 Binary files a/images/odc-wto-icon.png and /dev/null differ diff --git a/images/odc_add_to_project.png b/images/odc_add_to_project.png deleted file mode 100644 index 5b276655a9e4..000000000000 Binary files a/images/odc_add_to_project.png and /dev/null differ diff --git a/images/odc_add_view.png b/images/odc_add_view.png deleted file mode 100644 index d343a614fc41..000000000000 Binary files a/images/odc_add_view.png and /dev/null differ diff --git a/images/odc_app_grouping_label.png b/images/odc_app_grouping_label.png deleted file mode 100644 index d30ed4ea8a30..000000000000 Binary files a/images/odc_app_grouping_label.png and /dev/null differ diff --git a/images/odc_app_metrics.png b/images/odc_app_metrics.png deleted file mode 100644 index d81ae7902451..000000000000 Binary files a/images/odc_app_metrics.png and /dev/null differ diff --git a/images/odc_application_topology.png b/images/odc_application_topology.png deleted file mode 100644 index f418a45cfe97..000000000000 Binary files a/images/odc_application_topology.png and /dev/null differ diff --git a/images/odc_build_canceled.png b/images/odc_build_canceled.png deleted file mode 100644 index 0e4da383baf5..000000000000 Binary files a/images/odc_build_canceled.png and /dev/null differ diff --git a/images/odc_build_completed.png b/images/odc_build_completed.png deleted file mode 100644 index fa55d761cf79..000000000000 Binary files a/images/odc_build_completed.png and /dev/null differ diff --git a/images/odc_build_failed.png b/images/odc_build_failed.png deleted file mode 100644 index eb5a62cd455f..000000000000 Binary files a/images/odc_build_failed.png and /dev/null differ diff --git a/images/odc_build_new.png b/images/odc_build_new.png deleted file mode 100644 index 52cb97fd6b2c..000000000000 Binary files a/images/odc_build_new.png and /dev/null differ diff --git a/images/odc_build_pending.png b/images/odc_build_pending.png deleted file mode 100644 index a9d4615733d3..000000000000 Binary files a/images/odc_build_pending.png and /dev/null differ diff --git a/images/odc_build_running.png b/images/odc_build_running.png deleted file mode 100644 index a0aaa83590f0..000000000000 Binary 
files a/images/odc_build_running.png and /dev/null differ diff --git a/images/odc_che_workspace.png b/images/odc_che_workspace.png deleted file mode 100644 index fd17944d96c3..000000000000 Binary files a/images/odc_che_workspace.png and /dev/null differ diff --git a/images/odc_cluster_console.png b/images/odc_cluster_console.png deleted file mode 100644 index 0dd37fbade82..000000000000 Binary files a/images/odc_cluster_console.png and /dev/null differ diff --git a/images/odc_connecting_multiple_applications.png b/images/odc_connecting_multiple_applications.png deleted file mode 100644 index 9d18ea3cca47..000000000000 Binary files a/images/odc_connecting_multiple_applications.png and /dev/null differ diff --git a/images/odc_connector.png b/images/odc_connector.png deleted file mode 100644 index 6add9a7dfc26..000000000000 Binary files a/images/odc_connector.png and /dev/null differ diff --git a/images/odc_context_menu.png b/images/odc_context_menu.png deleted file mode 100644 index ef8905e784a8..000000000000 Binary files a/images/odc_context_menu.png and /dev/null differ diff --git a/images/odc_context_operator.png b/images/odc_context_operator.png deleted file mode 100644 index 112c608ecc78..000000000000 Binary files a/images/odc_context_operator.png and /dev/null differ diff --git a/images/odc_context_project.png b/images/odc_context_project.png deleted file mode 100644 index 8342c69d3dbc..000000000000 Binary files a/images/odc_context_project.png and /dev/null differ diff --git a/images/odc_create_project.png b/images/odc_create_project.png deleted file mode 100644 index 23a8dda1916f..000000000000 Binary files a/images/odc_create_project.png and /dev/null differ diff --git a/images/odc_customizing_developer_catalog.png b/images/odc_customizing_developer_catalog.png deleted file mode 100644 index 8fd236feb74b..000000000000 Binary files a/images/odc_customizing_developer_catalog.png and /dev/null differ diff --git a/images/odc_deleting_deployment.png b/images/odc_deleting_deployment.png deleted file mode 100644 index 400329ce5794..000000000000 Binary files a/images/odc_deleting_deployment.png and /dev/null differ diff --git a/images/odc_devcatalog_toplogy.png b/images/odc_devcatalog_toplogy.png deleted file mode 100644 index 951ede330b87..000000000000 Binary files a/images/odc_devcatalog_toplogy.png and /dev/null differ diff --git a/images/odc_developer_perspective.png b/images/odc_developer_perspective.png deleted file mode 100644 index ad24511c19c0..000000000000 Binary files a/images/odc_developer_perspective.png and /dev/null differ diff --git a/images/odc_edit_app.png b/images/odc_edit_app.png deleted file mode 100644 index bdf41a27b477..000000000000 Binary files a/images/odc_edit_app.png and /dev/null differ diff --git a/images/odc_edit_redeploy.png b/images/odc_edit_redeploy.png deleted file mode 100644 index 3708f2868262..000000000000 Binary files a/images/odc_edit_redeploy.png and /dev/null differ diff --git a/images/odc_git_repository.png b/images/odc_git_repository.png deleted file mode 100644 index 4b22c5d567c1..000000000000 Binary files a/images/odc_git_repository.png and /dev/null differ diff --git a/images/odc_helm_chart_devcatalog.png b/images/odc_helm_chart_devcatalog.png deleted file mode 100644 index f9e8bd6ab935..000000000000 Binary files a/images/odc_helm_chart_devcatalog.png and /dev/null differ diff --git a/images/odc_helm_chart_devcatalog_new.png b/images/odc_helm_chart_devcatalog_new.png deleted file mode 100644 index aadfb8c8aff9..000000000000 Binary files 
a/images/odc_helm_chart_devcatalog_new.png and /dev/null differ diff --git a/images/odc_helm_chart_repo_filter.png b/images/odc_helm_chart_repo_filter.png deleted file mode 100644 index bfc44325b1a2..000000000000 Binary files a/images/odc_helm_chart_repo_filter.png and /dev/null differ diff --git a/images/odc_helm_chart_select_chart_ver.png b/images/odc_helm_chart_select_chart_ver.png deleted file mode 100644 index 6d7b52aa2d92..000000000000 Binary files a/images/odc_helm_chart_select_chart_ver.png and /dev/null differ diff --git a/images/odc_helm_revision_history.png b/images/odc_helm_revision_history.png deleted file mode 100644 index 1cee05afb284..000000000000 Binary files a/images/odc_helm_revision_history.png and /dev/null differ diff --git a/images/odc_image_vulnerabilities.png b/images/odc_image_vulnerabilities.png deleted file mode 100644 index 484788a032e5..000000000000 Binary files a/images/odc_image_vulnerabilities.png and /dev/null differ diff --git a/images/odc_info.png b/images/odc_info.png deleted file mode 100644 index c59cfc70e233..000000000000 Binary files a/images/odc_info.png and /dev/null differ diff --git a/images/odc_list_view_icon.png b/images/odc_list_view_icon.png deleted file mode 100644 index 4a6013aba005..000000000000 Binary files a/images/odc_list_view_icon.png and /dev/null differ diff --git a/images/odc_namespace_helm_chart_repo_filter.png b/images/odc_namespace_helm_chart_repo_filter.png deleted file mode 100644 index 77400ca84046..000000000000 Binary files a/images/odc_namespace_helm_chart_repo_filter.png and /dev/null differ diff --git a/images/odc_observe_dashboard.png b/images/odc_observe_dashboard.png deleted file mode 100644 index 199b96cd7354..000000000000 Binary files a/images/odc_observe_dashboard.png and /dev/null differ diff --git a/images/odc_open_url.png b/images/odc_open_url.png deleted file mode 100644 index ad5498ec94a2..000000000000 Binary files a/images/odc_open_url.png and /dev/null differ diff --git a/images/odc_pod_failed.png b/images/odc_pod_failed.png deleted file mode 100644 index 94bb0a686b09..000000000000 Binary files a/images/odc_pod_failed.png and /dev/null differ diff --git a/images/odc_pod_not_ready.png b/images/odc_pod_not_ready.png deleted file mode 100644 index 5b0f76daa2f8..000000000000 Binary files a/images/odc_pod_not_ready.png and /dev/null differ diff --git a/images/odc_pod_pending.png b/images/odc_pod_pending.png deleted file mode 100644 index c0181dfdba37..000000000000 Binary files a/images/odc_pod_pending.png and /dev/null differ diff --git a/images/odc_pod_running.png b/images/odc_pod_running.png deleted file mode 100644 index 75c015fab634..000000000000 Binary files a/images/odc_pod_running.png and /dev/null differ diff --git a/images/odc_pod_succeeded.png b/images/odc_pod_succeeded.png deleted file mode 100644 index 257b956f58dc..000000000000 Binary files a/images/odc_pod_succeeded.png and /dev/null differ diff --git a/images/odc_pod_terminating.png b/images/odc_pod_terminating.png deleted file mode 100644 index f4f3fcdd3772..000000000000 Binary files a/images/odc_pod_terminating.png and /dev/null differ diff --git a/images/odc_pod_unknown.png b/images/odc_pod_unknown.png deleted file mode 100644 index d4d0b65664ff..000000000000 Binary files a/images/odc_pod_unknown.png and /dev/null differ diff --git a/images/odc_pod_warning.png b/images/odc_pod_warning.png deleted file mode 100644 index 8a6d5afb5bfa..000000000000 Binary files a/images/odc_pod_warning.png and /dev/null differ diff --git 
a/images/odc_project_alerts.png b/images/odc_project_alerts.png deleted file mode 100644 index 7266b3776829..000000000000 Binary files a/images/odc_project_alerts.png and /dev/null differ diff --git a/images/odc_project_dashboard.png b/images/odc_project_dashboard.png deleted file mode 100644 index 03da036fa655..000000000000 Binary files a/images/odc_project_dashboard.png and /dev/null differ diff --git a/images/odc_project_events.png b/images/odc_project_events.png deleted file mode 100644 index a24cc1e0903a..000000000000 Binary files a/images/odc_project_events.png and /dev/null differ diff --git a/images/odc_project_metrics.png b/images/odc_project_metrics.png deleted file mode 100644 index bd6ded4a54b4..000000000000 Binary files a/images/odc_project_metrics.png and /dev/null differ diff --git a/images/odc_project_permissions.png b/images/odc_project_permissions.png deleted file mode 100644 index b2f34484fd72..000000000000 Binary files a/images/odc_project_permissions.png and /dev/null differ diff --git a/images/odc_quick_search.png b/images/odc_quick_search.png deleted file mode 100644 index 59fbb1b9d6cc..000000000000 Binary files a/images/odc_quick_search.png and /dev/null differ diff --git a/images/odc_serverless_app.png b/images/odc_serverless_app.png deleted file mode 100644 index 2b748c403b17..000000000000 Binary files a/images/odc_serverless_app.png and /dev/null differ diff --git a/images/odc_topology_view_icon.png b/images/odc_topology_view_icon.png deleted file mode 100644 index 76b517a24145..000000000000 Binary files a/images/odc_topology_view_icon.png and /dev/null differ diff --git a/images/odc_verified_icon.png b/images/odc_verified_icon.png deleted file mode 100644 index 36c754bd9347..000000000000 Binary files a/images/odc_verified_icon.png and /dev/null differ diff --git a/images/oke-about-ocp-stack-image.png b/images/oke-about-ocp-stack-image.png deleted file mode 100644 index b74324a82ebc..000000000000 Binary files a/images/oke-about-ocp-stack-image.png and /dev/null differ diff --git a/images/oke-about.png b/images/oke-about.png deleted file mode 100644 index f8055fb1a51f..000000000000 Binary files a/images/oke-about.png and /dev/null differ diff --git a/images/oke-arch-ocp-stack.png b/images/oke-arch-ocp-stack.png deleted file mode 100644 index 92af5f6f7b9e..000000000000 Binary files a/images/oke-arch-ocp-stack.png and /dev/null differ diff --git a/images/olm-catalog-sources.png b/images/olm-catalog-sources.png deleted file mode 100644 index 2a8e7dbbc0da..000000000000 Binary files a/images/olm-catalog-sources.png and /dev/null differ diff --git a/images/olm-catalogsource.png b/images/olm-catalogsource.png deleted file mode 100644 index 7d7401cf7836..000000000000 Binary files a/images/olm-catalogsource.png and /dev/null differ diff --git a/images/olm-channels.png b/images/olm-channels.png deleted file mode 100644 index 845d72a77560..000000000000 Binary files a/images/olm-channels.png and /dev/null differ diff --git a/images/olm-manualapproval.png b/images/olm-manualapproval.png deleted file mode 100644 index 01f91f101de9..000000000000 Binary files a/images/olm-manualapproval.png and /dev/null differ diff --git a/images/olm-operator-delete.png b/images/olm-operator-delete.png deleted file mode 100644 index 1bde1467f2c2..000000000000 Binary files a/images/olm-operator-delete.png and /dev/null differ diff --git a/images/olm-operatorhub.png b/images/olm-operatorhub.png deleted file mode 100644 index 713179f2461e..000000000000 Binary files a/images/olm-operatorhub.png and 
/dev/null differ diff --git a/images/olm-replaces.png b/images/olm-replaces.png deleted file mode 100644 index 5394fee620c0..000000000000 Binary files a/images/olm-replaces.png and /dev/null differ diff --git a/images/olm-skipping-updates.png b/images/olm-skipping-updates.png deleted file mode 100644 index dae054588cff..000000000000 Binary files a/images/olm-skipping-updates.png and /dev/null differ diff --git a/images/olm-uptodate.png b/images/olm-uptodate.png deleted file mode 100644 index 2176cd097594..000000000000 Binary files a/images/olm-uptodate.png and /dev/null differ diff --git a/images/olm-workflow.png b/images/olm-workflow.png deleted file mode 100644 index aeb48f3ecb0e..000000000000 Binary files a/images/olm-workflow.png and /dev/null differ diff --git a/images/olm-z-stream.png b/images/olm-z-stream.png deleted file mode 100644 index b55a5ffa0c83..000000000000 Binary files a/images/olm-z-stream.png and /dev/null differ diff --git a/images/op-install-subscription.png b/images/op-install-subscription.png deleted file mode 100644 index 5f41c09482f3..000000000000 Binary files a/images/op-install-subscription.png and /dev/null differ diff --git a/images/op-installed-tile.png b/images/op-installed-tile.png deleted file mode 100644 index dfd4af39fca7..000000000000 Binary files a/images/op-installed-tile.png and /dev/null differ diff --git a/images/op-pipeline-builder-task-details.png b/images/op-pipeline-builder-task-details.png deleted file mode 100644 index 34b2f08e3fa0..000000000000 Binary files a/images/op-pipeline-builder-task-details.png and /dev/null differ diff --git a/images/op-pipeline-builder.png b/images/op-pipeline-builder.png deleted file mode 100644 index 9b19bebe7802..000000000000 Binary files a/images/op-pipeline-builder.png and /dev/null differ diff --git a/images/op-pipeline-details.png b/images/op-pipeline-details.png deleted file mode 100644 index 01cf2d79227b..000000000000 Binary files a/images/op-pipeline-details.png and /dev/null differ diff --git a/images/op-pipeline-details1.png b/images/op-pipeline-details1.png deleted file mode 100644 index 74b671989e55..000000000000 Binary files a/images/op-pipeline-details1.png and /dev/null differ diff --git a/images/op-pipeline-yaml.png b/images/op-pipeline-yaml.png deleted file mode 100644 index cdf3a2e43bae..000000000000 Binary files a/images/op-pipeline-yaml.png and /dev/null differ diff --git a/images/op_pipeline_run.png b/images/op_pipeline_run.png deleted file mode 100644 index 145915eff5e6..000000000000 Binary files a/images/op_pipeline_run.png and /dev/null differ diff --git a/images/op_pipeline_run2.png b/images/op_pipeline_run2.png deleted file mode 100644 index 5b61837a51d9..000000000000 Binary files a/images/op_pipeline_run2.png and /dev/null differ diff --git a/images/op_pipeline_topology.png b/images/op_pipeline_topology.png deleted file mode 100644 index 94d65ea12616..000000000000 Binary files a/images/op_pipeline_topology.png and /dev/null differ diff --git a/images/op_pipeline_topology1.png b/images/op_pipeline_topology1.png deleted file mode 100644 index 0462c8884019..000000000000 Binary files a/images/op_pipeline_topology1.png and /dev/null differ diff --git a/images/openshift-on-openstack-provider-network.png b/images/openshift-on-openstack-provider-network.png deleted file mode 100644 index 9700c13b2972..000000000000 Binary files a/images/openshift-on-openstack-provider-network.png and /dev/null differ diff --git a/images/operator-maturity-model.png b/images/operator-maturity-model.png deleted file 
mode 100644 index c4f745c299f8..000000000000 Binary files a/images/operator-maturity-model.png and /dev/null differ diff --git a/images/orchestration.png b/images/orchestration.png deleted file mode 100644 index 4e77da1c5d8f..000000000000 Binary files a/images/orchestration.png and /dev/null differ diff --git a/images/osd-monitoring-architecture.svg b/images/osd-monitoring-architecture.svg deleted file mode 100644 index 9a648fdc3788..000000000000 --- a/images/osd-monitoring-architecture.svg +++ /dev/null @@ -1 +0,0 @@ -DeployDeployQueriesInstalled by defaultPrometheusOperatorNEPAGrafanaKSMOSMTelemeterClientDeployDeployAlertsDeployUserAlertmanagerOpenShiftProjectsUser-DefinedProjectsAlertsThanos RulerPrometheusThanosQuerierQueriesPrometheusPrometheusOperatorPlatform118_OpenShift_0920DeployDeployClusterMonitoringOperatorClusterVersionOperator \ No newline at end of file diff --git a/images/osd-nodes-machinepools-about-f7619.png b/images/osd-nodes-machinepools-about-f7619.png deleted file mode 100644 index 80c5e309b255..000000000000 Binary files a/images/osd-nodes-machinepools-about-f7619.png and /dev/null differ diff --git a/images/osdk-workflow.png b/images/osdk-workflow.png deleted file mode 100644 index 2fb5236bcef3..000000000000 Binary files a/images/osdk-workflow.png and /dev/null differ diff --git a/images/osdocs-contribution-workflow.png b/images/osdocs-contribution-workflow.png deleted file mode 100644 index c24d49c16b8d..000000000000 Binary files a/images/osdocs-contribution-workflow.png and /dev/null differ diff --git a/images/ossm-adding-project-using-label-selector.png b/images/ossm-adding-project-using-label-selector.png deleted file mode 100644 index 566c4f9ae83d..000000000000 Binary files a/images/ossm-adding-project-using-label-selector.png and /dev/null differ diff --git a/images/ossm-adding-project-using-smm.png b/images/ossm-adding-project-using-smm.png deleted file mode 100644 index f0e017331271..000000000000 Binary files a/images/ossm-adding-project-using-smm.png and /dev/null differ diff --git a/images/ossm-adding-project-using-smmr.png b/images/ossm-adding-project-using-smmr.png deleted file mode 100644 index 82b7189bf869..000000000000 Binary files a/images/ossm-adding-project-using-smmr.png and /dev/null differ diff --git a/images/ossm-architecture.png b/images/ossm-architecture.png deleted file mode 100644 index 552e5a2ba61d..000000000000 Binary files a/images/ossm-architecture.png and /dev/null differ diff --git a/images/ossm-federated-mesh.png b/images/ossm-federated-mesh.png deleted file mode 100644 index c7af735dcf28..000000000000 Binary files a/images/ossm-federated-mesh.png and /dev/null differ diff --git a/images/ossm-federation-export-service.png b/images/ossm-federation-export-service.png deleted file mode 100644 index 242ef179763d..000000000000 Binary files a/images/ossm-federation-export-service.png and /dev/null differ diff --git a/images/ossm-federation-import-service.png b/images/ossm-federation-import-service.png deleted file mode 100644 index b5f5dcec2bef..000000000000 Binary files a/images/ossm-federation-import-service.png and /dev/null differ diff --git a/images/ossm-grafana-control-plane-dashboard.png b/images/ossm-grafana-control-plane-dashboard.png deleted file mode 100644 index fd6b32e7db5e..000000000000 Binary files a/images/ossm-grafana-control-plane-dashboard.png and /dev/null differ diff --git a/images/ossm-grafana-dashboard-no-traffic.png b/images/ossm-grafana-dashboard-no-traffic.png deleted file mode 100644 index 
7c2182c016b3..000000000000 Binary files a/images/ossm-grafana-dashboard-no-traffic.png and /dev/null differ diff --git a/images/ossm-grafana-home-screen.png b/images/ossm-grafana-home-screen.png deleted file mode 100644 index 0690720c6a47..000000000000 Binary files a/images/ossm-grafana-home-screen.png and /dev/null differ diff --git a/images/ossm-grafana-mesh-no-traffic.png b/images/ossm-grafana-mesh-no-traffic.png deleted file mode 100644 index c5f717e266a0..000000000000 Binary files a/images/ossm-grafana-mesh-no-traffic.png and /dev/null differ diff --git a/images/ossm-grafana-mesh-with-traffic.png b/images/ossm-grafana-mesh-with-traffic.png deleted file mode 100644 index 37e53db9e305..000000000000 Binary files a/images/ossm-grafana-mesh-with-traffic.png and /dev/null differ diff --git a/images/ossm-grafana-services.png b/images/ossm-grafana-services.png deleted file mode 100644 index dec1e32dcd1c..000000000000 Binary files a/images/ossm-grafana-services.png and /dev/null differ diff --git a/images/ossm-grafana-workloads.png b/images/ossm-grafana-workloads.png deleted file mode 100644 index a32e69e3e64f..000000000000 Binary files a/images/ossm-grafana-workloads.png and /dev/null differ diff --git a/images/ossm-icon-missing-sidecar.png b/images/ossm-icon-missing-sidecar.png deleted file mode 100644 index 5d50229f1810..000000000000 Binary files a/images/ossm-icon-missing-sidecar.png and /dev/null differ diff --git a/images/ossm-kiali-graph-badge-security.png b/images/ossm-kiali-graph-badge-security.png deleted file mode 100644 index 440bff3ea42b..000000000000 Binary files a/images/ossm-kiali-graph-badge-security.png and /dev/null differ diff --git a/images/ossm-kiali-graph-bookinfo.png b/images/ossm-kiali-graph-bookinfo.png deleted file mode 100644 index e34dd3d0d04c..000000000000 Binary files a/images/ossm-kiali-graph-bookinfo.png and /dev/null differ diff --git a/images/ossm-kiali-masthead-mtls-enabled.png b/images/ossm-kiali-masthead-mtls-enabled.png deleted file mode 100644 index 1ffe26bcd3d4..000000000000 Binary files a/images/ossm-kiali-masthead-mtls-enabled.png and /dev/null differ diff --git a/images/ossm-kiali-masthead-mtls-partial.png b/images/ossm-kiali-masthead-mtls-partial.png deleted file mode 100644 index 5e9302bea975..000000000000 Binary files a/images/ossm-kiali-masthead-mtls-partial.png and /dev/null differ diff --git a/images/ossm-kiali-overview.png b/images/ossm-kiali-overview.png deleted file mode 100644 index 7b36d12ca495..000000000000 Binary files a/images/ossm-kiali-overview.png and /dev/null differ diff --git a/images/ossm-node-badge-missing-sidecar.svg b/images/ossm-node-badge-missing-sidecar.svg deleted file mode 100644 index f8005984ad1e..000000000000 --- a/images/ossm-node-badge-missing-sidecar.svg +++ /dev/null @@ -1,119 +0,0 @@ - - - - - - - - - - - - image/svg+xml - - - - - - - - - - - - - - - - - - - - - diff --git a/images/ossm-prometheus-home-screen.png b/images/ossm-prometheus-home-screen.png deleted file mode 100644 index c570ae4b41cc..000000000000 Binary files a/images/ossm-prometheus-home-screen.png and /dev/null differ diff --git a/images/ossm-prometheus-metrics.png b/images/ossm-prometheus-metrics.png deleted file mode 100644 index c115e31d2441..000000000000 Binary files a/images/ossm-prometheus-metrics.png and /dev/null differ diff --git a/images/pod-identity-webhook-workflow-in-user-defined-projects.png b/images/pod-identity-webhook-workflow-in-user-defined-projects.png deleted file mode 100644 index 70d811428708..000000000000 Binary files 
a/images/pod-identity-webhook-workflow-in-user-defined-projects.png and /dev/null differ diff --git a/images/product-workflow-overview.png b/images/product-workflow-overview.png deleted file mode 100644 index b6a7872c0f20..000000000000 Binary files a/images/product-workflow-overview.png and /dev/null differ diff --git a/images/question-circle.png b/images/question-circle.png deleted file mode 100644 index a505ba1fcadd..000000000000 Binary files a/images/question-circle.png and /dev/null differ diff --git a/images/quick-start-conclusion.png b/images/quick-start-conclusion.png deleted file mode 100644 index f93257032c6d..000000000000 Binary files a/images/quick-start-conclusion.png and /dev/null differ diff --git a/images/quick-start-description.png b/images/quick-start-description.png deleted file mode 100644 index e8829706d2e9..000000000000 Binary files a/images/quick-start-description.png and /dev/null differ diff --git a/images/quick-start-display-name.png b/images/quick-start-display-name.png deleted file mode 100644 index c6b6aa3579ca..000000000000 Binary files a/images/quick-start-display-name.png and /dev/null differ diff --git a/images/quick-start-duration.png b/images/quick-start-duration.png deleted file mode 100644 index 7d88be59b935..000000000000 Binary files a/images/quick-start-duration.png and /dev/null differ diff --git a/images/quick-start-icon.png b/images/quick-start-icon.png deleted file mode 100644 index 1b1c7c0b8e8a..000000000000 Binary files a/images/quick-start-icon.png and /dev/null differ diff --git a/images/quick-start-introduction.png b/images/quick-start-introduction.png deleted file mode 100644 index 450cb8dc63b9..000000000000 Binary files a/images/quick-start-introduction.png and /dev/null differ diff --git a/images/rbac.png b/images/rbac.png deleted file mode 100644 index 63312ea53f2f..000000000000 Binary files a/images/rbac.png and /dev/null differ diff --git a/images/red-hat-applications-menu-icon.jpg b/images/red-hat-applications-menu-icon.jpg deleted file mode 100644 index c2afc1e8099e..000000000000 Binary files a/images/red-hat-applications-menu-icon.jpg and /dev/null differ diff --git a/images/redcircle-1.png b/images/redcircle-1.png deleted file mode 100644 index 4cbb364a2d76..000000000000 Binary files a/images/redcircle-1.png and /dev/null differ diff --git a/images/redcircle-2.png b/images/redcircle-2.png deleted file mode 100644 index 23f4a0d23f93..000000000000 Binary files a/images/redcircle-2.png and /dev/null differ diff --git a/images/redcircle-3.png b/images/redcircle-3.png deleted file mode 100644 index 06ac69f6a99c..000000000000 Binary files a/images/redcircle-3.png and /dev/null differ diff --git a/images/s2i-flow.png b/images/s2i-flow.png deleted file mode 100644 index b7ec58fb6353..000000000000 Binary files a/images/s2i-flow.png and /dev/null differ diff --git a/images/secure_deployments.png b/images/secure_deployments.png deleted file mode 100644 index 57add95959b2..000000000000 Binary files a/images/secure_deployments.png and /dev/null differ diff --git a/images/serverless-create-namespaces.png b/images/serverless-create-namespaces.png deleted file mode 100644 index 292db72db060..000000000000 Binary files a/images/serverless-create-namespaces.png and /dev/null differ diff --git a/images/serverless-create-service-admin.png b/images/serverless-create-service-admin.png deleted file mode 100644 index b3c99c8ef11f..000000000000 Binary files a/images/serverless-create-service-admin.png and /dev/null differ diff --git 
a/images/serverless-event-broker-workflow.png b/images/serverless-event-broker-workflow.png deleted file mode 100644 index aea669722c38..000000000000 Binary files a/images/serverless-event-broker-workflow.png and /dev/null differ diff --git a/images/serverless-event-channel-workflow.png b/images/serverless-event-channel-workflow.png deleted file mode 100644 index 99957bc3aed7..000000000000 Binary files a/images/serverless-event-channel-workflow.png and /dev/null differ diff --git a/images/serverless-monitoring-service-example-dashboard.png b/images/serverless-monitoring-service-example-dashboard.png deleted file mode 100644 index d9c29422ac29..000000000000 Binary files a/images/serverless-monitoring-service-example-dashboard.png and /dev/null differ diff --git a/images/serverless-monitoring-service-example1.png b/images/serverless-monitoring-service-example1.png deleted file mode 100644 index 27c73173cfc5..000000000000 Binary files a/images/serverless-monitoring-service-example1.png and /dev/null differ diff --git a/images/serverless-monitoring-service-example2.png b/images/serverless-monitoring-service-example2.png deleted file mode 100644 index f0149cd5dd11..000000000000 Binary files a/images/serverless-monitoring-service-example2.png and /dev/null differ diff --git a/images/serverless-verify-broker-odc.png b/images/serverless-verify-broker-odc.png deleted file mode 100644 index 9a7a9b057275..000000000000 Binary files a/images/serverless-verify-broker-odc.png and /dev/null differ diff --git a/images/service-yaml-admin.png b/images/service-yaml-admin.png deleted file mode 100644 index 28823e44a019..000000000000 Binary files a/images/service-yaml-admin.png and /dev/null differ diff --git a/images/serving-YAML-HA.png b/images/serving-YAML-HA.png deleted file mode 100644 index caf0b5b1261a..000000000000 Binary files a/images/serving-YAML-HA.png and /dev/null differ diff --git a/images/serving-conditions-true.png b/images/serving-conditions-true.png deleted file mode 100644 index 22f38a447bab..000000000000 Binary files a/images/serving-conditions-true.png and /dev/null differ diff --git a/images/serving-overview.png b/images/serving-overview.png deleted file mode 100644 index c0e6478a9530..000000000000 Binary files a/images/serving-overview.png and /dev/null differ diff --git a/images/silence-overview.png b/images/silence-overview.png deleted file mode 100644 index 5b2093a3a89f..000000000000 Binary files a/images/silence-overview.png and /dev/null differ diff --git a/images/targets-and-dependencies.png b/images/targets-and-dependencies.png deleted file mode 100644 index 91fa65dbaf9a..000000000000 Binary files a/images/targets-and-dependencies.png and /dev/null differ diff --git a/images/telmetry-and-insights-operator-data-flow.svg b/images/telmetry-and-insights-operator-data-flow.svg deleted file mode 100644 index 4d0820cdd731..000000000000 --- a/images/telmetry-and-insights-operator-data-flow.svg +++ /dev/null @@ -1 +0,0 @@ -132_OpenShift_1220KubernetesAPIPrometheusAPIHTTPS (443)HTTPS (443)TelemeterClientOpenShiftContainer PlatformRed HatSupportSubscriptionmanagementOpenShiftCluster ManagerInsightsanalysis enginecloud.redhat.comapi.openshift.comRed HatWebconsoleInsightsOperator \ No newline at end of file diff --git a/images/toplogy-odc-apiserver.png b/images/toplogy-odc-apiserver.png deleted file mode 100644 index 22e6532e2091..000000000000 Binary files a/images/toplogy-odc-apiserver.png and /dev/null differ diff --git a/images/trustedsupplychain.png b/images/trustedsupplychain.png deleted 
file mode 100644 index e62e0f70824d..000000000000 Binary files a/images/trustedsupplychain.png and /dev/null differ diff --git a/images/update-runlevels.png b/images/update-runlevels.png deleted file mode 100644 index 309e195a9c58..000000000000 Binary files a/images/update-runlevels.png and /dev/null differ diff --git a/images/verify-channel-odc.png b/images/verify-channel-odc.png deleted file mode 100644 index 43a36617d790..000000000000 Binary files a/images/verify-channel-odc.png and /dev/null differ diff --git a/images/verify-kafka-ODC.png b/images/verify-kafka-ODC.png deleted file mode 100644 index 1f94c0e028c5..000000000000 Binary files a/images/verify-kafka-ODC.png and /dev/null differ diff --git a/images/verify-pingsource-ODC.png b/images/verify-pingsource-ODC.png deleted file mode 100644 index dac0464b36cf..000000000000 Binary files a/images/verify-pingsource-ODC.png and /dev/null differ diff --git a/images/verify-sinkbinding-odc.png b/images/verify-sinkbinding-odc.png deleted file mode 100644 index 91039cf2ab1a..000000000000 Binary files a/images/verify-sinkbinding-odc.png and /dev/null differ diff --git a/images/verify-subscription-odc.png b/images/verify-subscription-odc.png deleted file mode 100644 index c621e6d34f18..000000000000 Binary files a/images/verify-subscription-odc.png and /dev/null differ diff --git a/images/virt-icon.png b/images/virt-icon.png deleted file mode 100644 index dc1439e8d389..000000000000 Binary files a/images/virt-icon.png and /dev/null differ diff --git a/images/web_console_perspectives.png b/images/web_console_perspectives.png deleted file mode 100644 index 1018d33dbabc..000000000000 Binary files a/images/web_console_perspectives.png and /dev/null differ diff --git a/images/whatarecontainers.png b/images/whatarecontainers.png deleted file mode 100644 index 8c3bd20379d5..000000000000 Binary files a/images/whatarecontainers.png and /dev/null differ diff --git a/images/wmco-design.png b/images/wmco-design.png deleted file mode 100644 index 17245c45b7c2..000000000000 Binary files a/images/wmco-design.png and /dev/null differ diff --git a/installing/_attributes b/installing/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/installing/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/installing/cluster-capabilities.adoc b/installing/cluster-capabilities.adoc deleted file mode 100644 index b59bb330d228..000000000000 --- a/installing/cluster-capabilities.adoc +++ /dev/null @@ -1,80 +0,0 @@ -:_content-type: ASSEMBLY -[id="cluster-capabilities"] -= Cluster capabilities -include::_attributes/common-attributes.adoc[] -:context: cluster-capabilities - -toc::[] - -Cluster administrators can use cluster capabilities to enable or disable optional components prior to installation. Cluster administrators can enable cluster capabilities at anytime after installation. - -[NOTE] -==== -Cluster administrators cannot disable a cluster capability after it is enabled. 
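For orientation, capabilities are selected through the `capabilities` stanza of the `install-config.yaml` file. The following is a minimal sketch rather than a recommended configuration; the capability names listed under `additionalEnabledCapabilities` are examples only.

[source,yaml]
----
capabilities:
  baselineCapabilitySet: None       # start from no optional capabilities
  additionalEnabledCapabilities:    # explicitly enable only the components you need
  - marketplace
  - openshift-samples
----

Setting `baselineCapabilitySet` to a versioned set such as `vCurrent` instead enables the default capability set for the release.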
-==== - -include::modules/selecting-cluster-capabilities.adoc[leveloffset=+1] - -include::snippets/capabilities-table.adoc[] - -[role="_additional-resources"] -.Additional resources -* xref:../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[Installing a cluster on AWS with customizations] -* xref:../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Installing a cluster on GCP with customizations] - -include::modules/explanation-of-capabilities.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../operators/operator-reference.adoc#cluster-operator-reference[Cluster Operators reference] - -include::modules/cluster-bare-metal-operator.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../installing/installing_bare_metal_ipi/ipi-install-overview.adoc#ipi-install-overview[Deploying installer-provisioned clusters on bare metal] -* xref:../installing/installing_bare_metal/preparing-to-install-on-bare-metal.adoc#preparing-to-install-on-bare-metal[Preparing for bare metal cluster installation] -* xref:../post_installation_configuration/bare-metal-configuration.adoc#post-install-bare-metal-configuration[Bare metal configuration] - -include::modules/cluster-storage-operator.adoc[leveloffset=+2] - -include::modules/console-operator.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../web_console/web-console-overview.adoc#web-console-overview[Web console overview] - -include::modules/cluster-csi-snapshot-controller-operator.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../storage/container_storage_interface/persistent-storage-csi-snapshots.adoc#persistent-storage-csi-snapshots[CSI volume snapshots] - -include::modules/insights-operator.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../support/remote_health_monitoring/using-insights-operator.adoc#using-insights-operator[Using Insights Operator] - -include::modules/operator-marketplace.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../operators/understanding/olm-rh-catalogs.adoc#olm-rh-catalogs[Red Hat-provided Operator catalogs] - -include::modules/node-tuning-operator.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../scalability_and_performance/using-node-tuning-operator.adoc#using-node-tuning-operator[Using the Node Tuning Operator] - -include::modules/cluster-samples-operator.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../openshift_images/configuring-samples-operator.adoc#configuring-samples-operator[Configuring the Cluster Samples Operator] - -[role="_additional-resources"] -[id="additional-resources_{context}"] -== Additional resources -* xref:../post_installation_configuration/enabling-cluster-capabilities.adoc#enabling-cluster-capabilities[Enabling cluster capabilities after installation] \ No newline at end of file diff --git a/installing/disconnected_install/_attributes b/installing/disconnected_install/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/disconnected_install/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/disconnected_install/images b/installing/disconnected_install/images deleted file mode 120000 index 5fa6987088da..000000000000 --- 
a/installing/disconnected_install/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/installing/disconnected_install/index.adoc b/installing/disconnected_install/index.adoc deleted file mode 100644 index b00aa205c287..000000000000 --- a/installing/disconnected_install/index.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-mirroring-disconnected-about"] -= About disconnected installation mirroring -include::_attributes/common-attributes.adoc[] -:context: installing-mirroring-disconnected-about - -toc::[] - -You can use a mirror registry to ensure that your clusters only use container images that satisfy your organizational controls on external content. Before you install a cluster on infrastructure that you provision in a restricted network, you must mirror the required container images into that environment. To mirror container images, you must have a registry for mirroring. - -[id="creating-mirror-registry"] -== Creating a mirror registry - -If you already have a container image registry, such as Red Hat Quay, you can use it as your mirror registry. If you do not already have a registry, you can xref:../../installing/disconnected_install/installing-mirroring-creating-registry.adoc#installing-mirroring-creating-registry[create a mirror registry using the _mirror registry for Red Hat OpenShift_]. - -[id="mirroring-images-disconnected-install"] -== Mirroring images for a disconnected installation - -You can use one of the following procedures to mirror your {product-title} image repository to your mirror registry: - -* xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[Mirroring images for a disconnected installation] -* xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#installing-mirroring-disconnected[Mirroring images for a disconnected installation using the oc-mirror plugin] diff --git a/installing/disconnected_install/installing-mirroring-creating-registry.adoc b/installing/disconnected_install/installing-mirroring-creating-registry.adoc deleted file mode 100644 index 94c454d0a942..000000000000 --- a/installing/disconnected_install/installing-mirroring-creating-registry.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-mirroring-creating-registry"] -= Creating a mirror registry with mirror registry for Red Hat OpenShift -include::_attributes/common-attributes.adoc[] -:context: installing-mirroring-creating-registry - -toc::[] - -The _mirror registry for Red Hat OpenShift_ is a small and streamlined container registry that you can use as a target for mirroring the required container images of {product-title} for disconnected installations. - -If you already have a container image registry, such as Red Hat Quay, you can skip this section and go straight to xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installation-mirror-repository_installing-mirroring-installation-images[Mirroring the OpenShift Container Platform image repository]. - -[id="prerequisites_installing-mirroring-creating-registry"] -== Prerequisites - -* An {product-title} subscription. -* {op-system-base-full} 8 and 9 with Podman 3.4.2 or later and OpenSSL installed. -* Fully qualified domain name for the Red Hat Quay service, which must resolve through a DNS server. -* Key-based SSH connectivity on the target host. SSH keys are automatically generated for local installs. 
For remote hosts, you must generate your own SSH keys. -* 2 or more vCPUs. -* 8 GB of RAM. -* About 12 GB for {product-title} {product-version} release images, or about 358 GB for {product-title} {product-version} release images and {product-title} {product-version} Red Hat Operator images. Up to 1 TB per stream or more is suggested. -+ -[IMPORTANT] -==== -These requirements are based on local testing results with only release images and Operator images. Storage requirements can vary based on your organization's needs. You might require more space, for example, when you mirror multiple z-streams. You can use standard link:https://access.redhat.com/documentation/en-us/red_hat_quay/3[Red Hat Quay functionality] or the proper link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_api_guide/index#deletefulltag[API callout] to remove unnecessary images and free up space. -==== - -include::modules/mirror-registry-introduction.adoc[leveloffset=+1] -include::modules/mirror-registry-localhost.adoc[leveloffset=+1] -include::modules/mirror-registry-localhost-update.adoc[leveloffset=+1] -include::modules/mirror-registry-remote.adoc[leveloffset=+1] -include::modules/mirror-registry-remote-host-update.adoc[leveloffset=+1] -include::modules/mirror-registry-uninstall.adoc[leveloffset=+1] -include::modules/mirror-registry-flags.adoc[leveloffset=+1] -include::modules/mirror-registry-release-notes.adoc[leveloffset=+1] -include::modules/mirror-registry-troubleshooting.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/manage_red_hat_quay/using-ssl-to-protect-quay[Using SSL to protect connections to Red Hat Quay] - -* link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/manage_red_hat_quay/using-ssl-to-protect-quay#configuring_the_system_to_trust_the_certificate_authority[Configuring the system to trust the certificate authority] - -* xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installation-mirror-repository_installing-mirroring-installation-images[Mirroring the OpenShift Container Platform image repository] - -* xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#olm-mirror-catalog_installing-mirroring-installation-images[Mirroring Operator catalogs for use with disconnected clusters] diff --git a/installing/disconnected_install/installing-mirroring-disconnected.adoc b/installing/disconnected_install/installing-mirroring-disconnected.adoc deleted file mode 100644 index 167932f7d3e2..000000000000 --- a/installing/disconnected_install/installing-mirroring-disconnected.adoc +++ /dev/null @@ -1,149 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-mirroring-disconnected"] -= Mirroring images for a disconnected installation using the oc-mirror plugin -include::_attributes/common-attributes.adoc[] -:context: installing-mirroring-disconnected - -toc::[] - -Running your cluster in a restricted network without direct internet connectivity is possible by installing the cluster from a mirrored set of {product-title} container images in a private registry. This registry must be running at all times as long as the cluster is running. See the xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#prerequisites_installing-mirroring-disconnected[Prerequisites] section for more information. 
- -You can use the oc-mirror OpenShift CLI (`oc`) plugin to mirror images to a mirror registry in your fully or partially disconnected environments. You must run oc-mirror from a system with internet connectivity in order to download the required images from the official Red Hat registries. - -The following steps outline the high-level workflow on how to use the oc-mirror plugin to mirror images to a mirror registry: - -. Create an image set configuration file. -. Mirror the image set to the mirror registry by using one of the following methods: -** Mirror an image set directly to the mirror registry. -** Mirror an image set to disk, transfer the image set to the target environment, then upload the image set to the target mirror registry. -. Configure your cluster to use the resources generated by the oc-mirror plugin. -. Repeat these steps to update your mirror registry as necessary. - -// About the oc-mirror plugin -include::modules/oc-mirror-about.adoc[leveloffset=+1] - -// oc-mirror compatibility and support -include::modules/oc-mirror-support.adoc[leveloffset=+1] - -// About the mirror registry -include::modules/installation-about-mirror-registry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* For information about viewing the CRI-O logs to view the image source, see xref:../../installing/validating-an-installation.adoc#viewing-the-image-pull-source_validating-an-installation[Viewing the image pull source]. - -[id="prerequisites_installing-mirroring-disconnected"] -== Prerequisites - -* You must have a container image registry that supports link:https://docs.docker.com/registry/spec/manifest-v2-2[Docker v2-2] in the location that will host the {product-title} cluster, such as Red Hat Quay. -+ -[NOTE] -==== -If you use Red Hat Quay, you must use version 3.6 or later with the oc-mirror plugin. If you have an entitlement to Red Hat Quay, see the documentation on deploying Red Hat Quay link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.6/html/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes/[for proof-of-concept purposes] or link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.6/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/[by using the Quay Operator]. If you need additional assistance selecting and installing a registry, contact your sales representative or Red Hat Support. -==== -+ -If you do not already have an existing solution for a container image registry, subscribers of {product-title} are provided a xref:../../installing/disconnected_install/installing-mirroring-creating-registry.adoc#installing-mirroring-creating-registry[mirror registry for Red Hat OpenShift]. The _mirror registry for Red Hat OpenShift_ is included with your subscription and is a small-scale container registry that can be used to mirror the required container images of {product-title} in disconnected installations. - -[id="mirroring-preparing-your-hosts"] -== Preparing your mirror hosts - -Before you can use the oc-mirror plugin to mirror images, you must install the plugin and create a container image registry credentials file to allow the mirroring from Red Hat to your mirror. 
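As a rough sketch of those two preparation steps, assuming that you have already downloaded the oc-mirror CLI plugin archive (`oc-mirror.tar.gz`) for your platform into the current directory, installation and registry authentication typically look similar to the following:

[source,terminal]
----
# Extract the plugin, make it executable, and place it on your PATH
$ tar xvzf oc-mirror.tar.gz
$ chmod +x oc-mirror
$ sudo mv oc-mirror /usr/local/bin/

# Confirm that the oc CLI discovers the plugin
$ oc mirror help

# Authenticate to the Red Hat registries that the plugin pulls from
$ podman login registry.redhat.io
----

The modules that follow describe the supported installation steps in detail, including how to merge your registry credentials into a single pull secret file.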
- -// Installing the oc-mirror OpenShift CLI plugin -include::modules/oc-mirror-installing-plugin.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../cli_reference/openshift_cli/extending-cli-plugins.adoc#cli-installing-plugins_cli-extend-plugins[Installing and using CLI plugins] - -// Configuring credentials that allow images to be mirrored -include::modules/installation-adding-registry-pull-secret.adoc[leveloffset=+2] - -// Creating the image set configuration -include::modules/oc-mirror-creating-image-set-config.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#oc-mirror-imageset-config-params_installing-mirroring-disconnected[Image set configuration parameters] -* xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#oc-mirror-image-set-examples_installing-mirroring-disconnected[Image set configuration examples] -* xref:../../updating/updating_a_cluster/updating_disconnected_cluster/disconnected-update-osus.adoc#update-service-overview_updating-restricted-network-cluster-osus[Using the OpenShift Update Service in a disconnected environment] - -[id="mirroring-image-set"] -== Mirroring an image set to a mirror registry - -You can use the oc-mirror CLI plugin to mirror images to a mirror registry in a xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#mirroring-image-set-partial[partially disconnected environment] or in a xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#mirroring-image-set-full[fully disconnected environment]. - -These procedures assume that you already have your mirror registry set up. - -[id="mirroring-image-set-partial"] -=== Mirroring an image set in a partially disconnected environment - -In a partially disconnected environment, you can mirror an image set directly to the target mirror registry. - -// Mirroring from mirror to mirror -include::modules/oc-mirror-mirror-to-mirror.adoc[leveloffset=+3] - -[id="mirroring-image-set-full"] -=== Mirroring an image set in a fully disconnected environment - -To mirror an image set in a fully disconnected environment, you must first xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#oc-mirror-mirror-to-disk_installing-mirroring-disconnected[mirror the image set to disk], then xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#oc-mirror-disk-to-mirror_installing-mirroring-disconnected[mirror the image set file on disk to a mirror]. - -// Mirroring from mirror to disk -include::modules/oc-mirror-mirror-to-disk.adoc[leveloffset=+3] - -// Mirroring from disk to mirror in a disconnected environment -include::modules/oc-mirror-disk-to-mirror.adoc[leveloffset=+3] - -// Configuring your cluster to use the resources generated by oc-mirror -include::modules/oc-mirror-updating-cluster-manifests.adoc[leveloffset=+1] - -[id="updating-mirror-registry-content"] -== Keeping your mirror registry content updated - -After your target mirror registry is populated with the initial image set, be sure to update it regularly so that it has the latest content. You can optionally set up a cron job, if possible, so that the mirror registry is updated on a regular basis. - -Ensure that you update your image set configuration to add or remove {product-title} and Operator releases as necessary. 
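For reference, the image set configuration mentioned above is a YAML file similar to the following sketch. The metadata registry URL, release channel, and Operator package shown here are placeholders, not recommendations:

[source,yaml]
----
kind: ImageSetConfiguration
apiVersion: mirror.openshift.io/v1alpha2
storageConfig:
  registry:
    imageURL: mirror.example.com/oc-mirror-metadata   # placeholder metadata location
mirror:
  platform:
    channels:
    - name: stable-4.13                                # release channel to mirror
      type: ocp
  operators:
  - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.13
    packages:
    - name: serverless-operator                        # example Operator to include
----

Removing a channel or package from this file and mirroring again is what triggers the pruning behavior described here.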
Any images that are removed are pruned from the mirror registry. - -// About updating your mirror registry content -include::modules/oc-mirror-updating-registry-about.adoc[leveloffset=+2] - -// Updating your mirror registry content -include::modules/oc-mirror-differential-updates.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#oc-mirror-image-set-examples_installing-mirroring-disconnected[Image set configuration examples] -* xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#mirroring-image-set-partial[Mirroring an image set in a partially disconnected environment] -* xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#mirroring-image-set-full[Mirroring an image set in a fully disconnected environment] -* xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#oc-mirror-updating-cluster-manifests_installing-mirroring-disconnected[Configuring your cluster to use the resources generated by oc-mirror] - -// Performing a dry run -include::modules/oc-mirror-dry-run.adoc[leveloffset=+1] - -// Including local OCI Operator catalogs -include::modules/oc-mirror-oci-format.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -// TODO: This title might need to update per sebastian's PR -* xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#oc-mirror-updating-cluster-manifests_installing-mirroring-disconnected[Configuring your cluster to use the resources generated by oc-mirror] - -// Image set configuration parameters -include::modules/oc-mirror-imageset-config-params.adoc[leveloffset=+1] - -// Image set configuration examples -include::modules/oc-mirror-image-set-config-examples.adoc[leveloffset=+1] - -// Command reference for oc-mirror -include::modules/oc-mirror-command-reference.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-mirroring-disconnected"] -== Additional resources - -* xref:../../updating/updating_a_cluster/updating_disconnected_cluster/index.adoc#about-restricted-network-updates[About cluster updates in a disconnected environment] diff --git a/installing/disconnected_install/installing-mirroring-installation-images.adoc b/installing/disconnected_install/installing-mirroring-installation-images.adoc deleted file mode 100644 index ac4512cd355e..000000000000 --- a/installing/disconnected_install/installing-mirroring-installation-images.adoc +++ /dev/null @@ -1,154 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-mirroring-installation-images"] -= Mirroring images for a disconnected installation -include::_attributes/common-attributes.adoc[] -:context: installing-mirroring-installation-images - -toc::[] - -You can ensure your clusters only use container images that satisfy your organizational controls on external content. Before you install a cluster on infrastructure that you provision in a restricted network, you must mirror the required container images into that environment. To mirror container images, you must have a registry for mirroring. - -// TODO: Is this procedure going to be marked deprecated for 4.10 so that it could be removed in the future? -// TODO: Add a link to the TP procedure? -// TODO: Consider updating the title of this one to indicate the difference? 
Or wait to make any changes like that til GA, til we know if it'll stick around or be completely replaced by the oc-mirror one? - -[IMPORTANT] -==== -You must have access to the internet to obtain the necessary container images. -In this procedure, you place your mirror registry on a mirror host -that has access to both your network and the internet. If you do not have access -to a mirror host, use the xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#olm-mirror-catalog_installing-mirroring-installation-images[Mirroring Operator catalogs for use with disconnected clusters] procedure to copy images to a device you can move across network boundaries with. -==== - -[id="prerequisites_installing-mirroring-installation-images"] -== Prerequisites - -* You must have a container image registry that supports link:https://docs.docker.com/registry/spec/manifest-v2-2[Docker v2-2] in the location that will host the {product-title} cluster, such as one of the following registries: -+ --- -** link:https://www.redhat.com/en/technologies/cloud-computing/quay[Red Hat Quay] -** link:https://jfrog.com/artifactory/[JFrog Artifactory] -** link:https://www.sonatype.com/products/repository-oss?topnav=true[Sonatype Nexus Repository] -** link:https://goharbor.io/[Harbor] --- -+ -If you have an entitlement to Red Hat Quay, see the documentation on deploying Red Hat Quay link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.5/html/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes/[for proof-of-concept purposes] or link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.5/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/[by using the Quay Operator]. If you need additional assistance selecting and installing a registry, contact your sales representative or Red Hat support. - -* If you do not already have an existing solution for a container image registry, subscribers of {product-title} are provided a xref:../../installing/disconnected_install/installing-mirroring-creating-registry.adoc#installing-mirroring-creating-registry[mirror registry for Red Hat OpenShift]. The _mirror registry for Red Hat OpenShift_ is included with your subscription and is a small-scale container registry that can be used to mirror the required container images of {product-title} in disconnected installations. - -include::modules/installation-about-mirror-registry.adoc[leveloffset=+1] - -.Additional information - -For information about viewing the CRI-O logs to view the image source, see xref:../../installing/validating-an-installation.adoc#viewing-the-image-pull-source_validating-an-installation[Viewing the image pull source]. - -[id="installing-preparing-mirror"] -== Preparing your mirror host - -Before you perform the mirror procedure, you must prepare the host to retrieve content -and push it to the remote location. - -include::modules/cli-installing-cli.adoc[leveloffset=+2] - -include::modules/installation-adding-registry-pull-secret.adoc[leveloffset=+1] - -//This command seems out of place. Where should it really go? -//// -[id="installing-performing-connected-mirror"] -== Performing a mirror while connected to the internet - -$ oc adm release mirror OPENSHIFT_VERSION --to MIRROR_REPOSITORY -//// - -//// -[id="installing-restricted-networks-preparations-mirroring"] -== Mirroring the content - -In production environments, add the required images to a registry in your restricted network. 
For non-production environments, you can use the images without a separate registry. - - modules/installation-performing-disconnected-mirror.adoc[leveloffset=+2] - - modules/installation-performing-disconnected-mirror-without-registry.adoc[leveloffset=+2] -//// - -include::modules/installation-mirror-repository.adoc[leveloffset=+1] - -[id="installing-preparing-samples-operator"] -== The Cluster Samples Operator in a disconnected environment - -In a disconnected environment, you must take additional steps after you install a cluster to configure the Cluster Samples Operator. Review the following information in preparation. - -include::modules/installation-images-samples-disconnected-mirroring-assist.adoc[leveloffset=+2] - -include::modules/olm-mirroring-catalog.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] - -[id="olm-mirror-catalog-prerequisites_installing-mirroring-installation-images"] -=== Prerequisites - -Mirroring Operator catalogs for use with disconnected clusters has the following prerequisites: - -* Workstation with unrestricted network access. -* `podman` version 1.9.3 or later. -* If you want to filter, or _prune_, an existing catalog and selectively mirror only a subset of Operators, see the following sections: -** xref:../../cli_reference/opm/cli-opm-install.adoc#cli-opm-install[Installing the opm CLI] -** xref:../../operators/admin/olm-managing-custom-catalogs.adoc#olm-filtering-fbc_olm-managing-custom-catalogs[Updating or filtering a file-based catalog image] -ifndef::openshift-origin[] -* If you want to mirror a Red Hat-provided catalog, run the following command on your workstation with unrestricted network access to authenticate with `registry.redhat.io`: -+ -[source,terminal] ----- -$ podman login registry.redhat.io ----- -endif::[] -* Access to a mirror registry that supports -link:https://docs.docker.com/registry/spec/manifest-v2-2/[Docker v2-2]. -* On your mirror registry, decide which repository, or namespace, to use for storing mirrored Operator content. For example, you might create an `olm-mirror` repository. -* If your mirror registry does not have internet access, connect removable media to your workstation with unrestricted network access. -* If you are working with private registries, including `registry.redhat.io`, set the `REG_CREDS` environment variable to the file path of your registry credentials for use in later steps. 
For example, for the `podman` CLI: -+ -[source,terminal] ----- -$ REG_CREDS=${XDG_RUNTIME_DIR}/containers/auth.json ----- - -include::modules/olm-mirroring-catalog-extracting.adoc[leveloffset=+2] -include::modules/olm-mirroring-catalog-colocated.adoc[leveloffset=+3] - -[role="_additional-resources"] -.Additional resources -* xref:../../operators/operator_sdk/osdk-generating-csvs.adoc#olm-arch-os-support_osdk-generating-csvs[Architecture and operating system support for Operators] - -include::modules/olm-mirroring-catalog-airgapped.adoc[leveloffset=+3] - -[role="_additional-resources"] -.Additional resources -* xref:../../operators/operator_sdk/osdk-generating-csvs.adoc#olm-arch-os-support_osdk-generating-csvs[Architecture and operating system support for Operators] - -include::modules/olm-mirroring-catalog-manifests.adoc[leveloffset=+2] -include::modules/olm-mirroring-catalog-post.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../post_installation_configuration/preparing-for-users.adoc#post-install-mirrored-catalogs[Populating OperatorHub from mirrored Operator catalogs] -* xref:../../operators/admin/olm-managing-custom-catalogs.adoc#olm-filtering-fbc_olm-managing-custom-catalogs[Updating or filtering a file-based catalog image] - -[id="next-steps_installing-mirroring-installation-images"] -== Next steps - -//* TODO need to add the registry secret to the machines, which is different - -* Install a cluster on infrastructure that you provision in your restricted network, such as on -xref:../../installing/installing_vsphere/installing-restricted-networks-vsphere.adoc#installing-restricted-networks-vsphere[VMware vSphere], -xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[bare metal], or xref:../../installing/installing_aws/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[Amazon Web Services]. - -[role="_additional-resources"] -[id="restricted-networks-additional-resources"] -== Additional resources - -* See xref:../../support/gathering-cluster-data.adoc#gathering-data-specific-features_gathering-cluster-data[Gathering data about specific features] for more information about using must-gather. 
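To illustrate how the `REG_CREDS` credentials file from the catalog mirroring prerequisites above is typically consumed, the following sketch mirrors a Red Hat Operator index into an `olm-mirror` repository. The mirror registry host name, port, and index version are placeholders:

[source,terminal]
----
$ oc adm catalog mirror \
    registry.redhat.io/redhat/redhat-operator-index:v4.13 \
    mirror.example.com:8443/olm-mirror \
    -a ${REG_CREDS}
----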
diff --git a/installing/disconnected_install/modules b/installing/disconnected_install/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/disconnected_install/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/disconnected_install/snippets b/installing/disconnected_install/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/disconnected_install/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/images b/installing/images deleted file mode 120000 index e4c5bd02a10a..000000000000 --- a/installing/images +++ /dev/null @@ -1 +0,0 @@ -../images/ \ No newline at end of file diff --git a/installing/index.adoc b/installing/index.adoc deleted file mode 100644 index 79643bc8a691..000000000000 --- a/installing/index.adoc +++ /dev/null @@ -1,50 +0,0 @@ -:_content-type: ASSEMBLY -[id="ocp-installation-overview"] -= {product-title} installation overview -include::_attributes/common-attributes.adoc[] -:context: ocp-installation-overview - -toc::[] - -include::modules/installation-overview.adoc[leveloffset=+1] - - -include::modules/install-openshift-common-terms.adoc[leveloffset=+2] - -include::modules/installation-process.adoc[leveloffset=+2] - -include::modules/ipi-verifying-nodes-after-installation.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../post_installation_configuration/bare-metal-configuration.adoc#getting-the-baremetalhost-resource_post-install-bare-metal-configuration[Getting the BareMetalHost resource] - -* xref:../installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#ipi-install-troubleshooting-following-the-installation_ipi-install-installation-workflow[Following the installation] - -* xref:../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation] - -* xref:../installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc#preparing-to-install-with-agent-based-installer[Agent-based Installer] - -* link:https://access.redhat.com/documentation/en-us/assisted_installer_for_openshift_container_platform/2022/html-single/assisted_installer_for_openshift_container_platform/index[Assisted Installer for OpenShift Container Platform] - -[discrete] -=== Installation scope - -The scope of the {product-title} installation program is intentionally narrow. It is designed for simplicity and ensured success. You can complete many more configuration tasks after installation completes. - -[role="_additional-resources"] -.Additional resources - -* See xref:../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Available cluster customizations] for details about {product-title} configuration resources. - -include::modules/installation-openshift-local.adoc[leveloffset=+2] - -include::modules/supported-platforms-for-openshift-clusters.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../installing/installing-preparing.adoc#supported-installation-methods-for-different-platforms[Supported installation methods for different platforms] for more information about the types of installations that are available for each supported platform. 
- -* See xref:../installing/installing-preparing.adoc#installing-preparing[Selecting a cluster installation method and preparing it for users] for information about choosing an installation method and preparing the required resources. diff --git a/installing/install_config/_attributes b/installing/install_config/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/install_config/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/install_config/configuring-custom-ca.adoc b/installing/install_config/configuring-custom-ca.adoc deleted file mode 100644 index 290bfc593a76..000000000000 --- a/installing/install_config/configuring-custom-ca.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-custom-ca"] -= Configuring a custom certificate authority -include::_attributes/common-attributes.adoc[] -:context: configuring-custom-ca - -toc::[] - -If you install {product-title} with a proxy or in a restricted network, -you might need to configure a custom certificate authority (CA). - -//include::modules/configuring-firewall.adoc[leveloffset=+1] diff --git a/installing/install_config/configuring-firewall.adoc b/installing/install_config/configuring-firewall.adoc deleted file mode 100644 index 5a55a7b1e345..000000000000 --- a/installing/install_config/configuring-firewall.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-firewall"] -= Configuring your firewall -include::_attributes/common-attributes.adoc[] -:context: configuring-firewall - -toc::[] - -If you use a firewall, you must configure it so that {product-title} can access the sites that it requires to function. You must always grant access to some sites, and you grant access to more if you use -Red Hat Insights, the Telemetry service, a cloud to host your cluster, and certain build strategies. - -include::modules/configuring-firewall.adoc[leveloffset=+1] diff --git a/installing/install_config/enabling-cgroup-v2.adoc b/installing/install_config/enabling-cgroup-v2.adoc deleted file mode 100644 index 84b1f5bc8e41..000000000000 --- a/installing/install_config/enabling-cgroup-v2.adoc +++ /dev/null @@ -1,40 +0,0 @@ -:_content-type: ASSEMBLY -:context: nodes-cluster-cgroups-2 -[id="enabling-cgroup-v2"] -= Enabling Linux control group version 2 (cgroup v2) -include::_attributes/common-attributes.adoc[] - -toc::[] - - -ifndef::openshift-origin[] -By default, {product-title} uses link:https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1.html[Linux control group version 1] (cgroup v1) in your cluster. You can enable link:https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html[Linux control group version 2] (cgroup v2) upon installation. Enabling cgroup v2 in {product-title} disables all cgroup version 1 controllers and hierarchies in your cluster. - -cgroup v2 is the next version of the Linux cgroup API. cgroup v2 offers several improvements over cgroup v1, including a unified hierarchy, safer sub-tree delegation, new features such as link:https://www.kernel.org/doc/html/latest/accounting/psi.html[Pressure Stall Information], and enhanced resource management and isolation. - -You can switch between cgroup v1 and cgroup v2, as needed, by editing the `node.config` object. For more information, see "Configuring the Linux cgroup on your nodes" in the "Additional resources" of this section. 
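As a sketch of what that edit involves, the cluster-wide cgroup mode is a single field on the `cluster` instance of the `node.config` API. The exact field name can vary between releases, so treat the following as illustrative:

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: Node
metadata:
  name: cluster
spec:
  cgroupMode: "v2"   # set to "v1" to switch back
----

You would typically make this change with `oc edit nodes.config/cluster` and then wait for the nodes to roll out the new configuration.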
-endif::openshift-origin[] - -ifdef::openshift-origin[] -By default, {product-title} uses link:https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html[Linux control group version 2] (cgroup v2) in your cluster. You can switch to link:https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1.html[Linux control group version 1] (cgroup v1), if needed. - -cgroup v2 is the next version of the kernel link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/resource_management_guide/ch01[control group] and offers multiple improvements. However, it can have some unwanted effects on your nodes. -endif::openshift-origin[] - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -ifndef::openshift-origin[] -include::modules/nodes-clusters-cgroups-2-install.adoc[leveloffset=+1] -endif::openshift-origin[] - -ifdef::openshift-origin[] -include::modules/nodes-clusters-cgroups-okd-configure.adoc[leveloffset=+1] -endif::openshift-origin[] - -.Additional resources - -* xref:../../installing/index.adoc#ocp-installation-overview[OpenShift Container Platform installation overview] -* xref:../../nodes/clusters/nodes-cluster-cgroups-2.adoc#nodes-clusters-cgroups-2_nodes-cluster-cgroups-2[Configuring the Linux cgroup on your nodes] diff --git a/installing/install_config/images b/installing/install_config/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/install_config/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/install_config/installing-customizing.adoc b/installing/install_config/installing-customizing.adoc deleted file mode 100644 index f83ad75f6289..000000000000 --- a/installing/install_config/installing-customizing.adoc +++ /dev/null @@ -1,56 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-customizing"] -= Customizing nodes -include::_attributes/common-attributes.adoc[] -:context: installing-customizing - -toc::[] - -Although directly making changes to {product-title} nodes is discouraged, -there are times when it is necessary to implement a -required low-level security, redundancy, networking, or performance feature. -Direct changes to {product-title} nodes can be done by: - -* Creating machine configs that are included in manifest files -to start up a cluster during `openshift-install`. - -* Creating machine configs that are passed to running -{product-title} nodes via the Machine Config Operator. - -* Creating an Ignition config that is passed to `coreos-installer` -when installing bare-metal nodes. - -The following sections describe features that you might want to -configure on your nodes in this way. 
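The Butane workflow covered in the following sections amounts to writing a small `.bu` file and transpiling it into a `MachineConfig` manifest. A minimal sketch, assuming Butane is installed locally and using a hypothetical file name and path:

[source,yaml]
----
# 99-worker-example.bu: hypothetical Butane config for worker nodes
variant: openshift
version: 4.13.0
metadata:
  name: 99-worker-example
  labels:
    machineconfiguration.openshift.io/role: worker
storage:
  files:
  - path: /etc/example.conf        # hypothetical file to place on each worker node
    mode: 0644
    contents:
      inline: |
        example_setting=true
----

Running `butane 99-worker-example.bu -o 99-worker-example.yaml` produces a machine config that you can add to your installation manifests or apply to a running cluster.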
- -include::modules/installation-special-config-butane.adoc[leveloffset=+1] -include::modules/installation-special-config-butane-about.adoc[leveloffset=+2] -include::modules/installation-special-config-butane-install.adoc[leveloffset=+2] -include::modules/installation-special-config-butane-create.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-kmod_installing-customizing[Adding kernel modules to nodes] -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-storage_installing-customizing[Encrypting and mirroring disks during installation] - -include::modules/installation-special-config-kargs.adoc[leveloffset=+1] -ifdef::openshift-webscale[] -include::modules/installation-special-config-rtkernel.adoc[leveloffset=+1] -endif::openshift-webscale[] -include::modules/installation-special-config-kmod.adoc[leveloffset=+1] -include::modules/installation-special-config-storage.adoc[leveloffset=+1] -include::modules/installation-special-config-raid.adoc[leveloffset=+1] -include::modules/installation-special-config-chrony.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* For information on Butane, see xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-butane_installing-customizing[Creating machine configs with Butane]. - -//// -ifndef::openshift-origin[] -* For information on FIPS support, see xref:../../installing/installing-fips.adoc#installing-fips[Support for FIPS cryptography]. -endif::[] - -//// diff --git a/installing/install_config/modules b/installing/install_config/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/install_config/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/install_config/snippets b/installing/install_config/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/install_config/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing-fips.adoc b/installing/installing-fips.adoc deleted file mode 100644 index 2ba5b1770fbf..000000000000 --- a/installing/installing-fips.adoc +++ /dev/null @@ -1,92 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-fips"] -= Support for FIPS cryptography -include::_attributes/common-attributes.adoc[] -:context: installing-fips - -toc::[] - -You can install an {product-title} cluster that uses FIPS Validated / Modules in Process cryptographic libraries on `x86_64`, `ppc64le`, and `s390x` architectures. - -For the {op-system-first} machines in your cluster, this change is applied when the machines are deployed based on the status of an option in the `install-config.yaml` file, which governs the cluster options that a user can change during cluster deployment. With {op-system-base-full} machines, you must enable FIPS mode when you install the operating system on the machines that you plan to use as worker machines. These configuration methods ensure that your cluster meet the requirements of a FIPS compliance audit: only FIPS Validated / Modules in Process cryptography packages are enabled before the initial system boot. - -Because FIPS must be enabled before the operating system that your cluster uses boots for the first time, you cannot enable FIPS after you deploy a cluster. 
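In practice, the `install-config.yaml` option referenced above is a single top-level field. The sketch below shows only the relevant line, with placeholder values for context:

[source,yaml]
----
apiVersion: v1
baseDomain: example.com        # placeholder
metadata:
  name: example-cluster        # placeholder
fips: true                     # enables FIPS mode for the cluster machines
# ... remaining platform-specific configuration ...
----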
- -[id="installation-about-fips-validation_{context}"] -== FIPS validation in {product-title} - -{product-title} uses certain FIPS Validated / Modules in Process modules within {op-system-base} and {op-system} for the operating system components that it uses. See link:https://access.redhat.com/articles/3655361[RHEL8 core crypto components]. For example, when users SSH into {product-title} clusters and containers, those connections are properly encrypted. - -{product-title} components are written in Go and built with Red Hat's golang compiler. When you enable FIPS mode for your cluster, all {product-title} components that require cryptographic signing call {op-system-base} and {op-system} cryptographic libraries. - -.FIPS mode attributes and limitations in {product-title} {product-version} -[cols="8a,8a",options="header"] -|=== - -|Attributes -|Limitations - -|FIPS support in {op-system-base} 8 and {op-system} operating systems. -.3+|The FIPS implementation does not offer a single function that both computes hash functions and validates the keys that are based on that hash. This limitation will continue to be evaluated and improved in future {product-title} releases. - -|FIPS support in CRI-O runtimes. -|FIPS support in {product-title} services. - -|FIPS Validated / Modules in Process cryptographic module and algorithms that are obtained from {op-system-base} 8 and {op-system} binaries and images. -| - -|Use of FIPS compatible golang compiler. -|TLS FIPS support is not complete but is planned for future {product-title} releases. - -|FIPS support across multiple architectures. -|FIPS is currently only supported on {product-title} deployments using `x86_64`, `ppc64le`, and `s390x` architectures. - -|=== - -[id="installation-about-fips-components_{context}"] -== FIPS support in components that the cluster uses - -Although the {product-title} cluster itself uses FIPS Validated / Modules in Process modules, ensure that the systems that support your {product-title} cluster use FIPS Validated / Modules in Process modules for cryptography. - -[id="installation-about-fips-components-etcd_{context}"] -=== etcd - -To ensure that the secrets that are stored in etcd use FIPS Validated / Modules in Process encryption, boot the node in FIPS mode. After you install the cluster in FIPS mode, you can xref:../security/encrypting-etcd.adoc#encrypting-etcd[encrypt the etcd data] by using the FIPS-approved `aes cbc` cryptographic algorithm. - -[id="installation-about-fips-components-storage_{context}"] -=== Storage - -For local storage, use {op-system-base}-provided disk encryption or Container Native Storage that uses {op-system-base}-provided disk encryption. By storing all data in volumes that use {op-system-base}-provided disk encryption and enabling FIPS mode for your cluster, both data at rest and data in motion, or network data, are protected by FIPS Validated / Modules in Process encryption. -You can configure your cluster to encrypt the root filesystem of each node, as described -in xref:../installing/install_config/installing-customizing.adoc#installing-customizing[Customizing nodes]. - -[id="installation-about-fips-components-runtimes_{context}"] -=== Runtimes - -To ensure that containers know that they are running on a host that is using FIPS Validated / Modules in Process cryptography modules, use CRI-O to manage your runtimes. CRI-O supports FIPS mode, in that it configures the containers to know that they are running in FIPS mode. 
- -[id="installing-fips-mode_{context}"] -== Installing a cluster in FIPS mode - -To install a cluster in FIPS mode, follow the instructions to install a customized cluster on your preferred infrastructure. Ensure that you set `fips: true` in the `install-config.yaml` file before you deploy your cluster. - -* xref:../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[Amazon Web Services] -* xref:../installing/installing_alibaba/installing-alibaba-customizations.adoc#installing-alibaba-customizations[Alibaba Cloud] -* xref:../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-customizations[Microsoft Azure] -* xref:../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[Bare metal] -* xref:../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Google Cloud Platform] -* xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc#installing-ibm-cloud-customizations[IBM Cloud VPC] -* xref:../installing/installing_ibm_power/installing-ibm-power.adoc#installing-ibm-power[{ibmpowerProductName}] -* xref:../installing/installing_ibm_z/installing-ibm-z.adoc#installing-ibm-z[{ibmzProductName} and {linuxoneProductName}] -* xref:../installing/installing_ibm_z/installing-ibm-z-kvm.adoc#installing-ibm-z-kvm[{ibmzProductName} and {linuxoneProductName} with {op-system-base} KVM] -* xref:../installing/installing_openstack/installing-openstack-installer-custom.adoc#installing-openstack-installer-custom[{rh-openstack-first}] -* xref:../installing/installing_vsphere/installing-vsphere.adoc#installing-vsphere[VMware vSphere] - -[NOTE] -==== -If you are using Azure File storage, you cannot enable FIPS mode. -==== - -To apply `AES CBC` encryption to your etcd data store, follow the xref:../security/encrypting-etcd.adoc#encrypting-etcd[Encrypting etcd data] process after you install your cluster. - -If you add {op-system-base} nodes to your cluster, ensure that you enable FIPS mode on the machines before their initial boot. See xref:../machine_management/adding-rhel-compute.adoc#adding-rhel-compute[Adding RHEL compute machines to an {product-title} cluster] and link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/security_hardening/using-the-system-wide-cryptographic-policies_security-hardening#enabling-fips-mode-in-a-container_using-the-system-wide-cryptographic-policies[Enabling FIPS Mode] in the {op-system-base} 8 documentation. diff --git a/installing/installing-preparing.adoc b/installing/installing-preparing.adoc deleted file mode 100644 index d6c637557d89..000000000000 --- a/installing/installing-preparing.adoc +++ /dev/null @@ -1,617 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-preparing"] -= Selecting a cluster installation method and preparing it for users -include::_attributes/common-attributes.adoc[] -:context: installing-preparing - -toc::[] - -Before you install {product-title}, decide what kind of installation process to follow and verify that you have all of the required resources to prepare the cluster for users. - -[id="installing-preparing-selecting-cluster-type"] -== Selecting a cluster installation type -Before you install an {product-title} cluster, you need to select the best installation instructions to follow. Think about your answers to the following questions to select the best option. 
- -[id="installing-preparing-install-manage"] -=== Do you want to install and manage an {product-title} cluster yourself? - -If you want to install and manage {product-title} yourself, you can install it on the following platforms: - -* Alibaba Cloud -* Amazon Web Services (AWS) on 64-bit x86 instances -ifndef::openshift-origin[] -* Amazon Web Services (AWS) on 64-bit ARM instances -endif::openshift-origin[] -* Microsoft Azure on 64-bit x86 instances -* Microsoft Azure on 64-bit ARM instances -* Microsoft Azure Stack Hub -* Google Cloud Platform (GCP) -* {rh-openstack-first} -* IBM Cloud VPC -* {ibmzProductName} or {linuxoneProductName} -* {ibmzProductName} or {linuxoneProductName} for {op-system-base-full} KVM -* {ibmpowerProductName} -* {ibmpowerProductName} Virtual Server -* Nutanix -* VMware vSphere -* Bare metal or other platform agnostic infrastructure -// might want a note about single node here - -You can deploy an {product-title} 4 cluster to both on-premise hardware and to cloud hosting services, but all of the machines in a cluster must be in the same data center or cloud hosting service. - -If you want to use {product-title} but do not want to manage the cluster yourself, you have several managed service options. If you want a cluster that is fully managed by Red Hat, you can use link:https://www.openshift.com/products/dedicated/[OpenShift Dedicated] or link:https://www.openshift.com/products/online/[OpenShift Online]. You can also use OpenShift as a managed service on Azure, AWS, IBM Cloud VPC, or Google Cloud. For more information about managed services, see the link:https://www.openshift.com/products[OpenShift Products] page. If you install an {product-title} cluster with a cloud virtual machine as a virtual bare metal, the corresponding cloud-based storage is not supported. - -[id="installing-preparing-migrate"] -=== Have you used {product-title} 3 and want to use {product-title} 4? - -If you used {product-title} 3 and want to try {product-title} 4, you need to understand how different {product-title} 4 is. {product-title} 4 weaves the Operators that package, deploy, and manage Kubernetes applications and the operating system that the platform runs on, {op-system-first}, together seamlessly. Instead of deploying machines and configuring their operating systems so that you can install {product-title} on them, the {op-system} operating system is an integral part of the {product-title} cluster. Deploying the operating system for the cluster machines as part of the installation process for {product-title}. See xref:../migrating_from_ocp_3_to_4/planning-migration-3-4.adoc#migration-comparing-ocp-3-4[Differences between {product-title} 3 and 4]. - -Because you need to provision machines as part of the {product-title} cluster installation process, you cannot upgrade an {product-title} 3 cluster to {product-title} 4. Instead, you must create a new {product-title} 4 cluster and migrate your {product-title} 3 workloads to them. For more information about migrating, see xref:../migrating_from_ocp_3_to_4/index.adoc#migration-from-version-3-to-4-overview[Migrating from {product-title} 3 to 4 overview]. Because you must migrate to {product-title} 4, you can use any type of production cluster installation process to create your new cluster. - -[id="installing-preparing-existing-components"] -=== Do you want to use existing components in your cluster? 
- -Because the operating system is integral to {product-title}, it is easier to let the installation program for {product-title} stand up all of the infrastructure. These are called _installer-provisioned infrastructure_ installations. In this type of installation, you can provide some existing infrastructure to the cluster, but the installation program deploys all of the machines that your cluster initially needs. - -You can deploy an installer-provisioned infrastructure cluster without specifying any customizations to the cluster or its underlying machines to xref:../installing/installing_alibaba/installing-alibaba-default.adoc#installing-alibaba-default[Alibaba Cloud], xref:../installing/installing_aws/installing-aws-default.adoc#installing-aws-default[AWS], xref:../installing/installing_azure/installing-azure-default.adoc#installing-azure-default[Azure], xref:../installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc#installing-azure-stack-hub-default[Azure Stack Hub], xref:../installing/installing_gcp/installing-gcp-default.adoc#installing-gcp-default[GCP], or xref:../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#installing-nutanix-installer-provisioned[Nutanix]. - -If you need to perform basic configuration for your installer-provisioned infrastructure cluster, such as the instance type for the cluster machines, you can customize an installation for xref:../installing/installing_alibaba/installing-alibaba-customizations.adoc#installing-alibaba-customizations[Alibaba Cloud], xref:../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[AWS], xref:../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-customizations[Azure], xref:../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[GCP], or xref:../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#installing-nutanix-installer-provisioned[Nutanix]. - -For installer-provisioned infrastructure installations, you can use an existing xref:../installing/installing_aws/installing-aws-vpc.adoc#installing-aws-vpc[VPC in AWS], xref:../installing/installing_azure/installing-azure-vnet.adoc#installing-azure-vnet[vNet in Azure], or xref:../installing/installing_gcp/installing-gcp-vpc.adoc#installing-gcp-vpc[VPC in GCP]. You can also reuse part of your networking infrastructure so that your cluster in xref:../installing/installing_aws/installing-aws-network-customizations.adoc#installing-aws-network-customizations[AWS], xref:../installing/installing_azure/installing-azure-network-customizations.adoc#installing-azure-network-customizations[Azure], or xref:../installing/installing_gcp/installing-gcp-network-customizations.adoc#installing-gcp-network-customizations[GCP] can coexist with existing IP address allocations in your environment and integrate with existing MTU and VXLAN configurations. If you have existing accounts and credentials on these clouds, you can reuse them, but you might need to modify the accounts to have the required permissions to install {product-title} clusters on them.
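As a hedged illustration of the basic configuration mentioned above, such as the instance type for the cluster machines, the following `install-config.yaml` excerpt shows what a customized installer-provisioned AWS installation might contain. All values, including the domain, cluster name, region, and instance types, are placeholders; confirm the exact fields against the installation configuration parameters for your platform.

[source,yaml]
----
# Sketch of an install-config.yaml that customizes the control plane and
# compute machine instance types for an installer-provisioned AWS cluster.
apiVersion: v1
baseDomain: example.com        # placeholder base domain
metadata:
  name: mycluster              # placeholder cluster name
controlPlane:
  name: master
  replicas: 3
  platform:
    aws:
      type: m5.xlarge          # placeholder control plane instance type
compute:
- name: worker
  replicas: 3
  platform:
    aws:
      type: m5.large           # placeholder compute instance type
platform:
  aws:
    region: us-east-1          # placeholder region
pullSecret: '{"auths": ...}'   # pull secret from the Red Hat OpenShift Cluster Manager
sshKey: ssh-ed25519 AAAA...    # optional SSH public key for cluster nodes
----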
- - -You can use the installer-provisioned infrastructure method to create appropriate machine instances on your hardware for xref:../installing/installing_openstack/installing-openstack-installer-custom.adoc#installing-openstack-installer-custom[{rh-openstack}], xref:../installing/installing_openstack/installing-openstack-installer-kuryr.adoc#installing-openstack-installer-kuryr[{rh-openstack} with Kuryr], xref:../installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc#installing-vsphere-installer-provisioned[vSphere], and xref:../installing/installing_bare_metal_ipi/ipi-install-overview.adoc#ipi-install-overview[bare metal]. Additionally, for xref:../installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc#installing-vsphere-installer-provisioned-network-customizations[vSphere], you can also customize additional network parameters during installation. - - -If you want to reuse extensive cloud infrastructure, you can complete a _user-provisioned infrastructure_ installation. With these installations, you manually deploy the machines that your cluster requires during the installation process. If you perform a user-provisioned infrastructure installation on xref:../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[AWS], xref:../installing/installing_azure/installing-azure-user-infra.adoc#installing-azure-user-infra[Azure], or xref:../installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc#installing-azure-stack-hub-user-infra[Azure Stack Hub], you can use the provided templates to help you stand up all of the required components. You can also reuse a shared xref:../installing/installing_gcp/installing-gcp-user-infra-vpc.adoc#installing-gcp-user-infra-vpc[VPC on GCP]. Otherwise, you can use the xref:../installing/installing_platform_agnostic/installing-platform-agnostic.adoc#installing-platform-agnostic[provider-agnostic] installation method to deploy a cluster into other clouds. - - -You can also complete a user-provisioned infrastructure installation on your existing hardware. If you use xref:../installing/installing_openstack/installing-openstack-user.adoc#installing-openstack-user[{rh-openstack}], xref:../installing/installing_ibm_z/installing-ibm-z.adoc#installing-ibm-z[{ibmzProductName} or {linuxoneProductName}], xref:../installing/installing_ibm_z/installing-ibm-z-kvm.adoc#installing-ibm-z-kvm[{ibmzProductName} and {linuxoneProductName} with {op-system-base} KVM], xref:../installing/installing_ibm_power/installing-ibm-power.adoc#installing-ibm-power[IBM Power], or xref:../installing/installing_vsphere/installing-vsphere.adoc#installing-vsphere[vSphere], use the specific installation instructions to deploy your cluster. If you use other supported hardware, follow the xref:../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[bare metal installation] procedure. For some of these platforms, such as xref:../installing/installing_openstack/installing-openstack-user-kuryr.adoc#installing-openstack-user-kuryr[{rh-openstack}], xref:../installing/installing_vsphere/installing-vsphere-network-customizations.adoc#installing-vsphere-network-customizations[vSphere], and xref:../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installing-bare-metal-network-customizations[bare metal], you can also customize additional network parameters during installation.
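Where the preceding paragraphs mention customizing additional network parameters during installation, those parameters typically live in the `networking` stanza of the `install-config.yaml` file. The following is a minimal sketch with example CIDR ranges only; the ranges must not overlap with existing allocations in your environment, and the exact options are described in the network customization procedure for your platform.

[source,yaml]
----
# Sketch of the networking stanza in install-config.yaml.
networking:
  networkType: OVNKubernetes   # cluster network plugin
  clusterNetwork:
  - cidr: 10.128.0.0/14        # example pod network CIDR
    hostPrefix: 23
  machineNetwork:
  - cidr: 10.0.0.0/16          # example machine network CIDR
  serviceNetwork:
  - 172.30.0.0/16              # example service network CIDR
----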
- - -[id="installing-preparing-security"] -=== Do you need extra security for your cluster? - -If you use a user-provisioned installation method, you can configure a proxy for your cluster. The instructions are included in each installation procedure. - -If you want to prevent your cluster on a public cloud from exposing endpoints externally, you can deploy a private cluster with installer-provisioned infrastructure on xref:../installing/installing_aws/installing-aws-private.adoc#installing-aws-private[AWS], xref:../installing/installing_azure/installing-azure-private.adoc#installing-azure-private[Azure], or xref:../installing/installing_gcp/installing-gcp-private.adoc#installing-gcp-private[GCP]. - -If you need to install your cluster that has limited access to the internet, such as a disconnected or restricted network cluster, you can xref:../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[mirror the installation packages] and install the cluster from them. Follow detailed instructions for user provisioned infrastructure installations into restricted networks for xref:../installing/installing_aws/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[AWS], xref:../installing/installing_gcp/installing-restricted-networks-gcp.adoc#installing-restricted-networks-gcp[GCP], xref:../installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc#installing-restricted-networks-ibm-z[{ibmzProductName} or {linuxoneProductName}], xref:../installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc#installing-restricted-networks-ibm-z-kvm[{ibmzProductName} or {linuxoneProductName} with {op-system-base} KVM], xref:../installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc#installing-restricted-networks-ibm-power[IBM Power], xref:../installing/installing_vsphere/installing-restricted-networks-vsphere.adoc#installing-restricted-networks-vsphere[vSphere], or xref:../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[bare metal]. You can also install a cluster into a restricted network using installer-provisioned infrastructure by following detailed instructions for xref:../installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc#installing-restricted-networks-aws-installer-provisioned[AWS], xref:../installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc#installing-restricted-networks-gcp-installer-provisioned[GCP], xref:../installing/installing_nutanix/installing-restricted-networks-nutanix-installer-provisioned.adoc#installing-restricted-networks-nutanix-installer-provisioned[Nutanix, xref:../installing/installing_openstack/installing-openstack-installer-restricted.adoc#installing-openstack-installer-restricted[{rh-openstack}], and xref:../installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc#installing-restricted-networks-installer-provisioned-vsphere[vSphere]. 
- - -If you need to deploy your cluster to an xref:../installing/installing_aws/installing-aws-government-region.adoc#installing-aws-government-region[AWS GovCloud region], xref:../installing/installing_aws/installing-aws-china.adoc#installing-aws-china-region[AWS China region], or xref:../installing/installing_azure/installing-azure-government-region.adoc#installing-azure-government-region[Azure government region], you can configure those custom regions during an installer-provisioned infrastructure installation. - -//// -ifndef::openshift-origin[] -You can also configure the cluster machines to use xref:../installing/installing-fips.adoc#installing-fips[FIPS Validated / Modules in Process cryptographic libraries] during installation. - -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== - -endif::[] -//// - -//// -[id="installing-preparing-single-node"] -=== Are you installing single-node clusters at the edge? - -You can use the assisted installer to deploy xref:../installing/installing_sno/install-sno-installing-sno.adoc#installing-sno[single node] clusters for edge workloads. -//// - -[id="installing-preparing-cluster-for-users"] -== Preparing your cluster for users after installation - -Some configuration is not required to install the cluster but is recommended before your users access the cluster. You can customize the cluster itself by xref:../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[customizing] the Operators that make up your cluster and integrating your cluster with other required systems, such as an identity provider. -//This link will change when we consolidate the customizations page with the post-installation activities. - -For a production cluster, you must configure the following integrations: - -* xref:../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Persistent storage] -* xref:../authentication/understanding-identity-provider.adoc#understanding-identity-provider[An identity provider] -* xref:../monitoring/configuring-the-monitoring-stack.adoc#configuring-the-monitoring-stack[Monitoring core OpenShift Container Platform components] - -[id="installing-preparing-cluster-for-workloads"] -== Preparing your cluster for workloads - -Depending on your workload needs, you might need to take extra steps before you begin deploying applications. For example, after you prepare infrastructure to support your application xref:../cicd/builds/build-strategies.adoc#build-strategies[build strategy], you might need to make provisions for xref:../scalability_and_performance/cnf-low-latency-tuning.adoc#cnf-low-latency-tuning[low-latency] workloads or to xref:../nodes/pods/nodes-pods-secrets.adoc#nodes-pods-secrets[protect sensitive workloads]. You can also configure xref:../monitoring/enabling-monitoring-for-user-defined-projects.adoc#enabling-monitoring-for-user-defined-projects[monitoring] for application workloads. -If you plan to run xref:../windows_containers/enabling-windows-container-workloads.adoc#enabling-windows-container-workloads[Windows workloads], you must enable xref:../networking/ovn_kubernetes_network_provider/configuring-hybrid-networking.adoc#configuring-hybrid-networking[hybrid networking with OVN-Kubernetes] during the installation process; hybrid networking cannot be enabled after your cluster is installed.
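Of the post-installation integrations listed in the preceding section, an identity provider is typically the first one that you configure. As a minimal, hedged sketch only, with placeholder provider and secret names, an htpasswd identity provider is added by updating the cluster-scoped `OAuth` resource; see the identity provider documentation linked above for the full procedure.

[source,yaml]
----
# Sketch: add an htpasswd identity provider by editing the cluster OAuth resource.
# The referenced secret must already exist in the openshift-config namespace.
apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
  - name: my_htpasswd_provider   # placeholder provider name
    mappingMethod: claim
    type: HTPasswd
    htpasswd:
      fileData:
        name: htpass-secret      # placeholder secret containing the htpasswd file
----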
- -[id="supported-installation-methods-for-different-platforms"] -== Supported installation methods for different platforms - -You can perform different types of installations on different platforms. - -[NOTE] -==== -Not all installation options are supported for all platforms, as shown in the following tables. A checkmark indicates that the option is supported and links to the relevant section. -==== - -.Installer-provisioned infrastructure options -//This table is for all flavors of OpenShift, except OKD. A separate table is required because OKD does not support multiple AWS architecture types. Trying to maintain one table using conditions, while convenient, is very fragile and prone to publishing errors. -ifndef::openshift-origin[] -|=== -||Alibaba |AWS (x86_64) |AWS (arm64) |Azure (x86_64) |Azure (arm64)|Azure Stack Hub |GCP |Nutanix |{rh-openstack} |Bare metal (x86_64) |Bare metal (arm64) |vSphere |VMC |IBM Cloud VPC |{ibmzProductName} |{ibmpowerProductName} |{ibmpowerProductName} Virtual Server - -|Default -|xref:../installing/installing_alibaba/installing-alibaba-default.adoc#installing-alibaba-default[✓] -|xref:../installing/installing_aws/installing-aws-default.adoc#installing-aws-default[✓] -|xref:../installing/installing_aws/installing-aws-default.adoc#installing-aws-default[✓] -|xref:../installing/installing_azure/installing-azure-default.adoc#installing-azure-default[✓] -|xref:../installing/installing_azure/installing-azure-default.adoc#installing-azure-default[✓] -|xref:../installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc#installing-azure-stack-hub-default[✓] -|xref:../installing/installing_gcp/installing-gcp-default.adoc#installing-gcp-default[✓] -|xref:../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#installing-nutanix-installer-provisioned[✓] -| -|xref:../installing/installing_bare_metal_ipi/ipi-install-overview.adoc#ipi-install-overview[✓] -|xref:../installing/installing_bare_metal_ipi/ipi-install-overview.adoc#ipi-install-overview[✓] -|xref:../installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc#installing-vsphere-installer-provisioned[✓] -|xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc#installing-ibm-cloud-customizations[✓] -| -| -| - -|Custom -|xref:../installing/installing_alibaba/installing-alibaba-customizations.adoc#installing-alibaba-customizations[✓] -|xref:../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[✓] -|xref:../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[✓] -|xref:../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-customizations[✓] -|xref:../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-customizations[✓] -|xref:../installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc#installing-azure-stack-hub-default[✓] -|xref:../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[✓] -|xref:../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#installing-nutanix-installer-provisioned[✓] -|xref:../installing/installing_openstack/installing-openstack-installer-custom.adoc#installing-openstack-installer-custom[✓] -| -| -|xref:../installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc#installing-vsphere-installer-provisioned-customizations[✓] 
-|xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc#installing-ibm-cloud-customizations[✓] -| -| -|xref:../installing/installing_ibm_powervs/installing-ibm-power-vs-customizations.adoc#installing-ibm-power-vs-customizations[✓] - - -|Network customization -|xref:../installing/installing_alibaba/installing-alibaba-network-customizations.adoc#installing-alibaba-network-customizations[✓] -|xref:../installing/installing_aws/installing-aws-network-customizations.adoc#installing-aws-network-customizations[✓] -|xref:../installing/installing_aws/installing-aws-network-customizations.adoc#installing-aws-network-customizations[✓] -|xref:../installing/installing_azure/installing-azure-network-customizations.adoc#installing-azure-network-customizations[✓] -|xref:../installing/installing_azure/installing-azure-network-customizations.adoc#installing-azure-network-customizations[✓] -|xref:../installing/installing_azure_stack_hub/installing-azure-stack-hub-network-customizations.adoc#installing-azure-stack-hub-network-customizations[✓] -|xref:../installing/installing_gcp/installing-gcp-network-customizations.adoc#installing-gcp-network-customizations[✓] -| -|xref:../installing/installing_openstack/installing-openstack-installer-kuryr.adoc#installing-openstack-installer-kuryr[✓] -| -| -|xref:../installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc#installing-vsphere-installer-provisioned-network-customizations[✓] -|xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc#installing-ibm-cloud-network-customizations[✓] -| -| -| - -|Restricted network -| -|xref:../installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc#installing-restricted-networks-aws-installer-provisioned[✓] -|xref:../installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc#installing-restricted-networks-aws-installer-provisioned[✓] -| -| -| -|xref:../installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc#installing-restricted-networks-gcp-installer-provisioned[✓] -|xref:../installing/installing_nutanix/installing-restricted-networks-nutanix-installer-provisioned.adoc#installing-restricted-networks-nutanix-installer-provisioned[✓] -|xref:../installing/installing_openstack/installing-openstack-installer-restricted.adoc#installing-openstack-installer-restricted[✓] -|xref:../installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#ipi-install-installation-workflow[✓] -|xref:../installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#ipi-install-installation-workflow[✓] -|xref:../installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc#installing-restricted-networks-installer-provisioned-vsphere[✓] -| -| -| -|xref:../installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc#installing-restricted-networks-ibm-power-vs[✓] - -|Private clusters -| -|xref:../installing/installing_aws/installing-aws-private.adoc#installing-aws-private[✓] -|xref:../installing/installing_aws/installing-aws-private.adoc#installing-aws-private[✓] -|xref:../installing/installing_azure/installing-azure-private.adoc#installing-azure-private[✓] -|xref:../installing/installing_azure/installing-azure-private.adoc#installing-azure-private[✓] -| -|xref:../installing/installing_gcp/installing-gcp-private.adoc#installing-gcp-private[✓] -| -| -| -| -| -| 
-|xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc#installing-ibm-cloud-private[✓] -| -| -|xref:../installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc#installing-ibm-power-vs-private-cluster[✓] - -|Existing virtual private networks -| -|xref:../installing/installing_aws/installing-aws-vpc.adoc#installing-aws-vpc[✓] -|xref:../installing/installing_aws/installing-aws-vpc.adoc#installing-aws-vpc[✓] -|xref:../installing/installing_azure/installing-azure-vnet.adoc#installing-azure-vnet[✓] -|xref:../installing/installing_azure/installing-azure-vnet.adoc#installing-azure-vnet[✓] -| -|xref:../installing/installing_gcp/installing-gcp-vpc.adoc#installing-gcp-vpc[✓] -| -| -| -| -| -| -|xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc#installing-ibm-cloud-vpc[✓] -| -| -|xref:../installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc#installing-ibm-powervs-vpc[✓] - -|Government regions -| -|xref:../installing/installing_aws/installing-aws-government-region.adoc#installing-aws-government-region[✓] -| -|xref:../installing/installing_azure/installing-azure-government-region.adoc#installing-azure-government-region[✓] -| -| -| -| -| -| -| -| -| -| -| -| -| - -|Secret regions -| -|xref:../installing/installing_aws/installing-aws-secret-region.adoc#installing-aws-secret-region[✓] -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| - -|China regions -| -|xref:../installing/installing_aws/installing-aws-china.adoc#installing-aws-china-region[✓] -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -|=== -endif::openshift-origin[] - -//This table is for OKD only. A separate table is required because OKD does not support multiple AWS architecture types. Trying to maintain one table using conditions, while convenient, is very fragile and prone to publishing errors. 
-ifdef::openshift-origin[] -|=== -||Alibaba |AWS |Azure |Azure Stack Hub |GCP |Nutanix |{rh-openstack} |Bare metal |vSphere |VMC |IBM Cloud VPC |{ibmzProductName} |{ibmpowerProductName} - - -|Default -|xref:../installing/installing_alibaba/installing-alibaba-default.adoc#installing-alibaba-default[✓] -|xref:../installing/installing_aws/installing-aws-default.adoc#installing-aws-default[✓] -|xref:../installing/installing_azure/installing-azure-default.adoc#installing-azure-default[✓] -|xref:../installing/installing_azure/installing-azure-default.adoc#installing-azure-default[✓] -|xref:../installing/installing_gcp/installing-gcp-default.adoc#installing-gcp-default[✓] -|xref:../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#installing-nutanix-installer-provisioned[✓] -| -|xref:../installing/installing_bare_metal_ipi/ipi-install-overview.adoc#ipi-install-overview[✓] -|xref:../installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc#installing-vsphere-installer-provisioned[✓] -|xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc#installing-ibm-cloud-customizations[✓] -| -| - -|Custom -|xref:../installing/installing_alibaba/installing-alibaba-customizations.adoc#installing-alibaba-customizations[✓] -|xref:../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[✓] -|xref:../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-customizations[✓] -|xref:../installing/installing_azure/installing-azure-default.adoc#installing-azure-default[✓] -|xref:../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[✓] -|xref:../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#installing-nutanix-installer-provisioned[✓] -|xref:../installing/installing_openstack/installing-openstack-installer-custom.adoc#installing-openstack-installer-custom[✓] -| -|xref:../installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc#installing-vsphere-installer-provisioned-customizations[✓] -|xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc#installing-ibm-cloud-customizations[✓] -| -| - -|Network customization -|xref:../installing/installing_alibaba/installing-alibaba-network-customizations.adoc#installing-alibaba-network-customizations[✓] -|xref:../installing/installing_aws/installing-aws-network-customizations.adoc#installing-aws-network-customizations[✓] -|xref:../installing/installing_azure/installing-azure-network-customizations.adoc#installing-azure-network-customizations[✓] -|xref:../installing/installing_azure_stack_hub/installing-azure-stack-hub-network-customizations.adoc#installing-azure-stack-hub-network-customizations[✓] -|xref:../installing/installing_gcp/installing-gcp-network-customizations.adoc#installing-gcp-network-customizations[✓] -| -|xref:../installing/installing_openstack/installing-openstack-installer-kuryr.adoc#installing-openstack-installer-kuryr[✓] -| -|xref:../installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc#installing-vsphere-installer-provisioned-network-customizations[✓] -|xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc#installing-ibm-cloud-network-customizations[✓] -| -| - -|Restricted network -| 
-|xref:../installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc#installing-restricted-networks-aws-installer-provisioned[✓] -| -| -|xref:../installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc#installing-restricted-networks-gcp-installer-provisioned[✓] -|xref:../installing/installing_nutanix/installing-restricted-networks-nutanix-installer-provisioned.adoc#installing-restricted-networks-nutanix-installer-provisioned[✓] -|xref:../installing/installing_openstack/installing-openstack-installer-restricted.adoc#installing-openstack-installer-restricted[✓] -| -|xref:../installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc#installing-restricted-networks-installer-provisioned-vsphere[✓] -| -| -| - -|Private clusters -| -|xref:../installing/installing_aws/installing-aws-private.adoc#installing-aws-private[✓] -|xref:../installing/installing_azure/installing-azure-private.adoc#installing-azure-private[✓] -| -|xref:../installing/installing_gcp/installing-gcp-private.adoc#installing-gcp-private[✓] -| -| -| -| -| -|xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc#installing-ibm-cloud-private[✓] -| -| - -|Existing virtual private networks -| -|xref:../installing/installing_aws/installing-aws-vpc.adoc#installing-aws-vpc[✓] -|xref:../installing/installing_azure/installing-azure-vnet.adoc#installing-azure-vnet[✓] -| -|xref:../installing/installing_gcp/installing-gcp-vpc.adoc#installing-gcp-vpc[✓] -| -| -| -| -| -|xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc#installing-ibm-cloud-vpc[✓] -| -| - -|Government regions -| -|xref:../installing/installing_aws/installing-aws-government-region.adoc#installing-aws-government-region[✓] -|xref:../installing/installing_azure/installing-azure-government-region.adoc#installing-azure-government-region[✓] -| -| -| -| -| -| -| -| -| -| - -|Secret regions -| -|xref:../installing/installing_aws/installing-aws-secret-region.adoc#installing-aws-secret-region[✓] -| -| -| -| -| -| -| -| -| -| -| - -|China regions -| -|xref:../installing/installing_aws/installing-aws-china.adoc#installing-aws-china-region[✓] -| -| -| -| -| -| -| -| -| -| -| -|=== -endif::openshift-origin[] - -.User-provisioned infrastructure options -//This table is for all flavors of OpenShift, except OKD. A separate table is required because OKD does not support multiple AWS architecture types. Trying to maintain one table using conditions, while convenient, is very fragile and prone to publishing errors. 
-ifndef::openshift-origin[] -|=== -||Alibaba |AWS (x86_64) |AWS (arm64) |Azure (x86_64) |Azure (arm64) |Azure Stack Hub |GCP |Nutanix |{rh-openstack} |Bare metal (x86_64) |Bare metal (arm64) |vSphere |VMC |IBM Cloud VPC |{ibmzProductName} |{ibmzProductName} with {op-system-base} KVM |{ibmpowerProductName} |Platform agnostic - - -|Custom -| -|xref:../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[✓] -|xref:../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[✓] -|xref:../installing/installing_azure/installing-azure-user-infra.adoc#installing-azure-user-infra[✓] -|xref:../installing/installing_azure/installing-azure-user-infra.adoc#installing-azure-user-infra[✓] -|xref:../installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc#installing-azure-stack-hub-user-infra[✓] -|xref:../installing/installing_gcp/installing-gcp-user-infra.adoc#installing-gcp-user-infra[✓] -| -|xref:../installing/installing_openstack/installing-openstack-user.adoc#installing-openstack-user[✓] -|xref:../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[✓] -|xref:../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[✓] -|xref:../installing/installing_vsphere/installing-vsphere.adoc#installing-vsphere[✓] -| -|xref:../installing/installing_ibm_z/installing-ibm-z.adoc#installing-ibm-z[✓] -|xref:../installing/installing_ibm_z/installing-ibm-z-kvm.adoc#installing-ibm-z-kvm[✓] -|xref:../installing/installing_ibm_power/installing-ibm-power.adoc#installing-ibm-power[✓] -|xref:../installing/installing_platform_agnostic/installing-platform-agnostic.adoc#installing-platform-agnostic[✓] - - -|Network customization -| -| -| -| -| -| -| -| -|xref:../installing/installing_openstack/installing-openstack-user-kuryr.adoc#installing-openstack-user-kuryr[✓] -|xref:../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installing-bare-metal-network-customizations[✓] -|xref:../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installing-bare-metal-network-customizations[✓] -|xref:../installing/installing_vsphere/installing-vsphere-network-customizations.adoc#installing-vsphere-network-customizations[✓] -| -| -| -| -| - -|Restricted network -| -|xref:../installing/installing_aws/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[✓] -|xref:../installing/installing_aws/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[✓] -| -| -| -|xref:../installing/installing_gcp/installing-restricted-networks-gcp.adoc#installing-restricted-networks-gcp[✓] -| -| -|xref:../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[✓] -|xref:../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[✓] -|xref:../installing/installing_vsphere/installing-restricted-networks-vsphere.adoc#installing-restricted-networks-vsphere[✓] -| -|xref:../installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc#installing-restricted-networks-ibm-z[✓] -|xref:../installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc#installing-restricted-networks-ibm-z-kvm[✓] -|xref:../installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc#installing-restricted-networks-ibm-power[✓] -| - -|Shared VPC hosted outside of cluster project -| -| -| -| -| -| 
-|xref:../installing/installing_gcp/installing-gcp-user-infra-vpc.adoc#installing-gcp-user-infra-vpc[✓] -| -| -| -| -| -| -| -| -| -| -| -|=== -endif::openshift-origin[] - -//This table is for OKD only. A separate table is required because OKD does not support multiple AWS architecture types. Trying to maintain one table using conditions, while convenient, is very fragile and prone to publishing errors. -ifdef::openshift-origin[] -|=== -||Alibaba |AWS |Azure |Azure Stack Hub |GCP |Nutanix |{rh-openstack}|Bare metal |vSphere |VMC |IBM Cloud VPC |{ibmzProductName} |{ibmzProductName} with {op-system-base} KVM |{ibmpowerProductName} |Platform agnostic - - -|Custom -| -|xref:../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[✓] -|xref:../installing/installing_azure/installing-azure-user-infra.adoc#installing-azure-user-infra[✓] -|xref:../installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc#installing-azure-stack-hub-user-infra[✓] -|xref:../installing/installing_gcp/installing-gcp-user-infra.adoc#installing-gcp-user-infra[✓] -| -|xref:../installing/installing_openstack/installing-openstack-user.adoc#installing-openstack-user[✓] -|xref:../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[✓] -|xref:../installing/installing_vsphere/installing-vsphere.adoc#installing-vsphere[✓] -| -|xref:../installing/installing_ibm_z/installing-ibm-z.adoc#installing-ibm-z[✓] -|xref:../installing/installing_ibm_z/installing-ibm-z-kvm.adoc#installing-ibm-z-kvm[✓] -|xref:../installing/installing_ibm_power/installing-ibm-power.adoc#installing-ibm-power[✓] -|xref:../installing/installing_platform_agnostic/installing-platform-agnostic.adoc#installing-platform-agnostic[✓] - - -|Network customization -| -| -| -| -| -| -|xref:../installing/installing_openstack/installing-openstack-user-kuryr.adoc#installing-openstack-user-kuryr[✓] -|xref:../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installing-bare-metal-network-customizations[✓] -|xref:../installing/installing_vsphere/installing-vsphere-network-customizations.adoc#installing-vsphere-network-customizations[✓] -| -| -| -| -| - -|Restricted network -| -|xref:../installing/installing_aws/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[✓] -| -| -|xref:../installing/installing_gcp/installing-restricted-networks-gcp.adoc#installing-restricted-networks-gcp[✓] -| -| -|xref:../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[✓] -|xref:../installing/installing_vsphere/installing-restricted-networks-vsphere.adoc#installing-restricted-networks-vsphere[✓] -| -|xref:../installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc#installing-restricted-networks-ibm-z[✓] -|xref:../installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc#installing-restricted-networks-ibm-z-kvm[✓] -|xref:../installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc#installing-restricted-networks-ibm-power[✓] -| - -|Shared VPC hosted outside of cluster project -| -| -| -| -|xref:../installing/installing_gcp/installing-gcp-user-infra-vpc.adoc#installing-gcp-user-infra-vpc[✓] -| -| -| -| -| -| -| -| -| -| -|=== -endif::openshift-origin[] - -//// -.Special use cases -|=== -|Single Node - -|xref:../installing/installing_sno/install-sno-installing-sno.adoc#installing-sno[✓] - - -|=== -//// -// sync diff --git 
a/installing/installing-troubleshooting.adoc b/installing/installing-troubleshooting.adoc deleted file mode 100644 index f2330d138a3b..000000000000 --- a/installing/installing-troubleshooting.adoc +++ /dev/null @@ -1,32 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-troubleshooting"] -= Troubleshooting installation issues -include::_attributes/common-attributes.adoc[] -:context: installing-troubleshooting - -toc::[] - -To assist in troubleshooting a failed {product-title} installation, you can gather logs from the bootstrap and control plane machines. You can also get debug information from the installation program. If you are unable to resolve the issue using the logs and debug information, see xref:../support/troubleshooting/troubleshooting-installations.adoc#determining-where-installation-issues-occur_troubleshooting-installations[Determining where installation issues occur] for component-specific troubleshooting. - -[NOTE] -==== -If your {product-title} installation fails and the debug output or logs contain network timeouts or other connectivity errors, review the guidelines for xref:../installing/install_config/configuring-firewall.adoc#configuring-firewall[configuring your firewall]. Gathering logs from your firewall and load balancer can help you diagnose network-related errors. -==== - -== Prerequisites - -* You attempted to install an {product-title} cluster and the installation failed. - -include::modules/installation-bootstrap-gather.adoc[leveloffset=+1] - -include::modules/manually-gathering-logs-with-ssh.adoc[leveloffset=+1] - -include::modules/manually-gathering-logs-without-ssh.adoc[leveloffset=+1] - -include::modules/installation-getting-debug-information.adoc[leveloffset=+1] - -include::modules/restarting-installation.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../installing/index.adoc#ocp-installation-overview[Installing an {product-title} cluster] diff --git a/installing/installing_alibaba/_attributes b/installing/installing_alibaba/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_alibaba/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_alibaba/images b/installing/installing_alibaba/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/installing/installing_alibaba/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/installing/installing_alibaba/installing-alibaba-customizations.adoc b/installing/installing_alibaba/installing-alibaba-customizations.adoc deleted file mode 100644 index 3fc08a24a635..000000000000 --- a/installing/installing_alibaba/installing-alibaba-customizations.adoc +++ /dev/null @@ -1,70 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-alibaba-customizations"] -= Installing a cluster on Alibaba Cloud with customizations -include::_attributes/common-attributes.adoc[] -:context: installing-alibaba-customizations - -toc::[] - -In {product-title} version {product-version}, you can install a customized cluster on infrastructure that the installation program provisions on Alibaba Cloud. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -[NOTE] -==== -The scope of the {product-title} installation configurations is intentionally narrow. It is designed for simplicity and ensured success. 
You can complete many more {product-title} configuration tasks after an installation completes. -==== - -:FeatureName: Alibaba Cloud on {product-title} -include::snippets/technology-preview.adoc[] - -[id="prerequisites_installing-alibaba-customizations"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_alibaba/preparing-to-install-on-alibaba.adoc#installation-alibaba-dns_preparing-to-install-on-alibaba[registered your domain]. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud Resource Access Management (RAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_alibaba/manually-creating-alibaba-ram.adoc#manually-creating-alibaba-ram[manually create and maintain Resource Access Management (RAM) credentials]. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+2] - -include::modules/manually-creating-alibaba-manifests.adoc[leveloffset=+2] - -include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+2] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-alibaba-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -[id="next-steps_installing-alibaba-customizations"] -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -//Given that manual mode is required to install on Alibaba Cloud, I do not believe this xref is necessary.
-//* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_alibaba/installing-alibaba-default.adoc b/installing/installing_alibaba/installing-alibaba-default.adoc deleted file mode 100644 index 2655c5bb74d2..000000000000 --- a/installing/installing_alibaba/installing-alibaba-default.adoc +++ /dev/null @@ -1,60 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-alibaba-default"] -= Installing a cluster quickly on Alibaba Cloud -include::_attributes/common-attributes.adoc[] -:context: installing-alibaba-default - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -Alibaba Cloud that uses the default configuration options. - -:FeatureName: Alibaba Cloud on {product-title} -include::snippets/technology-preview.adoc[] - -[id="prerequisites_installing-alibaba-default"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_alibaba/preparing-to-install-on-alibaba.adoc#installation-alibaba-dns_preparing-to-install-on-alibaba[registered your domain]. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You have xref:../../installing/installing_alibaba/manually-creating-alibaba-ram.adoc#manually-creating-alibaba-ram[created the required Alibaba Cloud resources]. -* If the cloud Resource Access Management (RAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the kube-system namespace, you can xref:../../installing/installing_alibaba/manually-creating-alibaba-ram.adoc#manually-creating-alibaba-ram[manually create and maintain Resource Access Management (RAM) credentials]. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/manually-creating-alibaba-manifests.adoc[leveloffset=+1] - -include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. 
-* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -[id="next-steps_installing-alibaba-default"] -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -//Given that manual mode is required to install on Alibaba Cloud, I do not believe this xref is necessary. -//* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials] diff --git a/installing/installing_alibaba/installing-alibaba-network-customizations.adoc b/installing/installing_alibaba/installing-alibaba-network-customizations.adoc deleted file mode 100644 index 16a4be1ef188..000000000000 --- a/installing/installing_alibaba/installing-alibaba-network-customizations.adoc +++ /dev/null @@ -1,80 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-alibaba-network-customizations"] -= Installing a cluster on Alibaba Cloud with network customizations -include::_attributes/common-attributes.adoc[] -:context: installing-alibaba-network-customizations - -toc::[] - -In {product-title} {product-version}, you can install a cluster on Alibaba Cloud with customized network configuration options. By customizing your network configuration, your cluster can coexist with existing IP address allocations in your environment and integrate with existing MTU and -VXLAN configurations. - -You must set most of the network configuration parameters during installation, and you can modify only `kubeProxy` configuration parameters in a running cluster. - -:FeatureName: Alibaba Cloud on {product-title} -include::snippets/technology-preview.adoc[] - -[id="prerequisites_installing-alibaba-network-customizations"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_alibaba/preparing-to-install-on-alibaba.adoc#installation-alibaba-dns_preparing-to-install-on-alibaba[registered your domain]. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud Resource Access Management (RAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_alibaba/manually-creating-alibaba-ram.adoc#manually-creating-alibaba-ram[manually create and maintain Resource Access Management (RAM) credentials]. 
- -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -//Networking-specific customization module -include::modules/nw-network-config.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+2] - -include::modules/manually-creating-alibaba-manifests.adoc[leveloffset=+2] - -include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+2] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-alibaba-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -//Networking-specific customization module -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -//Networking-specific customization module -include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] - -//Networking-specific customization module -include::modules/configuring-hybrid-ovnkubernetes.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -[id="next-steps_installing-alibaba-network-customizations"] -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validate an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -//Given that manual mode is required to install on Alibaba Cloud, I do not believe this xref is necessary. -//* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. \ No newline at end of file diff --git a/installing/installing_alibaba/installing-alibaba-vpc.adoc b/installing/installing_alibaba/installing-alibaba-vpc.adoc deleted file mode 100644 index ab7e6baa1500..000000000000 --- a/installing/installing_alibaba/installing-alibaba-vpc.adoc +++ /dev/null @@ -1,72 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-alibaba-vpc"] -= Installing a cluster on Alibaba Cloud into an existing VPC -include::_attributes/common-attributes.adoc[] -:context: installing-alibaba-vpc - -toc::[] - -In {product-title} version {product-version}, you can install a cluster into an existing Alibaba Virtual Private Cloud (VPC) on Alibaba Cloud Services.
The installation program provisions the required infrastructure, which can then be customized. To customize the VPC installation, modify the parameters in the `install-config.yaml` file before you install the cluster. - -[NOTE] -==== -The scope of the {product-title} installation configurations is intentionally narrow. It is designed for simplicity and ensured success. You can complete many more {product-title} configuration tasks after an installation completes. -==== - -:FeatureName: Alibaba Cloud on {product-title} -include::snippets/technology-preview.adoc[] - -[id="prerequisites_installing-alibaba-vpc"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_alibaba/preparing-to-install-on-alibaba.adoc#installation-alibaba-dns_preparing-to-install-on-alibaba[registered your domain]. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud Resource Access Management (RAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_alibaba/manually-creating-alibaba-ram.adoc#manually-creating-alibaba-ram[manually create and maintain Resource Access Management (RAM) credentials]. - -include::modules/installation-custom-alibaba-vpc.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+2] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-alibaba-config-yaml.adoc[leveloffset=+2] - -include::modules/manually-creating-alibaba-manifests.adoc[leveloffset=+2] - -include::modules/cco-ccoctl-configuring.adoc[leveloffset=+2] - -include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -[id="next-steps_installing-alibaba-vpc"] -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster].
-* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -//Given that manual mode is required to install on Alibaba Cloud, I do not believe this xref is necessary. -//* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. - diff --git a/installing/installing_alibaba/manually-creating-alibaba-ram.adoc b/installing/installing_alibaba/manually-creating-alibaba-ram.adoc deleted file mode 100644 index 1c166cebf244..000000000000 --- a/installing/installing_alibaba/manually-creating-alibaba-ram.adoc +++ /dev/null @@ -1,34 +0,0 @@ -:_content-type: ASSEMBLY -[id="manually-creating-alibaba-ram"] -= Creating the required Alibaba Cloud resources -include::_attributes/common-attributes.adoc[] -:context: manually-creating-alibaba-ram - -toc::[] - -Before you install {product-title}, you must use the Alibaba Cloud console to create a Resource Access Management (RAM) user that has sufficient permissions to install {product-title} into your Alibaba Cloud. This user must also have permissions to create new RAM users. You can also configure and use the `ccoctl` tool to create new credentials for the {product-title} components with the permissions that they require. - -:FeatureName: Alibaba Cloud on {product-title} -include::snippets/technology-preview.adoc[] - -//Task part 1: Manually creating the required RAM user -include::modules/manually-creating-alibaba-ram-user.adoc[leveloffset=+1] - -//Task part 2: Configuring the Cloud Credential Operator utility -include::modules/cco-ccoctl-configuring.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] - -//Task part 3: Creating Alibaba resources with a single command -// modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+1] - -[id="next-steps_manually-creating-alibaba-ram"] -== Next steps - -* Install a cluster on Alibaba Cloud infrastructure that is provisioned by the {product-title} installation program, by using one of the following methods: - -** **xref:../../installing/installing_alibaba/installing-alibaba-default.adoc#installing-alibaba-default[Installing a cluster quickly on Alibaba Cloud]**: You can install a cluster quickly by using the default configuration options. - -** **xref:../../installing/installing_alibaba/installing-alibaba-customizations.adoc#installing-alibaba-customizations[Installing a customized cluster on Alibaba Cloud]**: The installation program allows for some customization to be applied at the installation stage. Many other customization options are available xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-cluster-tasks[post-installation]. 
- diff --git a/installing/installing_alibaba/modules b/installing/installing_alibaba/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_alibaba/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_alibaba/preparing-to-install-on-alibaba.adoc b/installing/installing_alibaba/preparing-to-install-on-alibaba.adoc deleted file mode 100644 index 1df21d71d2fc..000000000000 --- a/installing/installing_alibaba/preparing-to-install-on-alibaba.adoc +++ /dev/null @@ -1,35 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-alibaba"] -= Preparing to install on Alibaba Cloud -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-alibaba - -toc::[] - -:FeatureName: Alibaba Cloud on {product-title} -include::snippets/technology-preview.adoc[] - -[id="prerequisites_preparing-to-install-on-alibaba"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -[id="requirements-for-installing-ocp-on-alibaba"] -== Requirements for installing {product-title} on Alibaba Cloud - -Before installing {product-title} on Alibaba Cloud, you must configure and register your domain, create a Resource Access Management (RAM) user for the installation, and review the supported Alibaba Cloud data center regions and zones for the installation. - -include::modules/installation-alibaba-dns.adoc[leveloffset=+1] - -// include modules/installation-alibaba-limits.adoc[leveloffset=+1] - -// include modules/installation-alibaba-ram-user.adoc[leveloffset=+1] - -include::modules/installation-alibaba-regions.adoc[leveloffset=+1] - -[id="next-steps_preparing-to-install-on-alibaba"] -== Next steps - -* xref:../../installing/installing_alibaba/manually-creating-alibaba-ram.adoc#manually-creating-alibaba-ram[Create the required Alibaba Cloud resources]. - diff --git a/installing/installing_alibaba/snippets b/installing/installing_alibaba/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_alibaba/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_alibaba/uninstall-cluster-alibaba.adoc b/installing/installing_alibaba/uninstall-cluster-alibaba.adoc deleted file mode 100644 index 9b4fbbb22346..000000000000 --- a/installing/installing_alibaba/uninstall-cluster-alibaba.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-alibaba"] -= Uninstalling a cluster on Alibaba Cloud -include::_attributes/common-attributes.adoc[] -:context: uninstall-cluster-alibaba - -toc::[] - -You can remove a cluster that you deployed to Alibaba Cloud. 
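-
-As an illustration only, the removal is driven by the same installation program and asset directory that you used to deploy the cluster; the `<installation_directory>` value in the following sketch is a placeholder, and the exact procedure is documented in the included module that follows.
-
-[source,terminal]
-----
-$ ./openshift-install destroy cluster --dir <installation_directory> --log-level info
-----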
- -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] diff --git a/installing/installing_aws/_attributes b/installing/installing_aws/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_aws/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_aws/images b/installing/installing_aws/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_aws/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_aws/installing-aws-account.adoc b/installing/installing_aws/installing-aws-account.adoc deleted file mode 100644 index cda3cd0adcc9..000000000000 --- a/installing/installing_aws/installing-aws-account.adoc +++ /dev/null @@ -1,49 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-account"] -= Configuring an AWS account -include::_attributes/common-attributes.adoc[] -:context: installing-aws-account - -toc::[] - -Before you can install {product-title}, you must configure an -Amazon Web Services (AWS) account. - -include::modules/installation-aws-route53.adoc[leveloffset=+1] - -include::modules/nw-endpoint-route53.adoc[leveloffset=+2] - -include::modules/installation-aws-limits.adoc[leveloffset=+1] - -include::modules/installation-aws-permissions.adoc[leveloffset=+1] - -include::modules/installation-aws-iam-user.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[Manually creating IAM for AWS] for steps to set the Cloud Credential Operator (CCO) to manual mode prior to installation. Use this mode in environments where the cloud identity and access management (IAM) APIs are not reachable, or if you prefer not to store an administrator-level credential secret in the cluster `kube-system` project. - -include::modules/installation-aws-iam-policies-about.adoc[leveloffset=+1] - -include::modules/installation-aws-permissions-iam-roles.adoc[leveloffset=+2] -include::modules/installation-aws-add-iam-roles.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* See xref:../../installing/installing_aws/installing-aws-customizations.adoc#installation-launching-installer_installing-aws-customizations[Deploying the cluster]. 
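-
-If you already have the AWS CLI configured, it can help to confirm which AWS account and identity your local credentials resolve to before you work through the following sections. This is an optional, illustrative check, not a required step:
-
-[source,terminal]
-----
-# Show the account ID and ARN behind the active credentials
-$ aws sts get-caller-identity
-
-# Review which profile, access key, and region the AWS CLI is using
-$ aws configure list
-----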
- -include::modules/installation-aws-access-analyzer.adoc[leveloffset=+2] - -include::modules/installation-aws-marketplace.adoc[leveloffset=+1] - -include::modules/installation-aws-regions.adoc[leveloffset=+1] - -== Next steps - -* Install an {product-title} cluster: -** xref:../../installing/installing_aws/installing-aws-default.adoc#installing-aws-default[Quickly install a cluster] with default options on installer-provisioned infrastructure -** xref:../../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[Install a cluster with cloud customizations on installer-provisioned infrastructure] -** xref:../../installing/installing_aws/installing-aws-network-customizations.adoc#installing-aws-network-customizations[Install a cluster with network customizations on installer-provisioned infrastructure] -** xref:../../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[Installing a cluster on user-provisioned infrastructure in AWS by using CloudFormation templates] -** xref:../../installing/installing_aws/installing-aws-outposts-remote-workers.adoc#installing-aws-outposts-remote-workers[Installing a cluster on AWS with remote workers on AWS Outposts] diff --git a/installing/installing_aws/installing-aws-china.adoc b/installing/installing_aws/installing-aws-china.adoc deleted file mode 100644 index 525610b503ad..000000000000 --- a/installing/installing_aws/installing-aws-china.adoc +++ /dev/null @@ -1,77 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-china-region"] -= Installing a cluster on AWS China -include::_attributes/common-attributes.adoc[] -:context: installing-aws-china-region - -toc::[] - -In {product-title} version {product-version}, you can install a cluster to the following Amazon Web Services (AWS) China regions: - -* `cn-north-1` (Beijing) -* `cn-northwest-1` (Ningxia) - -== Prerequisites - -* You have an Internet Content Provider (ICP) license. -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. - -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. 
You can supply the keys when you run the installation program. -==== - -include::modules/installation-aws-regions-with-no-ami.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/private-clusters-default.adoc[leveloffset=+1] -include::modules/private-clusters-about-aws.adoc[leveloffset=+2] - -include::modules/installation-custom-aws-vpc.adoc[leveloffset=+1] -include::modules/installation-aws-security-groups.adoc[leveloffset=+2] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-aws-upload-custom-rhcos-ami.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] -include::modules/installation-aws-config-yaml.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-aws-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-aws-arm-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-applying-aws-security-groups.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. - -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_aws/installing-aws-customizations.adoc b/installing/installing_aws/installing-aws-customizations.adoc deleted file mode 100644 index 365b0b5c2b0d..000000000000 --- a/installing/installing_aws/installing-aws-customizations.adoc +++ /dev/null @@ -1,80 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-customizations"] -= Installing a cluster on AWS with customizations -include::_attributes/common-attributes.adoc[] -:context: installing-aws-customizations -:platform: AWS - -toc::[] - -In {product-title} version {product-version}, you can install a customized -cluster on infrastructure that the installation program provisions on -Amazon Web Services (AWS). To customize the installation, you modify -parameters in the `install-config.yaml` file before you install the cluster. 
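-
-For orientation only, the following fragment sketches the kind of `install-config.yaml` stanzas that are commonly customized for AWS. All values shown (base domain, cluster name, instance type, and region) are placeholders; the configuration parameters module later in this assembly is the authoritative reference.
-
-[source,yaml]
-----
-apiVersion: v1
-baseDomain: example.com            # your registered base DNS domain
-metadata:
-  name: mycluster                  # cluster name, prepended to the base domain
-controlPlane:
-  name: master
-  replicas: 3
-compute:
-- name: worker
-  replicas: 3
-  platform:
-    aws:
-      type: m6i.xlarge             # example worker instance type
-platform:
-  aws:
-    region: us-west-2              # example AWS region
-pullSecret: '{"auths": ...}'
-sshKey: ssh-ed25519 AAAA...
-----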
- -[NOTE] -==== -The scope of the {product-title} installation configurations is intentionally narrow. It is designed for simplicity and ensured success. You can complete many more {product-title} configuration tasks after an installation completes. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -+ -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program. -==== -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-aws-marketplace-subscribe.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-aws-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-aws-arm-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-aws-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. 
- -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_aws/installing-aws-default.adoc b/installing/installing_aws/installing-aws-default.adoc deleted file mode 100644 index 30daa9bf5e18..000000000000 --- a/installing/installing_aws/installing-aws-default.adoc +++ /dev/null @@ -1,61 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-default"] -= Installing a cluster quickly on AWS -include::_attributes/common-attributes.adoc[] -:context: installing-aws-default - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -Amazon Web Services (AWS) that uses the default configuration options. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -+ -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use key-based, long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program. -==== -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. Manual mode can also be used in environments where the cloud IAM APIs are not reachable. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See link:https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html[Configuration and credential file settings] in the AWS documentation for more information about AWS profile and credential configuration. 
- -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. - -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_aws/installing-aws-expanding-a-cluster-with-on-premise-bare-metal-nodes.adoc b/installing/installing_aws/installing-aws-expanding-a-cluster-with-on-premise-bare-metal-nodes.adoc deleted file mode 100644 index 14273eb525c0..000000000000 --- a/installing/installing_aws/installing-aws-expanding-a-cluster-with-on-premise-bare-metal-nodes.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: ASSEMBLY -[id="aws-expanding-a-cluster-with-on-premise-bare-metal-nodes"] -= Expanding a cluster with on-premise bare metal nodes -include::_attributes/common-attributes.adoc[] -:context: aws-expanding-a-cluster-with-on-premise-bare-metal-nodes - -toc::[] - -You can expand an {product-title} cluster deployed on AWS by adding bare-metal nodes to the cluster. By default, a cluster deployed on AWS with {product-title} 4.11 or earlier has the Bare Metal Operator (BMO) disabled. In {product-title} 4.12 and later releases, the BMO is enabled to support a hybrid cloud consisting of AWS control plane nodes and worker nodes with additional on-premise bare-metal worker nodes. - -Expanding an {product-title} cluster deployed on AWS requires using virtual media with bare-metal nodes that meet the xref:../installing_bare_metal_ipi/ipi-install-prerequisites.adoc#node-requirements_ipi-install-prerequisites[node requirements] and xref:../installing_bare_metal_ipi/ipi-install-prerequisites.adoc#ipi-install-firmware-requirements-for-installing-with-virtual-media_ipi-install-prerequisites[firmware requirements] for installing with virtual media. A `provisioning` network is not required, and if present, should be xref:../installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#modifying-install-config-for-no-provisioning-network_ipi-install-installation-workflow[disabled].
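-
-As an optional, illustrative check (not a step that this assembly requires), you can confirm that the Bare Metal Operator components are present on the AWS cluster before you start adding on-premise nodes:
-
-[source,terminal]
-----
-# The baremetal cluster Operator should report Available=True
-$ oc get clusteroperator baremetal
-
-# BareMetalHost resources appear here once nodes are registered
-$ oc get baremetalhosts -n openshift-machine-api
-----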
- -include::modules/installation-aws_con_connecting-the-vpc-to-the-on-premise-network.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources-aws-vpc"] -.Additional resources - -* link:https://docs.aws.amazon.com/vpc/?icmpid=docs_homepage_featuredsvcs[Amazon VPC] -* link:https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html[VPC peering] - -include::modules/installation-aws_proc_creating-firewall-rules-for-port-6183.adoc[leveloffset=+1] - -After you have the networking configured, you can proceed with xref:../installing_bare_metal_ipi/ipi-install-expanding-the-cluster.adoc#ipi-install-expanding-the-cluster[expanding the cluster]. \ No newline at end of file diff --git a/installing/installing_aws/installing-aws-government-region.adoc b/installing/installing_aws/installing-aws-government-region.adoc deleted file mode 100644 index 905cbf9482aa..000000000000 --- a/installing/installing_aws/installing-aws-government-region.adoc +++ /dev/null @@ -1,82 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-government-region"] -= Installing a cluster on AWS into a government region -include::_attributes/common-attributes.adoc[] -:context: installing-aws-government-region - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -Amazon Web Services (AWS) into a government region. To configure the -region, modify parameters in the `install-config.yaml` file before you -install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -+ -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program. -==== -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. 
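-
-For orientation only, the region change itself is a small edit to `install-config.yaml`; the fragment below assumes the `us-gov-west-1` region and the private (`publish: Internal`) publishing strategy that the private-cluster sections of this assembly describe.
-
-[source,yaml]
-----
-platform:
-  aws:
-    region: us-gov-west-1
-publish: Internal
-----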
- -include::modules/installation-aws-about-government-region.adoc[leveloffset=+1] - -include::modules/installation-prereq-aws-private-cluster.adoc[leveloffset=+1] - -include::modules/private-clusters-default.adoc[leveloffset=+1] -include::modules/private-clusters-about-aws.adoc[leveloffset=+2] - -include::modules/installation-custom-aws-vpc.adoc[leveloffset=+1] -include::modules/installation-aws-security-groups.adoc[leveloffset=+2] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-aws-marketplace-subscribe.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-aws-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-aws-arm-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-aws-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-applying-aws-security-groups.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. - -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_aws/installing-aws-localzone.adoc b/installing/installing_aws/installing-aws-localzone.adoc deleted file mode 100644 index 930b814c17a2..000000000000 --- a/installing/installing_aws/installing-aws-localzone.adoc +++ /dev/null @@ -1,160 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-localzone"] -= Installing a cluster using AWS Local Zones -include::_attributes/common-attributes.adoc[] -:context: installing-aws-localzone - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on Amazon Web Services (AWS) into an existing VPC, extending workers to the edge of the Cloud Infrastructure using AWS Local Zones. 
- -After you create an Amazon Web Services (AWS) Local Zone environment and deploy your cluster, you can use edge worker nodes to create user workloads in Local Zone subnets. - -AWS Local Zones are a type of infrastructure that places cloud resources close to metropolitan regions. For more information, see the link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-local-zones[AWS Local Zones Documentation]. - -{product-title} can be installed in existing VPCs with Local Zone subnets. The Local Zone subnets can be used to extend the regular worker nodes to the edge networks. The edge worker nodes are dedicated to running user workloads. - -One way to create the VPC and subnets is to use the provided CloudFormation templates. You can modify the templates to customize your infrastructure or use the information that they contain to create AWS objects according to your company's policies. - -[IMPORTANT] -==== -The steps for performing an installer-provisioned infrastructure installation are provided as an example only. Installing a cluster in a VPC that you provide requires knowledge of the cloud provider and the installation process of {product-title}. The CloudFormation templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods; the templates are just an example. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -+ -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use key-based, long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program. -==== -* You noted the region and supported link:https://aws.amazon.com/about-aws/global-infrastructure/localzones/locations[AWS Local Zones locations] to create the network resources in. -* You read the link:https://aws.amazon.com/about-aws/global-infrastructure/localzones/features/[Features] for each AWS Local Zones location. -* You downloaded the AWS CLI and installed it on your computer. See link:https://docs.aws.amazon.com/cli/latest/userguide/install-bundle.html[Install the AWS CLI Using the Bundled Installer (Linux, macOS, or UNIX)] in the AWS documentation. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy.
-==== -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. -* Add permission for the user who creates the cluster to modify the Local Zone group with `ec2:ModifyAvailabilityZoneGroup`. For example: -+ -.An example of a permissive IAM policy to attach to a user or role -[source,yaml] ----- -{ - "Version": "2012-10-17", - "Statement": [ - { - "Action": [ - "ec2:ModifyAvailabilityZoneGroup" - ], - "Effect": "Allow", - "Resource": "*" - } - ] -} ----- - -include::modules/cluster-limitations-local-zone.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../storage/understanding-persistent-storage.adoc#pvc-storage-class_understanding-persistent-storage[Storage classes] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-aws-marketplace-subscribe.adoc[leveloffset=+1] - -include::modules/installation-creating-aws-vpc-localzone.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-vpc-localzone.adoc[leveloffset=+2] -include::modules/installation-aws-security-groups.adoc[leveloffset=+2] - -include::modules/installation-aws-add-local-zone-locations.adoc[leveloffset=+1] - -include::modules/installation-creating-aws-subnet-localzone.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-subnet-localzone.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* You can view details about the CloudFormation stacks that you create by navigating to the link:https://console.aws.amazon.com/cloudformation/[AWS CloudFormation console]. - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate.adoc[leveloffset=+1] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-aws-tested-machine-types.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See link:https://aws.amazon.com/about-aws/global-infrastructure/localzones/features/[AWS Local Zones features] in the AWS documentation for more information about AWS Local Zones and the supported instances types and services. - -include::modules/installation-generate-aws-user-infra-install-config.adoc[leveloffset=+2] -// Suggest to standarize edge-pool's specific files with same prefixes, like: machine-edge-pool-[...] or compute-edge-pool-[...] 
(which is more compatible with install-config.yaml/compute) -include::modules/machines-edge-machine-pool.adoc[leveloffset=+2] -include::modules/edge-machine-pools-aws-local-zones.adoc[leveloffset=+3] - -[role="_additional-resources"] -.Additional resources - -* xref:../../networking/changing-cluster-network-mtu.adoc#mtu-value-selection_changing-cluster-network-mtu[Changing the MTU for the cluster network] -* xref:../../networking/changing-cluster-network-mtu.adoc#nw-ovn-ipsec-enable_configuring-ipsec-ovn[Enabling IPsec encryption] - -include::modules/install-creating-install-config-aws-local-zones.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See link:https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html[Configuration and credential file settings] in the AWS documentation for more information about AWS profile and credential configuration. - -//include::modules/installation-configure-proxy.adoc[leveloffset=+2] -//Put this back if QE validates it. - -// Verify removal due to automation. -// include::modules/installation-localzone-generate-k8s-manifest.adoc[leveloffset=+2] - - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -.Next steps -* xref:../../post_installation_configuration/cluster-tasks.adoc#installation-extend-edge-nodes-aws-local-zones_post-install-cluster-tasks[Creating user workloads in AWS Local Zones] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/machine-edge-pool-review-nodes.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. - -[id="installing-aws-localzone-next-steps"] -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#installation-extend-edge-nodes-aws-local-zones_post-install-cluster-tasks[Creating user workloads in AWS Local Zones]. -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. 
diff --git a/installing/installing_aws/installing-aws-network-customizations.adoc b/installing/installing_aws/installing-aws-network-customizations.adoc deleted file mode 100644 index 9e7874e03cfa..000000000000 --- a/installing/installing_aws/installing-aws-network-customizations.adoc +++ /dev/null @@ -1,100 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-network-customizations"] -= Installing a cluster on AWS with network customizations -include::_attributes/common-attributes.adoc[] -:context: installing-aws-network-customizations - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -Amazon Web Services (AWS) with customized network configuration options. By -customizing your network configuration, your cluster can coexist with existing -IP address allocations in your environment and integrate with existing MTU and -VXLAN configurations. - -You must set most of the network configuration parameters during installation, -and you can modify only `kubeProxy` configuration parameters in a running -cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -+ -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use key-based, long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program. -==== -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. 
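-
-Returning to the network configuration parameters mentioned in the introduction, the following fragment is a sketch of the `networking` stanza in `install-config.yaml`. The CIDR values shown are the documented defaults; adjust them so that they do not overlap with your existing IP address allocations.
-
-[source,yaml]
-----
-networking:
-  networkType: OVNKubernetes
-  clusterNetwork:
-  - cidr: 10.128.0.0/14
-    hostPrefix: 23
-  machineNetwork:
-  - cidr: 10.0.0.0/16
-  serviceNetwork:
-  - 172.30.0.0/16
-----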
-// TODO -// Concept that describes networking - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/nw-network-config.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-aws-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-aws-arm-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-aws-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -// Network Operator specific configuration -include::modules/nw-operator-cr.adoc[leveloffset=+1] -include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] - - -[NOTE] -==== -For more information on using a Network Load Balancer (NLB) on AWS, see xref:../../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-aws.adoc#configuring-ingress-cluster-traffic-aws-network-load-balancer[Configuring Ingress cluster traffic on AWS using a Network Load Balancer]. -==== - -include::modules/nw-aws-nlb-new-cluster.adoc[leveloffset=+1] - -include::modules/configuring-hybrid-ovnkubernetes.adoc[leveloffset=+1] - - -[NOTE] -==== -For more information on using Linux and Windows nodes in the same cluster, see xref:../../windows_containers/understanding-windows-container-workloads.adoc#understanding-windows-container-workloads[Understanding Windows container workloads]. -==== - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. - -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. 
diff --git a/installing/installing_aws/installing-aws-outposts-remote-workers.adoc b/installing/installing_aws/installing-aws-outposts-remote-workers.adoc deleted file mode 100644 index fd844a94f188..000000000000 --- a/installing/installing_aws/installing-aws-outposts-remote-workers.adoc +++ /dev/null @@ -1,104 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-outposts-remote-workers"] -= Installing a cluster on AWS with remote workers on AWS Outposts -include::_attributes/common-attributes.adoc[] -:context: installing-aws-outposts-remote-workers - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -Amazon Web Services (AWS) with remote workers running in AWS Outposts. -You can achieve this by customizing the default AWS installation and performing some manual steps. - -For more information about AWS Outposts, see the link:https://docs.aws.amazon.com/outposts/index.html[AWS Outposts Documentation]. - -[IMPORTANT] -==== -To install a cluster with remote workers in AWS Outposts, all worker instances must be located within the same Outpost instance and cannot be located in an AWS region. The cluster cannot have instances in both AWS Outposts and an AWS region. Additionally, the control plane nodes must not be schedulable. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -* You are familiar with the instance types that are supported in the AWS Outpost instance that you use. You can validate the instance types by using the link:https://docs.aws.amazon.com/cli/latest/reference/outposts/get-outpost-instance-types.html[get-outpost-instance-types AWS CLI command]. -* You are familiar with the AWS Outpost instance details, such as `OutpostArn` and `AvailabilityZone`. You can retrieve these details by using the link:https://docs.aws.amazon.com/cli/latest/reference/outposts/list-outposts.html[list-outposts AWS CLI command]. -+ -[IMPORTANT] -==== -Because the cluster uses the provided AWS credentials to create AWS resources for its entire life cycle, the credentials must be key-based and long-lived. If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. For more information about generating the appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program. -==== -* You have access to an existing Amazon Virtual Private Cloud (VPC) in Amazon Web Services (AWS). See the section "About using a custom VPC" for more information. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to.
-* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. - -include::modules/installation-custom-aws-vpc.adoc[leveloffset=+1] -include::modules/installation-aws-security-groups.adoc[leveloffset=+2] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+1] - -include::modules/installation-identify-supported-aws-outposts-instance-types.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-aws-config-yaml.adoc[leveloffset=+2] -include::modules/installation-applying-aws-security-groups.adoc[leveloffset=+2] - -include::modules/installation-aws-editing-manifests.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. - -== Cluster Limitations - -[IMPORTANT] -==== -Network Load Balancer (NLB) and Classic Load Balancer are not supported on AWS Outposts. After the cluster is created, all the Load Balancers are created in the AWS region. In order to use Load Balancers created inside the Outpost instances, Application Load Balancer should be used. The AWS Load Balancer Operator can be used in order to achieve that goal. - -If you want to use a public subnet located in the outpost instance for the ALB, you need to remove the special tag (`kubernetes.io/cluster/.*-outposts: owned`) that was added earlier during the VPC creation. This will prevent you from creating new Services of type LoadBalancer (Network Load Balancer). - -See xref:../../networking/aws_load_balancer_operator/understanding-aws-load-balancer-operator.adoc[Understanding the AWS Load Balancer Operator] for more information -==== - -[IMPORTANT] -==== -Persistent storage using AWS Elastic Block Store limitations - -* AWS Outposts does not support Amazon Elastic Block Store (EBS) gp3 volumes. After installation, the cluster includes two storage classes - gp3-csi and gp2-csi, with gp3-csi being the default storage class. It is important to always use gp2-csi. 
You can change the default storage class by using the following OpenShift CLI (`oc`) commands: -+ -[source,terminal] ----- -$ oc annotate --overwrite storageclass gp3-csi storageclass.kubernetes.io/is-default-class=false -$ oc annotate --overwrite storageclass gp2-csi storageclass.kubernetes.io/is-default-class=true ----- -* To create a volume in the Outpost instance, the CSI driver determines the Outpost ARN based on the topology keys stored on the CSINode objects. To ensure that the CSI driver uses the correct topology values, it is necessary to use the `WaitForFirstConsumer` volume binding mode and avoid setting allowed topologies on any new storage class that you create. -==== - -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_aws/installing-aws-private.adoc b/installing/installing_aws/installing-aws-private.adoc deleted file mode 100644 index 99c4aff225ac..000000000000 --- a/installing/installing_aws/installing-aws-private.adoc +++ /dev/null @@ -1,78 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-private"] -= Installing a private cluster on AWS -include::_attributes/common-attributes.adoc[] -:context: installing-aws-private - -toc::[] - -In {product-title} version {product-version}, you can install a private cluster into an existing VPC on Amazon Web Services (AWS). The installation program provisions the rest of the required infrastructure, which you can further customize. To customize the installation, you modify -parameters in the `install-config.yaml` file before you install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -+ -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program. -==== -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to.
-* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. - -include::modules/private-clusters-default.adoc[leveloffset=+1] - -include::modules/private-clusters-about-aws.adoc[leveloffset=+2] - -include::modules/installation-custom-aws-vpc.adoc[leveloffset=+1] -include::modules/installation-aws-security-groups.adoc[leveloffset=+2] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-aws-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-aws-arm-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-aws-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-applying-aws-security-groups.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. - -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. 
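For orientation only: the private-cluster behavior that this assembly describes is ultimately expressed through a handful of `install-config.yaml` settings. The following excerpt is a minimal sketch, not text from the deleted assembly; the base domain, cluster name, region, and subnet IDs are placeholders, and the field names assume the standard installer schema.

[source,yaml]
----
apiVersion: v1
baseDomain: example.com            # placeholder base domain
metadata:
  name: private-cluster            # placeholder cluster name
platform:
  aws:
    region: us-east-1              # region that contains the existing VPC
    subnets:                       # IDs of the existing private subnets to reuse
    - subnet-0123456789abcdef0
    - subnet-0123456789abcdef1
publish: Internal                  # publish endpoints only inside the private network
----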
diff --git a/installing/installing_aws/installing-aws-secret-region.adoc b/installing/installing_aws/installing-aws-secret-region.adoc
deleted file mode 100644
index c6e99961692e..000000000000
--- a/installing/installing_aws/installing-aws-secret-region.adoc
+++ /dev/null
@@ -1,80 +0,0 @@
-:_content-type: ASSEMBLY
-[id="installing-aws-secret-region"]
-= Installing a cluster on AWS into a Secret or Top Secret Region
-include::_attributes/common-attributes.adoc[]
-:context: installing-aws-secret-region
-
-toc::[]
-
-In {product-title} version {product-version}, you can install a cluster on Amazon Web Services (AWS) into the following secret regions:
-
-* Secret Commercial Cloud Services (SC2S)
-* Commercial Cloud Services (C2S)
-
-To configure a cluster in either region, you change parameters in the `install-config.yaml` file before you install the cluster.
-
-[id="prerequisites_installing-aws-secret-region"]
-== Prerequisites
-
-* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes.
-* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users].
-* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster.
-+
-[IMPORTANT]
-====
-If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program.
-====
-* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to.
-* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials].
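As a rough sketch of where the region-specific values described in this assembly end up, the following `install-config.yaml` excerpt is illustrative only; the region code, AMI ID, and certificate contents are placeholders, and the configuration modules included below remain the authoritative reference.

[source,yaml]
----
platform:
  aws:
    region: us-iso-east-1            # example Top Secret (C2S) region code; SC2S uses us-isob-east-1
    amiID: ami-0123456789abcdef0     # custom RHCOS AMI uploaded to the target region
publish: Internal                    # clusters in these regions are typically published internally
additionalTrustBundle: |             # CA bundle presented by the region's AWS API endpoints
  -----BEGIN CERTIFICATE-----
  <certificate_contents>
  -----END CERTIFICATE-----
----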
- -include::modules/installation-aws-about-government-region.adoc[leveloffset=+1] - -include::modules/installation-aws-regions-with-no-ami.adoc[leveloffset=+1] - -include::modules/private-clusters-default.adoc[leveloffset=+1] -include::modules/private-clusters-about-aws.adoc[leveloffset=+2] - -include::modules/installation-custom-aws-vpc.adoc[leveloffset=+1] -include::modules/installation-aws-security-groups.adoc[leveloffset=+2] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-aws-upload-custom-rhcos-ami.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] -include::modules/installation-aws-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-aws-config-yaml.adoc[leveloffset=+2] -include::modules/installation-configure-proxy.adoc[leveloffset=+2] -include::modules/installation-applying-aws-security-groups.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-aws-secret-region_console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-aws-secret-region_telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -[id="next-steps_installing-aws-secret-region"] -== Next steps -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_aws/installing-aws-three-node.adoc b/installing/installing_aws/installing-aws-three-node.adoc deleted file mode 100644 index 4bc6d680896c..000000000000 --- a/installing/installing_aws/installing-aws-three-node.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-three-node"] -= Installing a three-node cluster on AWS -include::_attributes/common-attributes.adoc[] -:context: installing-aws-three-node - -toc::[] - -In {product-title} version {product-version}, you can install a three-node cluster on Amazon Web Services (AWS). A three-node cluster consists of three control plane machines, which also act as compute machines. This type of cluster provides a smaller, more resource efficient cluster, for cluster administrators and developers to use for testing, development, and production. 
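In practice, the three-node topology is requested through the machine pool counts in the `install-config.yaml` file. The following excerpt is a minimal sketch based on the standard installer schema, not text from the deleted assembly:

[source,yaml]
----
apiVersion: v1
controlPlane:
  name: master
  replicas: 3      # the three control plane machines also schedule workloads
compute:
- name: worker
  replicas: 0      # no dedicated compute machines in a three-node cluster
----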
- -You can install a three-node cluster using either installer-provisioned or user-provisioned infrastructure. - -[NOTE] -==== -Deploying a three-node cluster using an AWS Marketplace image is not supported. -==== - -include::modules/installation-three-node-cluster-cloud-provider.adoc[leveloffset=+1] - -== Next steps -* xref:../../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[Installing a cluster on AWS with customizations] -* xref:../../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[Installing a cluster on user-provisioned infrastructure in AWS by using CloudFormation templates] diff --git a/installing/installing_aws/installing-aws-user-infra.adoc b/installing/installing_aws/installing-aws-user-infra.adoc deleted file mode 100644 index 04af00bb7e19..000000000000 --- a/installing/installing_aws/installing-aws-user-infra.adoc +++ /dev/null @@ -1,220 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-user-infra"] -= Installing a cluster on user-provisioned infrastructure in AWS by using CloudFormation templates -include::_attributes/common-attributes.adoc[] -:context: installing-aws-user-infra -:platform: AWS - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on Amazon Web Services (AWS) that uses infrastructure that you provide. - -One way to create this infrastructure is to use the provided CloudFormation templates. You can modify the templates to customize your infrastructure or use the information that they contain to create AWS objects according to your company's policies. - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the cloud provider and the installation process of {product-title}. Several CloudFormation templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods; the templates are just an example. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -+ -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use key-based, long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program. -==== -* You downloaded the AWS CLI and installed it on your computer. See link:https://docs.aws.amazon.com/cli/latest/userguide/install-bundle.html[Install the AWS CLI Using the Bundled Installer (Linux, macOS, or UNIX)] in the AWS documentation. 
-* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-aws-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-aws-arm-tested-machine-types.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-aws-user-infra-requirements.adoc[leveloffset=+1] - -include::modules/installation-aws-permissions.adoc[leveloffset=+2] - -include::modules/installation-aws-marketplace-subscribe.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate.adoc[leveloffset=+1] - -include::modules/installation-disk-partitioning-upi-templates.adoc[leveloffset=+2] - -include::modules/installation-generate-aws-user-infra-install-config.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See link:https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html[Configuration and credential file settings] in the AWS documentation for more information about AWS profile and credential configuration. - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -//include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+2] - -include::modules/installation-extracting-infraid.adoc[leveloffset=+1] - -include::modules/installation-creating-aws-vpc.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-vpc.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* You can view details about the CloudFormation stacks that you create by navigating to the link:https://console.aws.amazon.com/cloudformation/[AWS CloudFormation console]. - -include::modules/installation-creating-aws-dns.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-dns.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* You can view details about the CloudFormation stacks that you create by navigating to the link:https://console.aws.amazon.com/cloudformation/[AWS CloudFormation console]. - -* You can view details about your hosted zones by navigating to the link:https://console.aws.amazon.com/route53/[AWS Route 53 console]. 
- -* See link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ListInfoOnHostedZone.html[Listing public hosted zones] in the AWS documentation for more information about listing public hosted zones. - -include::modules/installation-creating-aws-security.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-security.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* You can view details about the CloudFormation stacks that you create by navigating to the link:https://console.aws.amazon.com/cloudformation/[AWS CloudFormation console]. - -include::modules/installation-aws-ami-stream-metadata.adoc[leveloffset=+1] - -include::modules/installation-aws-user-infra-rhcos-ami.adoc[leveloffset=+1] - -include::modules/installation-aws-regions-with-no-ami.adoc[leveloffset=+2] - -include::modules/installation-aws-upload-custom-rhcos-ami.adoc[leveloffset=+2] - -include::modules/installation-creating-aws-bootstrap.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-bootstrap.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* You can view details about the CloudFormation stacks that you create by navigating to the link:https://console.aws.amazon.com/cloudformation/[AWS CloudFormation console]. - -* See xref:../../installing/installing_aws/installing-aws-user-infra.adoc#installation-aws-user-infra-rhcos-ami_installing-aws-user-infra[{op-system} AMIs for the AWS infrastructure] for details about the {op-system-first} AMIs for the AWS zones. - -include::modules/installation-creating-aws-control-plane.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-control-plane.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* You can view details about the CloudFormation stacks that you create by navigating to the link:https://console.aws.amazon.com/cloudformation/[AWS CloudFormation console]. - -include::modules/installation-creating-aws-worker.adoc[leveloffset=+1] - -//// -[id="installing-workers-aws-user-infra"] -== Creating worker nodes - -You can either manually create worker nodes or use a MachineSet to create worker -nodes after the cluster deploys. If you use a MachineSet to create and maintain -the workers, you can allow the cluster to manage them. This allows you to easily -scale, manage, and upgrade your workers. -//// - -include::modules/installation-cloudformation-worker.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* You can view details about the CloudFormation stacks that you create by navigating to the link:https://console.aws.amazon.com/cloudformation/[AWS CloudFormation console]. - -include::modules/installation-aws-user-infra-bootstrap.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/troubleshooting/troubleshooting-installations.adoc#monitoring-installation-progress_troubleshooting-installations[Monitoring installation progress] for details about monitoring the installation, bootstrap, and control plane logs as an {product-title} installation progresses. - -* See xref:../../support/troubleshooting/troubleshooting-installations.adoc#gathering-bootstrap-diagnostic-data_troubleshooting-installations[Gathering bootstrap node diagnostic data] for information about troubleshooting issues related to the bootstrap process. - -* You can view details about the running instances that are created by using the link:https://console.aws.amazon.com/ec2[AWS EC2 console]. 
- -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -You can configure registry storage for user-provisioned infrastructure in AWS to deploy {product-title} to hidden regions. See xref:../../registry/configuring_registry_storage/configuring-registry-storage-aws-user-infrastructure.adoc#configuring-registry-storage-aws-user-infrastructure[Configuring the registry for AWS user-provisioned infrastructure] for more information. - -include::modules/registry-configuring-storage-aws-user-infra.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-aws-user-infra-delete-bootstrap.adoc[leveloffset=+1] - -include::modules/installation-create-ingress-dns-records.adoc[leveloffset=+1] - -include::modules/installation-aws-user-infra-installation.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. - -[role="_additional-resources"] -[id="installing-aws-user-infra-additional-resources"] -== Additional resources - -* See link:https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacks.html[Working with stacks] in the AWS documentation for more information about AWS CloudFormation stacks. - -[id="installing-aws-user-infra-next-steps"] -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_aws/installing-aws-vpc.adoc b/installing/installing_aws/installing-aws-vpc.adoc deleted file mode 100644 index 5930262085ca..000000000000 --- a/installing/installing_aws/installing-aws-vpc.adoc +++ /dev/null @@ -1,74 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-vpc"] -= Installing a cluster on AWS into an existing VPC -include::_attributes/common-attributes.adoc[] -:context: installing-aws-vpc - -toc::[] - -In {product-title} version {product-version}, you can install a cluster into an existing Amazon Virtual Private Cloud (VPC) on Amazon Web Services (AWS). The installation program provisions the rest of the required infrastructure, which you can further customize. 
To customize the installation, you modify -parameters in the `install-config.yaml` file before you install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -+ -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program. -==== -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. - -include::modules/installation-custom-aws-vpc.adoc[leveloffset=+1] -include::modules/installation-aws-security-groups.adoc[leveloffset=+2] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-aws-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-aws-arm-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-aws-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-applying-aws-security-groups.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. 
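For orientation, installing into an existing VPC as described in this assembly amounts to pointing the installation program at subnets that you already own. The excerpt below is a hypothetical sketch; the region and subnet IDs are placeholders, and the configuration modules included above remain the authoritative reference.

[source,yaml]
----
platform:
  aws:
    region: us-west-2                # region of the existing VPC
    subnets:                         # existing subnet IDs, typically one public and one private per availability zone
    - subnet-0aaaaaaaaaaaaaaaa       # private subnet
    - subnet-0bbbbbbbbbbbbbbbb       # public subnet
----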
- -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc b/installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc deleted file mode 100644 index 5aea22557e62..000000000000 --- a/installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc +++ /dev/null @@ -1,81 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-aws-installer-provisioned"] -= Installing a cluster on AWS in a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-aws-installer-provisioned - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on Amazon Web Services (AWS) in a restricted network by creating an internal mirror of the installation release content on an existing Amazon Virtual Private Cloud (VPC). - -[id="prerequisites_installing-restricted-networks-aws-installer-provisioned"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installation-about-mirror-registry_installing-mirroring-installation-images[mirrored the images for a disconnected installation] to your registry and obtained the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps. -==== -* You have an existing VPC in AWS. When installing to a restricted network using installer-provisioned infrastructure, you cannot use the installer-provisioned VPC. You must use a user-provisioned VPC that satisfies one of the following requirements: -** Contains the mirror registry -** Has firewall rules or a peering connection to access the mirror registry hosted elsewhere -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -+ -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use key-based, long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. 
You can supply the keys when you run the installation program. -==== -* You downloaded the AWS CLI and installed it on your computer. See link:https://docs.aws.amazon.com/cli/latest/userguide/install-bundle.html[Install the AWS CLI Using the Bundled Installer (Linux, macOS, or Unix)] in the AWS documentation. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -If you are configuring a proxy, be sure to also review this site list. -==== -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/installation-custom-aws-vpc.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-aws-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -[id="next-steps_installing-restricted-networks-aws-installer-provisioned"] -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validate an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-must-gather-disconnected[Configure image streams] for the Cluster Samples Operator and the `must-gather` tool. -* Learn how to xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[use Operator Lifecycle Manager (OLM) on restricted networks]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. 
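For reference, the mirror-related stanzas that a restricted network installation adds to `install-config.yaml` look roughly like the following sketch. The registry host name, repository path, and certificate are placeholders; the actual `imageContentSources` values come from the mirroring procedure referenced in the prerequisites.

[source,yaml]
----
imageContentSources:                           # maps release image sources to the local mirror
- mirrors:
  - mirror.example.com:8443/ocp4/openshift4    # placeholder mirror registry and repository
  source: quay.io/openshift-release-dev/ocp-release
- mirrors:
  - mirror.example.com:8443/ocp4/openshift4
  source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
additionalTrustBundle: |                       # CA certificate that the mirror registry presents
  -----BEGIN CERTIFICATE-----
  <mirror_registry_certificate>
  -----END CERTIFICATE-----
----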
diff --git a/installing/installing_aws/installing-restricted-networks-aws.adoc b/installing/installing_aws/installing-restricted-networks-aws.adoc deleted file mode 100644 index 3651683aea19..000000000000 --- a/installing/installing_aws/installing-restricted-networks-aws.adoc +++ /dev/null @@ -1,204 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-aws"] -= Installing a cluster on AWS in a restricted network with user-provisioned infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-aws - -toc::[] - -In {product-title} version {product-version}, you can install a -cluster on Amazon Web Services (AWS) using infrastructure that you provide and -an internal mirror of the installation release content. - -[IMPORTANT] -==== -While you can install an {product-title} cluster by using mirrored installation -release content, your cluster still requires internet access to use the AWS APIs. -==== - -One way to create this infrastructure is to use the provided -CloudFormation templates. You can modify the templates to customize your -infrastructure or use the information that they contain to create AWS objects -according to your company's policies. - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the cloud provider and the installation process of {product-title}. Several CloudFormation templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods; the templates are just an example. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a mirror registry on your mirror host] and obtained the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps. -==== -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -+ -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use key-based, long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program. -==== -* You downloaded the AWS CLI and installed it on your computer. See link:https://docs.aws.amazon.com/cli/latest/userguide/install-bundle.html[Install the AWS CLI Using the Bundled Installer (Linux, macOS, or Unix)] in the AWS documentation. 
-* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-aws-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-aws-arm-tested-machine-types.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-aws-user-infra-requirements.adoc[leveloffset=+1] - -include::modules/installation-aws-permissions.adoc[leveloffset=+2] - -//You extract the installation program from the mirrored content. - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate.adoc[leveloffset=+1] - -include::modules/installation-disk-partitioning-upi-templates.adoc[leveloffset=+2] - -include::modules/installation-generate-aws-user-infra-install-config.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See link:https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html[Configuration and credential file settings] in the AWS documentation for more information about AWS profile and credential configuration. - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -//include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+2] - -include::modules/installation-extracting-infraid.adoc[leveloffset=+1] - -include::modules/installation-creating-aws-vpc.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-vpc.adoc[leveloffset=+2] - -include::modules/installation-creating-aws-dns.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-dns.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ListInfoOnHostedZone.html[Listing public hosted zones] in the AWS documentation for more information about listing public hosted zones. 
- -include::modules/installation-creating-aws-security.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-security.adoc[leveloffset=+2] - -include::modules/installation-aws-ami-stream-metadata.adoc[leveloffset=+1] - -include::modules/installation-aws-user-infra-rhcos-ami.adoc[leveloffset=+1] - -include::modules/installation-creating-aws-bootstrap.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-bootstrap.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_aws/installing-aws-user-infra.adoc#installation-aws-user-infra-rhcos-ami_installing-aws-user-infra[{op-system} AMIs for the AWS infrastructure] for details about the {op-system-first} AMIs for the AWS zones. - -include::modules/installation-creating-aws-control-plane.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-control-plane.adoc[leveloffset=+2] - -include::modules/installation-creating-aws-worker.adoc[leveloffset=+1] - -//// -[id="installing-workers-aws-user-infra"] -== Creating worker nodes - -You can either manually create worker nodes or use a MachineSet to create worker nodes after the cluster deploys. If you use a MachineSet to create and maintain the workers, you can allow the cluster to manage them. This allows you to easily scale, manage, and upgrade your workers. -//// - -include::modules/installation-cloudformation-worker.adoc[leveloffset=+2] - -include::modules/installation-aws-user-infra-bootstrap.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/troubleshooting/troubleshooting-installations.adoc#monitoring-installation-progress_troubleshooting-installations[Monitoring installation progress] for details about monitoring the installation, bootstrap, and control plane logs as an {product-title} installation progresses. - -* See xref:../../support/troubleshooting/troubleshooting-installations.adoc#gathering-bootstrap-diagnostic-data_troubleshooting-installations[Gathering bootstrap node diagnostic data] for information about troubleshooting issues related to the bootstrap process. - -//You can install the CLI on the mirror host. - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-aws-user-infra.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-aws-user-infra-delete-bootstrap.adoc[leveloffset=+1] - -include::modules/installation-create-ingress-dns-records.adoc[leveloffset=+1] - -include::modules/installation-aws-user-infra-installation.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. 
- -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -[role="_additional-resources"] -[id="installing-restricted-networks-aws-additional-resources"] -== Additional resources - -* See link:https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacks.html[Working with stacks] in the AWS documentation for more information about AWS CloudFormation stacks. - -[id="installing-restricted-networks-aws-next-steps"] -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validate an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-must-gather-disconnected[Configure image streams] for the Cluster Samples Operator and the `must-gather` tool. -* Learn how to xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[use Operator Lifecycle Manager (OLM) on restricted networks]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_aws/manually-creating-iam.adoc b/installing/installing_aws/manually-creating-iam.adoc deleted file mode 100644 index 45a38eb40978..000000000000 --- a/installing/installing_aws/manually-creating-iam.adoc +++ /dev/null @@ -1,43 +0,0 @@ -:_content-type: ASSEMBLY -[id="manually-creating-iam-aws"] -= Manually creating IAM for AWS -include::_attributes/common-attributes.adoc[] -:context: manually-creating-iam-aws - -//TO-DO: this should be one file for AWS, Azure, and GCP with conditions for specifics. - -toc::[] - -In environments where the cloud identity and access management (IAM) APIs are not reachable, or the administrator prefers not to store an administrator-level credential secret in the cluster `kube-system` namespace, you can put the Cloud Credential Operator (CCO) into manual mode before you install the cluster. - -include::modules/alternatives-to-storing-admin-secrets-in-kube-system.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -// AWS only. Condition out if combining topic for AWS/Azure/GCP. -* To learn how to use the CCO utility (`ccoctl`) to configure the CCO to use the AWS STS, see xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-mode-sts[Using manual mode with STS]. - -// Not supported in Azure. Condition out if combining topic for AWS/Azure/GCP. 
-* To learn how to rotate or remove the administrator-level credential secret after installing {product-title}, see xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-rotate-remove-cloud-creds[Rotating or removing cloud provider credentials]. - -* For a detailed description of all available CCO credential modes and their supported platforms, see xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator[About the Cloud Credential Operator]. - -include::modules/manually-create-identity-access-management.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] - -include::modules/mint-mode.adoc[leveloffset=+1] - -include::modules/mint-mode-with-removal-of-admin-credential.adoc[leveloffset=+1] - -[id="manually-creating-iam-aws-next-steps"] -== Next steps - -* Install an {product-title} cluster: -** xref:../../installing/installing_aws/installing-aws-default.adoc#installing-aws-default[Installing a cluster quickly on AWS] with default options on installer-provisioned infrastructure -** xref:../../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[Install a cluster with cloud customizations on installer-provisioned infrastructure] -** xref:../../installing/installing_aws/installing-aws-network-customizations.adoc#installing-aws-network-customizations[Install a cluster with network customizations on installer-provisioned infrastructure] -** xref:../../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[Installing a cluster on user-provisioned infrastructure in AWS by using CloudFormation templates] diff --git a/installing/installing_aws/modules b/installing/installing_aws/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/installing/installing_aws/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/installing/installing_aws/preparing-to-install-on-aws.adoc b/installing/installing_aws/preparing-to-install-on-aws.adoc deleted file mode 100644 index 18a9a35bd265..000000000000 --- a/installing/installing_aws/preparing-to-install-on-aws.adoc +++ /dev/null @@ -1,65 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-aws"] -= Preparing to install on AWS -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-aws - -toc::[] - -[id="preparing-to-install-on-aws-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -[id="requirements-for-installing-ocp-on-aws"] -== Requirements for installing {product-title} on AWS - -Before installing {product-title} on Amazon Web Services (AWS), you must create an AWS account. See xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[Configuring an AWS account] for details about configuring an account, account limits, account permissions, IAM user setup, and supported AWS regions. 
- -If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, see xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[Manually creating IAM for AWS] for other options, including xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-mode-sts[configuring the Cloud Credential Operator (CCO) to use the Amazon Web Services Security Token Service (AWS STS)]. - -[id="choosing-an-method-to-install-ocp-on-aws"] -== Choosing a method to install {product-title} on AWS - -You can install {product-title} on installer-provisioned or user-provisioned infrastructure. The default installation type uses installer-provisioned infrastructure, where the installation program provisions the underlying infrastructure for the cluster. You can also install {product-title} on infrastructure that you provision. If you do not use infrastructure that the installation program provisions, you must manage and maintain the cluster resources yourself. - -See xref:../../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process] for more information about installer-provisioned and user-provisioned installation processes. - -[id="choosing-an-method-to-install-ocp-on-aws-single-node"] -=== Installing a cluster on a single node - -Installing {product-title} on a single node alleviates some of the requirements for high availability and large scale clusters. However, you must address the xref:../../installing/installing_sno/install-sno-preparing-to-install-sno.adoc#install-sno-requirements-for-installing-on-a-single-node_install-sno-preparing[requirements for installing on a single node], and the xref:../../installing/installing_sno/install-sno-installing-sno.adoc#additional-requirements-for-installing-on-a-single-node-on-aws_install-sno-installing-sno-with-the-assisted-installer[additional requirements for installing on a single node on AWS]. After addressing the requirements for single node installation, use the xref:../../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[Installing a customized cluster on AWS] procedure to install the cluster. The xref:../../installing/installing_sno/install-sno-installing-sno.adoc#install-sno-installing-sno-manually[installing single-node OpenShift manually] section contains an exemplary `install-config.yaml` file when installing an {product-title} cluster on a single node. - -[id="choosing-an-method-to-install-ocp-on-aws-installer-provisioned"] -=== Installing a cluster on installer-provisioned infrastructure - -You can install a cluster on AWS infrastructure that is provisioned by the {product-title} installation program, by using one of the following methods: - -* **xref:../../installing/installing_aws/installing-aws-default.adoc#installing-aws-default[Installing a cluster quickly on AWS]**: You can install {product-title} on AWS infrastructure that is provisioned by the {product-title} installation program. You can install a cluster quickly by using the default configuration options. - -* **xref:../../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[Installing a customized cluster on AWS]**: You can install a customized cluster on AWS infrastructure that the installation program provisions. 
The installation program allows for some customization to be applied at the installation stage. Many other customization options are available xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-cluster-tasks[post-installation]. - -* **xref:../../installing/installing_aws/installing-aws-network-customizations.adoc#installing-aws-network-customizations[Installing a cluster on AWS with network customizations]**: You can customize your {product-title} network configuration during installation, so that your cluster can coexist with your existing IP address allocations and adhere to your network requirements. - -* **xref:../../installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc#installing-restricted-networks-aws-installer-provisioned[Installing a cluster on AWS in a restricted network]**: You can install {product-title} on AWS on installer-provisioned infrastructure by using an internal mirror of the installation release content. You can use this method to install a cluster that does not require an active internet connection to obtain the software components. - -* **xref:../../installing/installing_aws/installing-aws-vpc.adoc#installing-aws-vpc[Installing a cluster on an existing Virtual Private Cloud]**: You can install {product-title} on an existing AWS Virtual Private Cloud (VPC). You can use this installation method if you have constraints set by the guidelines of your company, such as limits when creating new accounts or infrastructure. - -* **xref:../../installing/installing_aws/installing-aws-private.adoc#installing-aws-private[Installing a private cluster on an existing VPC]**: You can install a private cluster on an existing AWS VPC. You can use this method to deploy {product-title} on an internal network that is not visible to the internet. - -* **xref:../../installing/installing_aws/installing-aws-government-region.adoc#installing-aws-government-region[Installing a cluster on AWS into a government or secret region]**: {product-title} can be deployed into AWS regions that are specifically designed for US government agencies at the federal, state, and local level, as well as contractors, educational institutions, and other US customers that must run sensitive workloads in the cloud. - -[id="choosing-an-method-to-install-ocp-on-aws-user-provisioned"] -=== Installing a cluster on user-provisioned infrastructure - -You can install a cluster on AWS infrastructure that you provision, by using one of the following methods: - -* **xref:../../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[Installing a cluster on AWS infrastructure that you provide]**: You can install {product-title} on AWS infrastructure that you provide. You can use the provided CloudFormation templates to create stacks of AWS resources that represent each of the components required for an {product-title} installation. - -* **xref:../../installing/installing_aws/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[Installing a cluster on AWS in a restricted network with user-provisioned infrastructure]**: You can install {product-title} on AWS infrastructure that you provide by using an internal mirror of the installation release content. You can use this method to install a cluster that does not require an active internet connection to obtain the software components. 
You can also use this installation method to ensure that your clusters only use container images that satisfy your organizational controls on external content. While you can install {product-title} by using the mirrored content, your cluster still requires internet access to use the AWS APIs. - -[id="preparing-to-install-on-aws-next-steps"] -== Next steps - -* xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[Configuring an AWS account] diff --git a/installing/installing_aws/snippets b/installing/installing_aws/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_aws/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_aws/uninstalling-cluster-aws.adoc b/installing/installing_aws/uninstalling-cluster-aws.adoc deleted file mode 100644 index 1a856732afb2..000000000000 --- a/installing/installing_aws/uninstalling-cluster-aws.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-aws"] -= Uninstalling a cluster on AWS -include::_attributes/common-attributes.adoc[] -:context: uninstall-cluster-aws - -toc::[] - -You can remove a cluster that you deployed to Amazon Web Services (AWS). - -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] - -include::modules/cco-ccoctl-deleting-sts-resources.adoc[leveloffset=+1] - -include::modules/installation-aws-delete-cluster.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="installing-localzone-additional-resources"] -.Additional resources - -* See link:https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacks.html[Working with stacks] in the AWS documentation for more information about AWS CloudFormation stacks. -* link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#opt-in-local-zone[Opt into AWS Local Zones] -* link:https://aws.amazon.com/about-aws/global-infrastructure/localzones/locations[AWS Local Zones available locations] -* link:https://aws.amazon.com/about-aws/global-infrastructure/localzones/features[AWS Local Zones features] \ No newline at end of file diff --git a/installing/installing_azure/_attributes b/installing/installing_azure/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_azure/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_azure/enabling-user-managed-encryption-azure.adoc b/installing/installing_azure/enabling-user-managed-encryption-azure.adoc deleted file mode 100644 index efa301c928c7..000000000000 --- a/installing/installing_azure/enabling-user-managed-encryption-azure.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_content-type: ASSEMBLY -[id="enabling-user-managed-encryption-azure"] -= Enabling user-managed encryption for Azure -include::_attributes/common-attributes.adoc[] -:context: enabling-user-managed-encryption-azure - -toc::[] - -In {product-title} version {product-version}, you can install a cluster with a user-managed encryption key in Azure. To enable this feature, you can prepare an Azure DiskEncryptionSet before installation, modify the `install-config.yaml` file, and then complete the installation. 
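As a quick orientation before the module that follows, the prepared disk encryption set is referenced from the Azure platform section of the `install-config.yaml` file. The following fragment is only a sketch: the resource group, disk encryption set name, and subscription ID are placeholder values, and you should confirm the exact field layout against the sample configuration file for your version.

[source,yaml]
----
platform:
  azure:
    region: centralus                      # example region
    defaultMachinePlatform:
      osDisk:
        diskEncryptionSet:
          resourceGroup: example-des-rg    # placeholder resource group that contains the prepared disk encryption set
          name: example-des                # placeholder DiskEncryptionSet name
          subscriptionId: 00000000-0000-0000-0000-000000000000   # placeholder subscription that owns the disk encryption set
----

If the control plane and compute pools need different keys, the same `diskEncryptionSet` stanza can usually be set per machine pool instead of under `defaultMachinePlatform`.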
- -include::modules/installation-azure-preparing-diskencryptionsets.adoc[leveloffset=+1] - -[id="enabling-disk-encryption-sets-azure-next-steps"] -== Next steps - -* Install an {product-title} cluster: -** xref:../../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-customizations[Install a cluster with customizations on installer-provisioned infrastructure] -** xref:../../installing/installing_azure/installing-azure-network-customizations.adoc#installing-azure-network-customizations[Install a cluster with network customizations on installer-provisioned infrastructure] -** xref:../../installing/installing_azure/installing-azure-vnet.adoc#installing-azure-vnet[Install a cluster into an existing VNet on installer-provisioned infrastructure] -** xref:../../installing/installing_azure/installing-azure-private.adoc#installing-azure-private[Install a private cluster on installer-provisioned infrastructure] -** xref:../../installing/installing_azure/installing-azure-government-region.adoc#installing-azure-government-region[Install a cluster into a government region on installer-provisioned infrastructure] diff --git a/installing/installing_azure/images b/installing/installing_azure/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_azure/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_azure/installing-azure-account.adoc b/installing/installing_azure/installing-azure-account.adoc deleted file mode 100644 index 95f2c8420bfb..000000000000 --- a/installing/installing_azure/installing-azure-account.adoc +++ /dev/null @@ -1,52 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-account"] -= Configuring an Azure account -include::_attributes/common-attributes.adoc[] -:context: installing-azure-account - -toc::[] - -Before you can install {product-title}, you must configure a Microsoft Azure -account. - -[IMPORTANT] -==== -All Azure resources that are available through public endpoints are subject to -resource name restrictions, and you cannot create resources that use certain -terms. For a list of terms that Azure restricts, see -link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-reserved-resource-name[Resolve reserved resource name errors] -in the Azure documentation. -==== - -include::modules/installation-azure-limits.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../scalability_and_performance/optimization/optimizing-storage.adoc#optimizing-storage[Optimizing storage]. - -include::modules/installation-azure-network-config.adoc[leveloffset=+1] - -include::modules/installation-azure-increasing-limits.adoc[leveloffset=+1] - -include::modules/installation-azure-permissions.adoc[leveloffset=+1] - -include::modules/minimum-required-permissions-ipi-azure.adoc[leveloffset=+1] - -include::modules/installation-azure-service-principal.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* For more information about CCO modes, see xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator-modes[About the Cloud Credential Operator]. - -include::modules/installation-azure-marketplace.adoc[leveloffset=+1] - -include::modules/installation-azure-regions.adoc[leveloffset=+1] - -== Next steps - -* Install an {product-title} cluster on Azure.
You can -xref:../../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-customizations[install a customized cluster] -or -xref:../../installing/installing_azure/installing-azure-default.adoc#installing-azure-default[quickly install a cluster] with default options. diff --git a/installing/installing_azure/installing-azure-customizations.adoc b/installing/installing_azure/installing-azure-customizations.adoc deleted file mode 100644 index 13967aa1cd82..000000000000 --- a/installing/installing_azure/installing-azure-customizations.adoc +++ /dev/null @@ -1,77 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-customizations"] -= Installing a cluster on Azure with customizations -include::_attributes/common-attributes.adoc[] -:context: installing-azure-customizations -:platform: Azure - -toc::[] - -In {product-title} version {product-version}, you can install a customized -cluster on infrastructure that the installation program provisions on -Microsoft Azure. To customize the installation, you modify -parameters in the `install-config.yaml` file before you install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_azure/installing-azure-account.adoc#installing-azure-account[configured an Azure account] to host the cluster and determined the tested and validated region to deploy the cluster to. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[manually create and maintain IAM credentials]. -* If you use customer-managed encryption keys, you xref:../../installing/installing_azure/enabling-user-managed-encryption-azure.adoc#enabling-user-managed-encryption-azure[prepared your Azure environment for encryption]. 
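For reference, the customization described in this assembly happens in the `install-config.yaml` file that the installation program generates. The following trimmed-down sketch uses placeholder names, domains, and machine types chosen for illustration only; the supported parameters and a complete sample file are covered by the configuration modules included below.

[source,yaml]
----
apiVersion: v1
baseDomain: example.com                   # placeholder base domain with a public DNS zone in Azure
metadata:
  name: example-cluster                   # placeholder cluster name
controlPlane:
  name: master
  replicas: 3
  platform:
    azure:
      type: Standard_D8s_v3               # example size; check the tested machine types module
compute:
- name: worker
  replicas: 3
  platform:
    azure:
      type: Standard_D4s_v3               # example size
platform:
  azure:
    region: centralus                     # example region
    baseDomainResourceGroupName: example-rg   # placeholder resource group that holds the base domain DNS zone
pullSecret: '{"auths": ...}'              # pull secret from the Red Hat OpenShift Cluster Manager
sshKey: ssh-ed25519 AAAA...               # optional SSH public key for node access
----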
- -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-azure-marketplace-subscribe.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-user-defined-tags-azure.adoc[leveloffset=+1] - -include::modules/querying-user-defined-tags-azure.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-azure-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-azure-arm-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-azure-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* For more details about Accelerated Networking, see xref:../../machine_management/creating_machinesets/creating-machineset-azure.adoc#machineset-azure-accelerated-networking_creating-machineset-azure[Accelerated Networking for Microsoft Azure VMs]. - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_azure/installing-azure-default.adoc b/installing/installing_azure/installing-azure-default.adoc deleted file mode 100644 index 39dd5e3ea0da..000000000000 --- a/installing/installing_azure/installing-azure-default.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-default"] -= Installing a cluster quickly on Azure -include::_attributes/common-attributes.adoc[] -:context: installing-azure-default - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -Microsoft Azure that uses the default configuration options. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. 
-* You xref:../../installing/installing_azure/installing-azure-account.adoc#installing-azure-account[configured an Azure account] to host the cluster and determined the tested and validated region to deploy the cluster to. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[manually create and maintain IAM credentials]. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_azure/installing-azure-government-region.adoc b/installing/installing_azure/installing-azure-government-region.adoc deleted file mode 100644 index 6bed17ab0878..000000000000 --- a/installing/installing_azure/installing-azure-government-region.adoc +++ /dev/null @@ -1,78 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-government-region"] -= Installing a cluster on Azure into a government region -include::_attributes/common-attributes.adoc[] -:context: installing-azure-government-region - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -Microsoft Azure into a government region. To configure the government region, -you modify parameters in the `install-config.yaml` file before you install the -cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_azure/installing-azure-account.adoc#installing-azure-account[configured an Azure account] to host the cluster and determined the tested and validated government region to deploy the cluster to. 
-* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[manually create and maintain IAM credentials]. -* If you use customer-managed encryption keys, you xref:../../installing/installing_azure/enabling-user-managed-encryption-azure.adoc#enabling-user-managed-encryption-azure[prepared your Azure environment for encryption]. - -include::modules/installation-azure-about-government-region.adoc[leveloffset=+1] - -include::modules/private-clusters-default.adoc[leveloffset=+1] - -include::modules/private-clusters-about-azure.adoc[leveloffset=+2] - -include::modules/installation-azure-user-defined-routing.adoc[leveloffset=+2] - -include::modules/installation-about-custom-azure-vnet.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-azure-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-azure-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* For more details about Accelerated Networking, see xref:../../machine_management/creating_machinesets/creating-machineset-azure.adoc#machineset-azure-accelerated-networking_creating-machineset-azure[Accelerated Networking for Microsoft Azure VMs]. - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. 
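As a closing illustration for this assembly, the government cloud itself is typically selected in the Azure platform section of `install-config.yaml`. The fragment below is a sketch with assumed placeholder values; confirm the parameter names and the regions available to your account against the sample configuration file and region modules included above.

[source,yaml]
----
platform:
  azure:
    cloudName: AzureUSGovernmentCloud     # targets Microsoft Azure Government (MAG) endpoints
    region: usgovvirginia                 # example MAG region; use a region enabled for your account
    baseDomainResourceGroupName: example-rg   # placeholder resource group for the base domain DNS zone
publish: Internal                         # government region clusters are typically published internally
----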
diff --git a/installing/installing_azure/installing-azure-network-customizations.adoc b/installing/installing_azure/installing-azure-network-customizations.adoc deleted file mode 100644 index b16643bce98c..000000000000 --- a/installing/installing_azure/installing-azure-network-customizations.adoc +++ /dev/null @@ -1,87 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-network-customizations"] -= Installing a cluster on Azure with network customizations -include::_attributes/common-attributes.adoc[] -:context: installing-azure-network-customizations - -toc::[] - -In {product-title} version {product-version}, you can install a cluster with a -customized network configuration on infrastructure that the installation program -provisions on Microsoft Azure. By customizing your network configuration, your -cluster can coexist with existing IP address allocations in your environment and -integrate with existing MTU and VXLAN configurations. - -You must set most of the network configuration parameters during installation, -and you can modify only `kubeProxy` configuration parameters in a running -cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_azure/installing-azure-account.adoc#installing-azure-account[configured an Azure account] to host the cluster and determined the tested and validated region to deploy the cluster to. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[manually create and maintain IAM credentials]. Manual mode can also be used in environments where the cloud IAM APIs are not reachable. -* If you use customer-managed encryption keys, you xref:../../installing/installing_azure/enabling-user-managed-encryption-azure.adoc#enabling-user-managed-encryption-azure[prepared your Azure environment for encryption]. 
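To make the scope of the installation-time network settings concrete, the parameters that must be fixed before installation live in the `networking` stanza of `install-config.yaml`. The sketch below uses common default-style values purely for illustration; the Cluster Network Operator modules included below describe the full set of options.

[source,yaml]
----
networking:
  networkType: OVNKubernetes      # cluster network plugin
  clusterNetwork:
  - cidr: 10.128.0.0/14           # pod network; must not overlap existing allocations
    hostPrefix: 23                # per-node subnet size
  serviceNetwork:
  - 172.30.0.0/16                 # service network
  machineNetwork:
  - cidr: 10.0.0.0/16             # must cover the VNet or subnets that the nodes use
----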
- -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-azure-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-azure-arm-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-azure-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -// Network Operator specific configuration -include::modules/nw-network-config.adoc[leveloffset=+1] -include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] -include::modules/nw-operator-cr.adoc[leveloffset=+1] -include::modules/configuring-hybrid-ovnkubernetes.adoc[leveloffset=+1] - - -[NOTE] -==== -For more information on using Linux and Windows nodes in the same cluster, see xref:../../windows_containers/understanding-windows-container-workloads.adoc#understanding-windows-container-workloads[Understanding Windows container workloads]. -==== - -[role="_additional-resources"] -.Additional resources - -* For more details about Accelerated Networking, see xref:../../machine_management/creating_machinesets/creating-machineset-azure.adoc#machineset-azure-accelerated-networking_creating-machineset-azure[Accelerated Networking for Microsoft Azure VMs]. - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_azure/installing-azure-private.adoc b/installing/installing_azure/installing-azure-private.adoc deleted file mode 100644 index 6e67fcf4d30a..000000000000 --- a/installing/installing_azure/installing-azure-private.adoc +++ /dev/null @@ -1,75 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-private"] -= Installing a private cluster on Azure -include::_attributes/common-attributes.adoc[] -:context: installing-azure-private - -toc::[] - -In {product-title} version {product-version}, you can install a private cluster into an existing Azure Virtual Network (VNet) on Microsoft Azure. The installation program provisions the rest of the required infrastructure, which you can further customize. 
To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_azure/installing-azure-account.adoc#installing-azure-account[configured an Azure account] to host the cluster and determined the tested and validated region to deploy the cluster to. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[manually create and maintain IAM credentials]. -* If you use customer-managed encryption keys, you xref:../../installing/installing_azure/enabling-user-managed-encryption-azure.adoc#enabling-user-managed-encryption-azure[prepared your Azure environment for encryption]. - -include::modules/private-clusters-default.adoc[leveloffset=+1] - -include::modules/private-clusters-about-azure.adoc[leveloffset=+2] - -include::modules/installation-azure-user-defined-routing.adoc[leveloffset=+2] - -include::modules/installation-about-custom-azure-vnet.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-azure-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-azure-arm-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-azure-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* For more details about Accelerated Networking, see xref:../../machine_management/creating_machinesets/creating-machineset-azure.adoc#machineset-azure-accelerated-networking_creating-machineset-azure[Accelerated Networking for Microsoft Azure VMs]. - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. 
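For orientation, a private installation of this kind is expressed in `install-config.yaml` mainly through the publishing strategy and, optionally, the outbound routing mode, on top of the usual existing-VNet fields. The fragment below is a sketch; the values are assumptions for illustration, and the authoritative field list is in the sample configuration file module included above.

[source,yaml]
----
platform:
  azure:
    region: centralus                    # example region
    outboundType: UserDefinedRouting     # optional: use pre-existing user-defined routes for egress
    # the existing VNet is referenced through networkResourceGroupName, virtualNetwork,
    # controlPlaneSubnet, and computeSubnet, as for any installation into an existing VNet
publish: Internal                        # keeps the API and Ingress endpoints off the public internet
----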
- -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_azure/installing-azure-three-node.adoc b/installing/installing_azure/installing-azure-three-node.adoc deleted file mode 100644 index d7c5e24d8bc4..000000000000 --- a/installing/installing_azure/installing-azure-three-node.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-three-node"] -= Installing a three-node cluster on Azure -include::_attributes/common-attributes.adoc[] -:context: installing-azure-three-node - -toc::[] - -In {product-title} version {product-version}, you can install a three-node cluster on Microsoft Azure. A three-node cluster consists of three control plane machines, which also act as compute machines. This type of cluster provides a smaller, more resource-efficient cluster for cluster administrators and developers to use for testing, development, and production. - -You can install a three-node cluster using either installer-provisioned or user-provisioned infrastructure. - -[NOTE] -==== -Deploying a three-node cluster using an Azure Marketplace image is not supported. -==== - -include::modules/installation-three-node-cluster-cloud-provider.adoc[leveloffset=+1] - -== Next steps -* xref:../../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-customizations[Installing a cluster on Azure with customizations] -* xref:../../installing/installing_azure/installing-azure-user-infra.adoc#installing-azure-user-infra[Installing a cluster on Azure using ARM templates] diff --git a/installing/installing_azure/installing-azure-user-infra.adoc b/installing/installing_azure/installing-azure-user-infra.adoc deleted file mode 100644 index c54858c8f497..000000000000 --- a/installing/installing_azure/installing-azure-user-infra.adoc +++ /dev/null @@ -1,143 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-user-infra"] -= Installing a cluster on Azure using ARM templates -include::_attributes/common-attributes.adoc[] -:context: installing-azure-user-infra -:platform: Azure - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on Microsoft Azure by using infrastructure that you provide. - -Several link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/overview[Azure Resource Manager] (ARM) templates are provided to assist in completing these steps or to help model your own. - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the cloud provider and the installation process of {product-title}. You can also create the required resources through other methods; the provided ARM templates are only an example.
-==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_azure/installing-azure-account.adoc#installing-azure-account[configured an Azure account] to host the cluster. -* You downloaded the Azure CLI and installed it on your computer. See link:https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest[Install the Azure CLI] in the Azure documentation. The documentation below was last tested using version `2.38.0` of the Azure CLI. Azure CLI commands might perform differently based on the version you use. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[manually create and maintain IAM credentials]. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-azure-user-infra-config-project"] -== Configuring your Azure project - -Before you can install {product-title}, you must configure an Azure project to host it. - -[IMPORTANT] -==== -All Azure resources that are available through public endpoints are subject to resource name restrictions, and you cannot create resources that use certain terms. For a list of terms that Azure restricts, see link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-reserved-resource-name[Resolve reserved resource name errors] in the Azure documentation. -==== - -include::modules/installation-azure-limits.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../scalability_and_performance/optimization/optimizing-storage.adoc#optimizing-storage[Optimizing storage] - -include::modules/installation-azure-network-config.adoc[leveloffset=+2] - -You can view Azure's DNS solution by visiting this xref:installation-azure-create-dns-zones_{context}[example for creating DNS zones]. - -include::modules/installation-azure-increasing-limits.adoc[leveloffset=+2] - -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-azure-permissions.adoc[leveloffset=+2] -include::modules/minimum-required-permissions-upi-azure.adoc[leveloffset=+2] -include::modules/installation-azure-service-principal.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* For more information about CCO modes, see xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator-modes[About the Cloud Credential Operator]. 
- -include::modules/installation-azure-regions.adoc[leveloffset=+2] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-azure-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-azure-arm-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-azure-marketplace-subscribe.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate.adoc[leveloffset=+1] -include::modules/installation-disk-partitioning-upi-templates.adoc[leveloffset=+2] -include::modules/installation-initializing.adoc[leveloffset=+2] -include::modules/installation-configure-proxy.adoc[leveloffset=+2] -//include::modules/installation-three-node-cluster.adoc[leveloffset=+2] -include::modules/installation-user-infra-exporting-common-variables-arm-templates.adoc[leveloffset=+2] -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+2] - -include::modules/installation-azure-create-resource-group-and-identity.adoc[leveloffset=+1] - -include::modules/installation-azure-user-infra-uploading-rhcos.adoc[leveloffset=+1] - -include::modules/installation-azure-create-dns-zones.adoc[leveloffset=+1] - -You can learn more about xref:installation-azure-network-config_{context}[configuring a public DNS zone in Azure] by visiting that section. 
- -include::modules/installation-creating-azure-vnet.adoc[leveloffset=+1] -include::modules/installation-arm-vnet.adoc[leveloffset=+2] - -include::modules/installation-azure-user-infra-deploying-rhcos.adoc[leveloffset=+1] -include::modules/installation-arm-image-storage.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+1] - -include::modules/installation-creating-azure-dns.adoc[leveloffset=+1] -include::modules/installation-arm-dns.adoc[leveloffset=+2] - -include::modules/installation-creating-azure-bootstrap.adoc[leveloffset=+1] -include::modules/installation-arm-bootstrap.adoc[leveloffset=+2] - -include::modules/installation-creating-azure-control-plane.adoc[leveloffset=+1] -include::modules/installation-arm-control-plane.adoc[leveloffset=+2] - -include::modules/installation-azure-user-infra-wait-for-bootstrap.adoc[leveloffset=+1] - -include::modules/installation-creating-azure-worker.adoc[leveloffset=+1] -include::modules/installation-arm-worker.adoc[leveloffset=+2] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-azure-create-ingress-dns-records.adoc[leveloffset=+1] - -include::modules/installation-azure-user-infra-completing.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service diff --git a/installing/installing_azure/installing-azure-vnet.adoc b/installing/installing_azure/installing-azure-vnet.adoc deleted file mode 100644 index 0cc586737a1e..000000000000 --- a/installing/installing_azure/installing-azure-vnet.adoc +++ /dev/null @@ -1,69 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-vnet"] -= Installing a cluster on Azure into an existing VNet -include::_attributes/common-attributes.adoc[] -:context: installing-azure-vnet - -toc::[] - -In {product-title} version {product-version}, you can install a cluster into an existing Azure Virtual Network (VNet) on Microsoft Azure. The installation program provisions the rest of the required infrastructure, which you can further customize. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_azure/installing-azure-account.adoc#installing-azure-account[configured an Azure account] to host the cluster and determined the tested and validated region to deploy the cluster to. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. 
-* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[manually create and maintain IAM credentials]. -* If you use customer-managed encryption keys, you xref:../../installing/installing_azure/enabling-user-managed-encryption-azure.adoc#enabling-user-managed-encryption-azure[prepared your Azure environment for encryption]. - -include::modules/installation-about-custom-azure-vnet.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-azure-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-azure-arm-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-azure-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* For more details about Accelerated Networking, see xref:../../machine_management/creating_machinesets/creating-machineset-azure.adoc#machineset-azure-accelerated-networking_creating-machineset-azure[Accelerated Networking for Microsoft Azure VMs]. - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. 
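As a companion to the existing-VNet configuration covered in this assembly, the sketch below shows the `install-config.yaml` fields that point the installation program at a network you already created instead of letting it provision one. The resource names are placeholders chosen for illustration; verify the parameter names against the configuration parameter modules included above.

[source,yaml]
----
networking:
  machineNetwork:
  - cidr: 10.0.0.0/16                          # should cover the address space of the existing subnets
platform:
  azure:
    region: centralus                          # example region
    networkResourceGroupName: example-vnet-rg  # resource group that contains the existing VNet
    virtualNetwork: example-vnet               # existing VNet to install into
    controlPlaneSubnet: example-master-subnet  # existing subnet for control plane machines
    computeSubnet: example-worker-subnet       # existing subnet for compute machines
----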
diff --git a/installing/installing_azure/manually-creating-iam-azure.adoc b/installing/installing_azure/manually-creating-iam-azure.adoc deleted file mode 100644 index 6b652619bb9c..000000000000 --- a/installing/installing_azure/manually-creating-iam-azure.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -[id="manually-creating-iam-azure"] -= Manually creating IAM for Azure -include::_attributes/common-attributes.adoc[] -:context: manually-creating-iam-azure - -toc::[] - -In environments where the cloud identity and access management (IAM) APIs are not reachable, or the administrator prefers not to store an administrator-level credential secret in the cluster `kube-system` namespace, you can put the Cloud Credential Operator (CCO) into manual mode before you install the cluster. - -include::modules/alternatives-to-storing-admin-secrets-in-kube-system.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* For a detailed description of all available CCO credential modes and their supported platforms, see xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator[About the Cloud Credential Operator]. - -include::modules/manually-create-identity-access-management.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] - -[id="manually-creating-iam-azure-next-steps"] -== Next steps - -* Install an {product-title} cluster: -** xref:../../installing/installing_azure/installing-azure-default.adoc#installing-azure-default[Installing a cluster quickly on Azure] with default options on installer-provisioned infrastructure -** xref:../../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-customizations[Install a cluster with cloud customizations on installer-provisioned infrastructure] -** xref:../../installing/installing_azure/installing-azure-network-customizations.adoc#installing-azure-network-customizations[Install a cluster with network customizations on installer-provisioned infrastructure] diff --git a/installing/installing_azure/modules b/installing/installing_azure/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_azure/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_azure/preparing-to-install-on-azure.adoc b/installing/installing_azure/preparing-to-install-on-azure.adoc deleted file mode 100644 index 0fdcaa209b57..000000000000 --- a/installing/installing_azure/preparing-to-install-on-azure.adoc +++ /dev/null @@ -1,56 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-azure"] -= Preparing to install on Azure -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-azure - -toc::[] - -[id="preparing-to-install-on-azure-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. 
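Connecting the manual credentials option referenced in this preparation assembly to the installation configuration: placing the Cloud Credential Operator into manual mode is normally a single top-level field in `install-config.yaml`, as sketched below with placeholder values. The surrounding workflow, such as extracting `CredentialsRequest` objects and creating the corresponding secrets, is described in the manual IAM assembly.

[source,yaml]
----
apiVersion: v1
baseDomain: example.com        # placeholder
credentialsMode: Manual        # stops the installer from storing an admin-level credential in kube-system
metadata:
  name: example-cluster        # placeholder
# compute, controlPlane, networking, and platform stanzas follow as in any Azure installation
----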
- -[id="requirements-for-installing-ocp-on-azure"] -== Requirements for installing {product-title} on Azure - -Before installing {product-title} on Microsoft Azure, you must configure an Azure account. See xref:../../installing/installing_azure/installing-azure-account.adoc#installing-azure-account[Configuring an Azure account] for details about account configuration, account limits, public DNS zone configuration, required roles, creating service principals, and supported Azure regions. - -If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, see xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Manually creating IAM for Azure] for other options. - -[id="choosing-an-method-to-install-ocp-on-azure"] -== Choosing a method to install {product-title} on Azure - -You can install {product-title} on installer-provisioned or user-provisioned infrastructure. The default installation type uses installer-provisioned infrastructure, where the installation program provisions the underlying infrastructure for the cluster. You can also install {product-title} on infrastructure that you provision. If you do not use infrastructure that the installation program provisions, you must manage and maintain the cluster resources yourself. - -See xref:../../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process] for more information about installer-provisioned and user-provisioned installation processes. - -[id="choosing-an-method-to-install-ocp-on-azure-installer-provisioned"] -=== Installing a cluster on installer-provisioned infrastructure - -You can install a cluster on Azure infrastructure that is provisioned by the {product-title} installation program, by using one of the following methods: - -* **xref:../../installing/installing_azure/installing-azure-default.adoc#installing-azure-default[Installing a cluster quickly on Azure]**: You can install {product-title} on Azure infrastructure that is provisioned by the {product-title} installation program. You can install a cluster quickly by using the default configuration options. - -* **xref:../../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-customizations[Installing a customized cluster on Azure]**: You can install a customized cluster on Azure infrastructure that the installation program provisions. The installation program allows for some customization to be applied at the installation stage. Many other customization options are available xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-cluster-tasks[post-installation]. - -* **xref:../../installing/installing_azure/installing-azure-network-customizations.adoc#installing-azure-network-customizations[Installing a cluster on Azure with network customizations]**: You can customize your {product-title} network configuration during installation, so that your cluster can coexist with your existing IP address allocations and adhere to your network requirements. - -* **xref:../../installing/installing_azure/installing-azure-vnet.adoc#installing-azure-vnet[Installing a cluster on Azure into an existing VNet]**: You can install {product-title} on an existing Azure Virtual Network (VNet) on Azure. 
You can use this installation method if you have constraints set by the guidelines of your company, such as limits when creating new accounts or infrastructure. - -* **xref:../../installing/installing_azure/installing-azure-private.adoc#installing-azure-private[Installing a private cluster on Azure]**: You can install a private cluster into an existing Azure Virtual Network (VNet) on Azure. You can use this method to deploy {product-title} on an internal network that is not visible to the internet. - -* **xref:../../installing/installing_azure/installing-azure-government-region.adoc#installing-azure-government-region[Installing a cluster on Azure into a government region]**: {product-title} can be deployed into Microsoft Azure Government (MAG) regions that are specifically designed for US government agencies at the federal, state, and local level, as well as contractors, educational institutions, and other US customers that must run sensitive workloads on Azure. - -[id="choosing-an-method-to-install-ocp-on-azure-user-provisioned"] -=== Installing a cluster on user-provisioned infrastructure - -You can install a cluster on Azure infrastructure that you provision, by using the following method: - -* **xref:../../installing/installing_azure/installing-azure-user-infra.adoc#installing-azure-user-infra[Installing a cluster on Azure using ARM templates]**: You can install {product-title} on Azure by using infrastructure that you provide. You can use the provided Azure Resource Manager (ARM) templates to assist with an installation. - -[id="preparing-to-install-on-azure-next-steps"] -== Next steps - -* xref:../../installing/installing_azure/installing-azure-account.adoc#installing-azure-account[Configuring an Azure account] diff --git a/installing/installing_azure/snippets b/installing/installing_azure/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_azure/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_azure/uninstalling-cluster-azure.adoc b/installing/installing_azure/uninstalling-cluster-azure.adoc deleted file mode 100644 index 2b0264be291c..000000000000 --- a/installing/installing_azure/uninstalling-cluster-azure.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-azure"] -= Uninstalling a cluster on Azure -include::_attributes/common-attributes.adoc[] -:context: uninstall-cluster-azure - -toc::[] - -You can remove a cluster that you deployed to Microsoft Azure. 
- -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] diff --git a/installing/installing_azure_stack_hub/_attributes b/installing/installing_azure_stack_hub/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_azure_stack_hub/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_azure_stack_hub/images b/installing/installing_azure_stack_hub/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/installing/installing_azure_stack_hub/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc b/installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc deleted file mode 100644 index 222d71807560..000000000000 --- a/installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc +++ /dev/null @@ -1,39 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-stack-hub-account"] -= Configuring an Azure Stack Hub account -include::_attributes/common-attributes.adoc[] -:context: installing-azure-stack-hub-account - -toc::[] - -Before you can install {product-title}, you must configure a Microsoft Azure account. - -[IMPORTANT] -==== -All Azure resources that are available through public endpoints are subject to resource name restrictions, and you cannot create resources that use certain terms. For a list of terms that Azure restricts, see link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-reserved-resource-name[Resolve reserved resource name errors] in the Azure documentation. -==== - -include::modules/installation-azure-limits.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../scalability_and_performance/optimization/optimizing-storage.adoc#optimizing-storage[Optimizing storage]. - -include::modules/installation-azure-stack-hub-network-config.adoc[leveloffset=+1] - -include::modules/installation-azure-stack-hub-permissions.adoc[leveloffset=+1] - -include::modules/installation-azure-service-principal.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* For more information about CCO modes, see xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator-modes[About the Cloud Credential Operator]. - -[id="next-steps_installing-azure-stack-hub-account"] -== Next steps - -* Install an {product-title} cluster: -** xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc#installing-azure-stack-hub-default[Installing a cluster quickly on Azure Stack Hub]. -** Install an {product-title} cluster on Azure Stack Hub with user-provisioned infrastructure by following xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc#installing-azure-stack-hub-user-infra[Installing a cluster on Azure Stack Hub using ARM templates]. 
diff --git a/installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc b/installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc deleted file mode 100644 index 52362cb37c03..000000000000 --- a/installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc +++ /dev/null @@ -1,72 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-stack-hub-default"] -= Installing a cluster on Azure Stack Hub with an installer-provisioned infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-azure-stack-hub-default - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on Microsoft Azure Stack Hub with an installer-provisioned infrastructure. However, you must manually configure the `install-config.yaml` file to specify values that are specific to Azure Stack Hub. - -[NOTE] -==== -While you can select `azure` when using the installation program to deploy a cluster using installer-provisioned infrastructure, this option is only supported for the Azure Public Cloud. -==== - -[id="prerequisites_installing-azure-stack-hub-default"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc#installing-azure-stack-hub-account[configured an Azure Stack Hub account] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You verified that you have approximately 16 GB of local disk space. Installing the cluster requires that you download the {op-system} virtual hard disk (VHD) cluster image and upload it to your Azure Stack Hub environment so that it is accessible during deployment. Decompressing the VHD files requires this amount of local disk space. 
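To give a sense of the Azure Stack Hub specific values mentioned above, the manually configured `install-config.yaml` typically identifies the hub's Azure Resource Manager endpoint, the cloud name, and the uploaded {op-system} VHD in the Azure platform section. The following fragment is a sketch built from placeholder endpoints and names; check it against the Azure Stack Hub sample configuration file module included in this assembly.

[source,yaml]
----
platform:
  azure:
    armEndpoint: https://management.local.azurestack.external    # placeholder ARM endpoint of your hub
    cloudName: AzureStackCloud                                    # identifies Azure Stack Hub rather than the public cloud
    region: local                                                 # placeholder region name defined by the hub operator
    baseDomainResourceGroupName: example-rg                       # placeholder resource group for the base domain records
    clusterOSImage: https://vhdsa.blob.local.azurestack.external/vhd/rhcos.vhd   # placeholder URL of the uploaded VHD
----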
- -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-azure-user-infra-uploading-rhcos.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] -include::modules/installation-azure-stack-hub-config-yaml.adoc[leveloffset=+2] - -include::modules/manually-create-identity-access-management.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-azure-stack-hub-default-cco"] -.Additional resources -* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#manually-maintained-credentials-upgrade_preparing-manual-creds-update[Updating cloud provider resources with manually maintained credentials] - -include::modules/azure-stack-hub-internal-ca.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-azure-stack-hub-default-console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-azure-stack-hub-default-telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -[id="next-steps_installing-azure-stack-hub-default"] -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_azure_stack_hub/installing-azure-stack-hub-network-customizations.adoc b/installing/installing_azure_stack_hub/installing-azure-stack-hub-network-customizations.adoc deleted file mode 100644 index 393738d4fb93..000000000000 --- a/installing/installing_azure_stack_hub/installing-azure-stack-hub-network-customizations.adoc +++ /dev/null @@ -1,88 +0,0 @@ -[id="installing-azure-stack-hub-network-customizations"] -= Installing a cluster on Azure Stack Hub with network customizations -include::_attributes/common-attributes.adoc[] -:context: installing-azure-stack-hub-network-customizations - -toc::[] - -In {product-title} version {product-version}, you can install a cluster with a customized network configuration on infrastructure that the installation program provisions on Azure Stack Hub. 
By customizing your network configuration, your cluster can coexist with existing IP address allocations in your environment and integrate with existing MTU and VXLAN configurations. - -[NOTE] -==== -While you can select `azure` when using the installation program to deploy a cluster using installer-provisioned infrastructure, this option is only supported for the Azure Public Cloud. -==== - -[id="prerequisites_installing-azure-stack-hub-network-customizations"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc#installing-azure-stack-hub-account[configured an Azure Stack Hub account] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You verified that you have approximately 16 GB of local disk space. Installing the cluster requires that you download the {op-system} virtual hard disk (VHD) cluster image and upload it to your Azure Stack Hub environment so that it is accessible during deployment. Decompressing the VHD files requires this amount of local disk space. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-azure-user-infra-uploading-rhcos.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] -include::modules/installation-azure-stack-hub-config-yaml.adoc[leveloffset=+2] - -include::modules/manually-create-identity-access-management.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-azure-stack-hub-network-customizations-cco"] -.Additional resources -* xref:../../updating/updating_a_cluster/updating-cluster-web-console.adoc#manually-maintained-credentials-upgrade_updating-cluster-web-console[Updating a cluster using the web console] -* xref:../../updating/updating_a_cluster/updating-cluster-cli.adoc#manually-maintained-credentials-upgrade_updating-cluster-cli[Updating a cluster using the CLI] - -include::modules/azure-stack-hub-internal-ca.adoc[leveloffset=+1] - -//include::modules/installation-launching-installer.adoc[leveloffset=+1] -//Leaving this stubbed in case future might remove the requirement to manually configure the install configuration file. - -// Network Operator specific configuration -include::modules/nw-network-config.adoc[leveloffset=+1] -include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] -include::modules/nw-operator-cr.adoc[leveloffset=+1] -include::modules/configuring-hybrid-ovnkubernetes.adoc[leveloffset=+1] - - -[NOTE] -==== -For more information on using Linux and Windows nodes in the same cluster, see xref:../../windows_containers/understanding-windows-container-workloads.adoc#understanding-windows-container-workloads[Understanding Windows container workloads]. 
-==== - - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-azure-stack-hub-network-customizations-console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console]. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-azure-stack-hub-network-customizations-telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -[id="next-steps_installing-azure-stack-hub-network-customizations"] -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc b/installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc deleted file mode 100644 index f96cdaca9dee..000000000000 --- a/installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc +++ /dev/null @@ -1,121 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-stack-hub-user-infra"] -= Installing a cluster on Azure Stack Hub using ARM templates -include::_attributes/common-attributes.adoc[] -:context: installing-azure-stack-hub-user-infra - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on Microsoft Azure Stack Hub by using infrastructure that you provide. - -Several link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/overview[Azure Resource Manager] (ARM) templates are provided to assist in completing these steps or to help model your own. - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the cloud provider and the installation process of {product-title}. Several ARM templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods; the templates are just an example. -==== - -[id="prerequisites_installing-azure-stack-hub-user-infra"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. 
-* You xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc#installing-azure-stack-hub-account[configured an Azure Stack Hub account] to host the cluster. -* You downloaded the Azure CLI and installed it on your computer. See link:https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest[Install the Azure CLI] in the Azure documentation. The documentation below was tested using version `2.28.0` of the Azure CLI. Azure CLI commands might perform differently based on the version you use. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-azure-stack-hub-user-infra-config-project"] -== Configuring your Azure Stack Hub project - -Before you can install {product-title}, you must configure an Azure project to host it. - -[IMPORTANT] -==== -All Azure Stack Hub resources that are available through public endpoints are subject to resource name restrictions, and you cannot create resources that use certain terms. For a list of terms that Azure Stack Hub restricts, see link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-reserved-resource-name[Resolve reserved resource name errors] in the Azure documentation. -==== - -include::modules/installation-azure-limits.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../scalability_and_performance/optimization/optimizing-storage.adoc#optimizing-storage[Optimizing storage]. - -include::modules/installation-azure-stack-hub-network-config.adoc[leveloffset=+2] - -You can view Azure's DNS solution by visiting this xref:installation-azure-create-dns-zones_{context}[example for creating DNS zones]. - -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-azure-stack-hub-permissions.adoc[leveloffset=+2] -include::modules/installation-azure-service-principal.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* For more information about CCO modes, see xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator-modes[About the Cloud Credential Operator]. 
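Because the prerequisites above note that the procedure was tested with Azure CLI version `2.28.0`, it can be worth confirming the installed version and the active cloud environment before continuing. A minimal check might look like this:

[source,terminal]
----
# Confirm the Azure CLI version and which cloud environment is currently active.
$ az --version
$ az cloud show --query name
----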
- -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate.adoc[leveloffset=+1] -include::modules/installation-initializing-manual.adoc[leveloffset=+2] -include::modules/installation-azure-stack-hub-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] -include::modules/installation-user-infra-exporting-common-variables-arm-templates.adoc[leveloffset=+2] -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+2] -include::modules/installation-disk-partitioning-upi-templates.adoc[leveloffset=+2] - -include::modules/installation-azure-create-resource-group-and-identity.adoc[leveloffset=+1] - -include::modules/installation-azure-user-infra-uploading-rhcos.adoc[leveloffset=+1] - -include::modules/installation-azure-create-dns-zones.adoc[leveloffset=+1] - -You can learn more about xref:installation-azure-stack-hub-network-config_{context}[configuring a DNS zone in Azure Stack Hub] by visiting that section. - -include::modules/installation-creating-azure-vnet.adoc[leveloffset=+1] -include::modules/installation-arm-vnet.adoc[leveloffset=+2] - -include::modules/installation-azure-user-infra-deploying-rhcos.adoc[leveloffset=+1] -include::modules/installation-arm-image-storage.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+1] - -include::modules/installation-creating-azure-dns.adoc[leveloffset=+1] -include::modules/installation-arm-dns.adoc[leveloffset=+2] - -include::modules/installation-creating-azure-bootstrap.adoc[leveloffset=+1] -include::modules/installation-arm-bootstrap.adoc[leveloffset=+2] - -include::modules/installation-creating-azure-control-plane.adoc[leveloffset=+1] -include::modules/installation-arm-control-plane.adoc[leveloffset=+2] - -include::modules/installation-azure-user-infra-wait-for-bootstrap.adoc[leveloffset=+1] - -include::modules/installation-creating-azure-worker.adoc[leveloffset=+1] -include::modules/installation-arm-worker.adoc[leveloffset=+2] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-azure-create-ingress-dns-records.adoc[leveloffset=+1] - -include::modules/installation-azure-user-infra-completing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. diff --git a/installing/installing_azure_stack_hub/manually-creating-iam-azure-stack-hub.adoc b/installing/installing_azure_stack_hub/manually-creating-iam-azure-stack-hub.adoc deleted file mode 100644 index 9beb6096fff2..000000000000 --- a/installing/installing_azure_stack_hub/manually-creating-iam-azure-stack-hub.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: ASSEMBLY -[id="manually-creating-iam-azure-stack-hub"] -= Manually creating IAM for Azure Stack Hub -include::_attributes/common-attributes.adoc[] -:context: manually-creating-iam-azure-stack-hub - -toc::[] - -In environments where the cloud identity and access management (IAM) APIs are not reachable, you must put the Cloud Credential Operator (CCO) into manual mode before you install the cluster. 
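One way to picture the manual-mode requirement described in the paragraph above: the `credentialsMode` field is set in `install-config.yaml` before the manifests are generated. This is only a sketch with placeholder values; the full procedure is in the included `manually-create-identity-access-management` module.

[source,yaml]
----
apiVersion: v1
baseDomain: example.com        # placeholder
credentialsMode: Manual        # puts the Cloud Credential Operator into manual mode
# ...remaining install-config.yaml fields...
----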
- -//// -In environments where the cloud identity and access management (IAM) APIs are not reachable, or the administrator prefers not to store an administrator-level credential secret in the cluster `kube-system` namespace, you can put the Cloud Credential Operator (CCO) into manual mode before you install the cluster. -//// -// Until ASH supports other credential scenarios besides manual mode, the tone for this article will be manual mode use only. - -include::modules/alternatives-to-storing-admin-secrets-in-kube-system.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* For a detailed description of all available CCO credential modes and their supported platforms, see xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator[About the Cloud Credential Operator]. - -include::modules/manually-create-identity-access-management.adoc[leveloffset=+1] - -// I was going to update this but I think the assembly is no longer used and will ask install team if I can get rid of it entirely. -include::modules/manually-maintained-credentials-upgrade.adoc[leveloffset=+1] - -[id="next-steps_manually-creating-iam-azure-stack-hub"] -== Next steps - -* Install an {product-title} cluster: -** xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc#installing-azure-stack-hub-default[Installing a cluster quickly on Azure Stack Hub]. -** xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc#installing-azure-stack-hub-user-infra[Installing a cluster on Azure Stack Hub using ARM templates]. diff --git a/installing/installing_azure_stack_hub/modules b/installing/installing_azure_stack_hub/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/installing/installing_azure_stack_hub/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/installing/installing_azure_stack_hub/preparing-to-install-on-azure-stack-hub.adoc b/installing/installing_azure_stack_hub/preparing-to-install-on-azure-stack-hub.adoc deleted file mode 100644 index 2ca66634c32b..000000000000 --- a/installing/installing_azure_stack_hub/preparing-to-install-on-azure-stack-hub.adoc +++ /dev/null @@ -1,47 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-azure-stack-hub"] -= Preparing to install on Azure Stack Hub -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-azure-stack-hub - -toc::[] - -[id="preparing-to-install-on-ash-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You have installed Azure Stack Hub version 2008 or later. - -[id="requirements-for-installing-ocp-on-ash"] -== Requirements for installing {product-title} on Azure Stack Hub - -Before installing {product-title} on Microsoft Azure Stack Hub, you must configure an Azure account. - -See xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc#installing-azure-stack-hub-account[Configuring an Azure Stack Hub account] for details about account configuration, account limits, DNS zone configuration, required roles, and creating service principals. 
- -[id="choosing-a-method-to-install-ocp-on-ash"] -== Choosing a method to install {product-title} on Azure Stack Hub - -You can install {product-title} on installer-provisioned or user-provisioned infrastructure. The default installation type uses installer-provisioned infrastructure, where the installation program provisions the underlying infrastructure for the cluster. You can also install {product-title} on infrastructure that you provision. If you do not use infrastructure that the installation program provisions, you must manage and maintain the cluster resources yourself. - -See xref:../../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process] for more information about installer-provisioned and user-provisioned installation processes. - -[id="choosing-a-method-to-install-ocp-on-ash-installer-provisioned"] -=== Installing a cluster on installer-provisioned infrastructure - -You can install a cluster on Azure Stack Hub infrastructure that is provisioned by the {product-title} installation program, by using the following method: - -* **xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc#installing-azure-stack-hub-default[Installing a cluster on Azure Stack Hub with an installer-provisioned infrastructure]**: You can install {product-title} on Azure Stack Hub infrastructure that is provisioned by the {product-title} installation program. - -[id="choosing-a-method-to-install-ocp-on-ash-user-provisioned"] -=== Installing a cluster on user-provisioned infrastructure - -You can install a cluster on Azure Stack Hub infrastructure that you provision, by using the following method: - -* **xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc#installing-azure-stack-hub-user-infra[Installing a cluster on Azure Stack Hub using ARM templates]**: You can install {product-title} on Azure Stack Hub by using infrastructure that you provide. You can use the provided Azure Resource Manager (ARM) templates to assist with an installation. - -[id="preparing-to-install-on-ash-next-steps"] -== Next steps - -* xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc#installing-azure-stack-hub-account[Configuring an Azure Stack Hub account] diff --git a/installing/installing_azure_stack_hub/snippets b/installing/installing_azure_stack_hub/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_azure_stack_hub/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_azure_stack_hub/uninstalling-cluster-azure-stack-hub.adoc b/installing/installing_azure_stack_hub/uninstalling-cluster-azure-stack-hub.adoc deleted file mode 100644 index 72ede5bd654f..000000000000 --- a/installing/installing_azure_stack_hub/uninstalling-cluster-azure-stack-hub.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-azure-stack-hub"] -= Uninstalling a cluster on Azure Stack Hub -include::_attributes/common-attributes.adoc[] -:context: uninstall-cluster-azure-stack-hub - -toc::[] - -You can remove a cluster that you deployed to Azure Stack Hub. 
- -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] diff --git a/installing/installing_bare_metal/_attributes b/installing/installing_bare_metal/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_bare_metal/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_bare_metal/images b/installing/installing_bare_metal/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_bare_metal/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc b/installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc deleted file mode 100644 index 5ba1910e9cb2..000000000000 --- a/installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc +++ /dev/null @@ -1,197 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-bare-metal-network-customizations"] -= Installing a user-provisioned bare metal cluster with network customizations -include::_attributes/common-attributes.adoc[] -:context: installing-bare-metal-network-customizations - -toc::[] - -In {product-title} {product-version}, you can install a cluster on bare -metal infrastructure that you provision with customized network configuration -options. By customizing your network configuration, your cluster can coexist -with existing IP address allocations in your environment and integrate with -existing MTU and VXLAN configurations. - -When you customize {product-title} networking, you must set most of the network configuration parameters during installation. You can modify only `kubeProxy` network configuration parameters in a running -cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[Installing a user-provisioned bare metal cluster on a restricted network] for more information about performing a restricted network installation on bare metal infrastructure that you provision. - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. 
- -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-three-node-cluster_installing-bare-metal[Configuring a three-node cluster] for details about deploying three-node clusters in bare metal environments. -* See xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-approve-csrs_installing-bare-metal-network-customizations[Approving the certificate signing requests for your machines] for more information about approving cluster certificate signing requests after installation. - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -* xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-user-provisioned-validating-dns_installing-bare-metal-network-customizations[Validating DNS resolution for user-provisioned infrastructure] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-requirements-user-infra_installing-bare-metal[Requirements for a cluster with user-provisioned infrastructure] -* xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#creating-machines-bare-metal_installing-bare-metal[Installing {op-system} and starting the {product-title} bootstrap process] -* xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-host-names-dhcp-user-infra_installing-bare-metal[Setting the cluster node hostnames through DHCP] -* xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-user-infra-machines-advanced_installing-bare-metal[Advanced RHCOS installation configuration] -* xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-network-user-infra_installing-bare-metal[Networking requirements for user-provisioned infrastructure] -* xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-dns-user-infra_installing-bare-metal[User-provisioned DNS requirements] -* xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-user-provisioned-validating-dns_installing-bare-metal[Validating DNS resolution for user-provisioned infrastructure] -* xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-load-balancing-user-infra_installing-bare-metal[Load balancing requirements for user-provisioned infrastructure] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* 
xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-dns-user-infra_installing-bare-metal[User-provisioned DNS requirements] -* xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-load-balancing-user-infra_installing-bare-metal[Load balancing requirements for user-provisioned infrastructure] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../support/troubleshooting/verifying-node-health.adoc#verifying-node-health[Verifying node health] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-load-balancing-user-infra_installing-bare-metal[Load balancing requirements for user-provisioned infrastructure] for more information on the API and application ingress load balancing requirements. - -// Network Operator specific configuration -include::modules/nw-network-config.adoc[leveloffset=+1] -include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-generate-ignition-configs.adoc[leveloffset=+1] - -include::modules/creating-machines-bare-metal.adoc[leveloffset=+1] - -include::modules/installation-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-pxe.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-advanced.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-advanced-console-configuration.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-enabling-serial-console.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-customizing-iso-or-pxe.adoc[leveloffset=+3] - -:boot-media: ISO image -:boot: iso -include::modules/installation-user-infra-machines-advanced-customizing-live.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-customizing-live-serial-console.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-ca-certs.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-network-config.adoc[leveloffset=+4] -:boot-media!: -:boot!: - -:boot-media: PXE environment -:boot: pxe -include::modules/installation-user-infra-machines-advanced-customizing-live.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-customizing-live-serial-console.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-ca-certs.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-network-config.adoc[leveloffset=+4] -:boot-media!: -:boot!: - -include::modules/installation-user-infra-machines-static-network.adoc[leveloffset=+3] - -include::modules/rhcos-enabling-multipath.adoc[leveloffset=+2] - -include::modules/architecture-rhcos-updating-bootloader.adoc[leveloffset=+2] - 
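To make the network-customization flow covered by the Cluster Network Operator modules in this assembly more concrete, a manifest such as the following sketch could be placed in `<installation_directory>/manifests/cluster-network-03-config.yml` before the cluster is deployed. The MTU value and plugin choice here are assumptions for illustration, not the module's authoritative sample.

[source,yaml]
----
apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  defaultNetwork:
    type: OVNKubernetes
    ovnKubernetesConfig:
      mtu: 1400        # example MTU chosen to fit an existing overlay; adjust for your environment
----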
-include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/troubleshooting/troubleshooting-installations.adoc#monitoring-installation-progress_troubleshooting-installations[Monitoring installation progress] for more information about monitoring the installation logs and retrieving diagnostic data if installation issues arise. - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/troubleshooting/troubleshooting-installations.adoc#installation-bootstrap-gather_troubleshooting-installations[Gathering logs from a failed installation] for details about gathering data in the event of a failed {product-title} installation. -* See xref:../../support/troubleshooting/troubleshooting-operator-issues.adoc#troubleshooting-operator-issues[Troubleshooting Operator issues] for steps to check Operator pod health across the cluster and gather Operator logs for diagnosis. - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-block-recreate-rollout-bare-metal.adoc[leveloffset=+2] - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-baremetal.adoc#configuring-registry-storage-baremetal[Set up your registry and configure registry storage]. diff --git a/installing/installing_bare_metal/installing-bare-metal.adoc b/installing/installing_bare_metal/installing-bare-metal.adoc deleted file mode 100644 index 9febfd635b59..000000000000 --- a/installing/installing_bare_metal/installing-bare-metal.adoc +++ /dev/null @@ -1,224 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-bare-metal"] -= Installing a user-provisioned cluster on bare metal -include::_attributes/common-attributes.adoc[] -:context: installing-bare-metal - -toc::[] - -In {product-title} {product-version}, you can install a cluster on -bare metal infrastructure that you provision. - -[IMPORTANT] -==== -While you might be able to follow this procedure to deploy a cluster on -virtualized or cloud environments, you must be aware of additional -considerations for non-bare metal platforms. Review the information in the -link:https://access.redhat.com/articles/4207611[guidelines for deploying {product-title} on non-tested platforms] -before you attempt to install an {product-title} cluster in such an environment. 
-==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[Installing a user-provisioned bare metal cluster on a restricted network] for more information about performing a restricted network installation on bare metal infrastructure that you provision. - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-three-node-cluster_installing-bare-metal[Configuring a three-node cluster] for details about deploying three-node clusters in bare metal environments. -* See xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-approve-csrs_installing-bare-metal[Approving the certificate signing requests for your machines] for more information about approving cluster certificate signing requests after installation. 
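Since the additional resources above point at approving certificate signing requests after installation, a minimal approval pass looks roughly like the following; the CSR name is a placeholder.

[source,terminal]
----
# List pending CSRs, then approve a specific request by name (placeholder).
$ oc get csr
$ oc adm certificate approve <csr_name>
----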
- -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-user-provisioned-validating-dns_installing-bare-metal[Validating DNS resolution for user-provisioned infrastructure] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-requirements-user-infra_installing-bare-metal[Requirements for a cluster with user-provisioned infrastructure] -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#creating-machines-bare-metal_installing-bare-metal[Installing {op-system} and starting the {product-title} bootstrap process] -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-host-names-dhcp-user-infra_installing-bare-metal[Setting the cluster node hostnames through DHCP] -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-user-infra-machines-advanced_installing-bare-metal[Advanced RHCOS installation configuration] -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-network-user-infra_installing-bare-metal[Networking requirements for user-provisioned infrastructure] -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-dns-user-infra_installing-bare-metal[User-provisioned DNS requirements] -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-user-provisioned-validating-dns_installing-bare-metal[Validating DNS resolution for user-provisioned infrastructure] -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-load-balancing-user-infra_installing-bare-metal[Load balancing requirements for user-provisioned infrastructure] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-dns-user-infra_installing-bare-metal[User-provisioned DNS requirements] -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-load-balancing-user-infra_installing-bare-metal[Load balancing requirements for user-provisioned infrastructure] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../support/troubleshooting/verifying-node-health.adoc#verifying-node-health[Verifying node health] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See 
xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-load-balancing-user-infra_installing-bare-metal[Load balancing requirements for user-provisioned infrastructure] for more information on the API and application ingress load balancing requirements. -* See xref:../../post_installation_configuration/enabling-cluster-capabilities.adoc[Enabling cluster capabilities] for more information on enabling cluster capabilities that were disabled prior to installation. -* See xref:../../installing/cluster-capabilities.adoc#explanation_of_capabilities_cluster-capabilities[Optional cluster capabilities in {product-title} {product-version}] for more information about the features provided by each capability. - - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[Recovering from expired control plane certificates] for more information about recovering kubelet certificates. - -include::modules/creating-machines-bare-metal.adoc[leveloffset=+1] - -include::modules/installation-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-pxe.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-advanced.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-advanced-console-configuration.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-enabling-serial-console.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-customizing-iso-or-pxe.adoc[leveloffset=+3] - -:boot-media: ISO image -:boot: iso -include::modules/installation-user-infra-machines-advanced-customizing-live.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-customizing-live-serial-console.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-ca-certs.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-network-config.adoc[leveloffset=+4] -:boot-media!: -:boot!: - -:boot-media: PXE environment -:boot: pxe -include::modules/installation-user-infra-machines-advanced-customizing-live.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-customizing-live-serial-console.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-ca-certs.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-network-config.adoc[leveloffset=+4] -:boot-media!: -:boot!: - -include::modules/installation-user-infra-machines-static-network.adoc[leveloffset=+3] - -include::modules/rhcos-enabling-multipath.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#creating-machines-bare-metal_installing-bare-metal[Installing {op-system} and starting the {product-title} bootstrap process] for more information on using special `coreos.inst.*` arguments to direct the live installer. 
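Because the note above refers to the special `coreos.inst.*` arguments that direct the live installer, here is a sketch of how such arguments commonly appear in a PXELINUX entry. The URLs, device, and Ignition file name are placeholders; the full, supported examples are in the included PXE module.

[source,text]
----
DEFAULT pxeboot
TIMEOUT 20
PROMPT 0
LABEL pxeboot
    KERNEL http://<http_server>/rhcos-live-kernel-x86_64
    APPEND initrd=http://<http_server>/rhcos-live-initramfs.x86_64.img coreos.live.rootfs_url=http://<http_server>/rhcos-live-rootfs.x86_64.img coreos.inst.install_dev=/dev/sda coreos.inst.ignition_url=http://<http_server>/worker.ign
----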
- -include::modules/architecture-rhcos-updating-bootloader.adoc[leveloffset=+2] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/troubleshooting/troubleshooting-installations.adoc#monitoring-installation-progress_troubleshooting-installations[Monitoring installation progress] for more information about monitoring the installation logs and retrieving diagnostic data if installation issues arise. - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/troubleshooting/troubleshooting-installations.adoc#installation-bootstrap-gather_troubleshooting-installations[Gathering logs from a failed installation] for details about gathering data in the event of a failed {product-title} installation. -* See xref:../../support/troubleshooting/troubleshooting-operator-issues.adoc#troubleshooting-operator-issues[Troubleshooting Operator issues] for steps to check Operator pod health across the cluster and gather Operator logs for diagnosis. - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-baremetal.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout-bare-metal.adoc[leveloffset=+3] - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-baremetal.adoc#configuring-registry-storage-baremetal[Set up your registry and configure registry storage]. diff --git a/installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc b/installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc deleted file mode 100644 index 6887619c59b3..000000000000 --- a/installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc +++ /dev/null @@ -1,226 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-bare-metal"] -= Installing a user-provisioned bare metal cluster on a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-bare-metal - -toc::[] - -In {product-title} {product-version}, you can install a cluster on -bare metal infrastructure that you provision in a restricted network. 
- -[IMPORTANT] -==== -While you might be able to follow this procedure to deploy a cluster on -virtualized or cloud environments, you must be aware of additional -considerations for non-bare metal platforms. Review the information in the -link:https://access.redhat.com/articles/4207611[guidelines for deploying {product-title} on non-tested platforms] -before you attempt to install an {product-title} cluster in such an environment. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a registry on your mirror host] and obtained the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps. -==== -* You provisioned xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[persistent storage] for your cluster. To deploy a private image registry, your storage must provide ReadWriteMany access modes. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-three-node-cluster_installing-restricted-networks-bare-metal[Configuring a three-node cluster] for details about deploying three-node clusters in bare metal environments. -* See xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-approve-csrs_installing-bare-metal[Approving the certificate signing requests for your machines] for more information about approving cluster certificate signing requests after installation. 
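To give a feel for how the mirror registry information mentioned in the prerequisites is consumed, a restricted-network `install-config.yaml` typically carries sections like the sketch below. The registry hostnames and certificate are placeholders, and the included configuration module contains the supported sample.

[source,yaml]
----
additionalTrustBundle: |            # CA certificate of the mirror registry (placeholder)
  -----BEGIN CERTIFICATE-----
  <certificate_contents>
  -----END CERTIFICATE-----
imageContentSources:                # mirror data gathered while mirroring the release images
- mirrors:
  - <mirror_registry>/<repository>
  source: quay.io/openshift-release-dev/ocp-release
- mirrors:
  - <mirror_registry>/<repository>
  source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
----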
- -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-user-provisioned-validating-dns_installing-bare-metal[Validating DNS resolution for user-provisioned infrastructure] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-requirements-user-infra_installing-bare-metal[Requirements for a cluster with user-provisioned infrastructure] -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#creating-machines-bare-metal_installing-bare-metal[Installing {op-system} and starting the {product-title} bootstrap process] -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-host-names-dhcp-user-infra_installing-bare-metal[Setting the cluster node hostnames through DHCP] -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-user-infra-machines-advanced_installing-bare-metal[Advanced RHCOS installation configuration] -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-network-user-infra_installing-bare-metal[Networking requirements for user-provisioned infrastructure] -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-dns-user-infra_installing-bare-metal[User-provisioned DNS requirements] -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-user-provisioned-validating-dns_installing-bare-metal[Validating DNS resolution for user-provisioned infrastructure] -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-load-balancing-user-infra_installing-bare-metal[Load balancing requirements for user-provisioned infrastructure] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-dns-user-infra_installing-bare-metal[User-provisioned DNS requirements] -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-load-balancing-user-infra_installing-bare-metal[Load balancing requirements for user-provisioned infrastructure] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../support/troubleshooting/verifying-node-health.adoc#verifying-node-health[Verifying node health] - -//You extract the installation program from the mirrored content. - -//You can install the CLI on the mirror host. 
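The commented-out lines above allude to extracting the installation program from the mirrored content; on a mirror host that step commonly looks something like the following, where the pull secret path, registry, and tag are placeholders defined during the mirroring procedure.

[source,terminal]
----
# Extract the openshift-install binary from the mirrored release image (placeholder values).
$ oc adm release extract -a <pull_secret_file> --command=openshift-install "<mirror_registry>/<repository>:<release_tag>"
----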
- -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-load-balancing-user-infra_installing-bare-metal[Load balancing requirements for user-provisioned infrastructure] for more information on the API and application ingress load balancing requirements. - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[Recovering from expired control plane certificates] for more information about recovering kubelet certificates. - -include::modules/installation-special-config-chrony.adoc[leveloffset=+1] - -include::modules/creating-machines-bare-metal.adoc[leveloffset=+1] - -include::modules/installation-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-pxe.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-advanced.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-advanced-console-configuration.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-enabling-serial-console.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-customizing-iso-or-pxe.adoc[leveloffset=+3] - -:boot-media: ISO image -:boot: iso -include::modules/installation-user-infra-machines-advanced-customizing-live.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-customizing-live-serial-console.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-ca-certs.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-network-config.adoc[leveloffset=+4] -:boot-media!: -:boot!: - -:boot-media: PXE environment -:boot: pxe -include::modules/installation-user-infra-machines-advanced-customizing-live.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-customizing-live-serial-console.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-ca-certs.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-network-config.adoc[leveloffset=+4] -:boot-media!: -:boot!: - -include::modules/installation-user-infra-machines-static-network.adoc[leveloffset=+3] - -include::modules/rhcos-enabling-multipath.adoc[leveloffset=+2] - -include::modules/architecture-rhcos-updating-bootloader.adoc[leveloffset=+2] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/troubleshooting/troubleshooting-installations.adoc#monitoring-installation-progress_troubleshooting-installations[Monitoring installation progress] for more information about monitoring the installation logs and retrieving diagnostic data if installation issues arise. 
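As a companion to the monitoring reference above, installation progress is usually followed from the install host with the installation program's `wait-for` subcommands; the directory path is a placeholder.

[source,terminal]
----
# Watch for the bootstrap phase, then the full installation, to finish.
$ ./openshift-install wait-for bootstrap-complete --dir <installation_directory> --log-level=info
$ ./openshift-install wait-for install-complete --dir <installation_directory> --log-level=info
----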
- -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/troubleshooting/troubleshooting-installations.adoc#installation-bootstrap-gather_troubleshooting-installations[Gathering logs from a failed installation] for details about gathering data in the event of a failed {product-title} installation. -* See xref:../../support/troubleshooting/troubleshooting-operator-issues.adoc#troubleshooting-operator-issues[Troubleshooting Operator issues] for steps to check Operator pod health across the cluster and gather Operator logs for diagnosis. - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-change-management-state.adoc[leveloffset=+3] - -include::modules/registry-configuring-storage-baremetal.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout-bare-metal.adoc[leveloffset=+3] - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-must-gather-disconnected[Configure image streams] for the Cluster Samples Operator and the `must-gather` tool. -* Learn how to xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[use Operator Lifecycle Manager (OLM) on restricted networks]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. 
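One of the modules included in this assembly configures OperatorHub for restricted networks. As a minimal sketch of the cluster-scoped resource involved, not a replacement for that procedure, disabling the default internet-backed catalog sources comes down to a change like the following:

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: OperatorHub
metadata:
  name: cluster                    # the OperatorHub configuration is a singleton named "cluster"
spec:
  # disable the default catalog sources so that only mirrored catalogs you add are used
  disableAllDefaultSources: true
----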
diff --git a/installing/installing_bare_metal/modules b/installing/installing_bare_metal/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_bare_metal/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_bare_metal/preparing-to-install-on-bare-metal.adoc b/installing/installing_bare_metal/preparing-to-install-on-bare-metal.adoc deleted file mode 100644 index f828cc638052..000000000000 --- a/installing/installing_bare_metal/preparing-to-install-on-bare-metal.adoc +++ /dev/null @@ -1,72 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-bare-metal"] -= Preparing for bare metal cluster installation -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-bare-metal - -toc::[] - -[id="preparing_preparing-to-install-on-bare-metal"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You have read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -include::modules/virt-planning-bare-metal-cluster-for-ocp-virt.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../virt/getting_started/virt-getting-started.adoc#virt-getting-started[Getting started with {VirtProductName}] -* xref:../../virt/install/preparing-cluster-for-virt.adoc#preparing-cluster-for-virt[Preparing your cluster for {VirtProductName}] -* xref:../../networking/hardware_networks/about-sriov.adoc#about-sriov[About Single Root I/O Virtualization (SR-IOV) hardware networks] -* xref:../../virt/virtual_machines/vm_networking/virt-attaching-vm-to-sriov-network.adoc#virt-attaching-vm-to-sriov-network[Connecting a virtual machine to an SR-IOV network] - -include::modules/nw-sriov-dual-nic-con.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc#agent-install-sample-config-bond-sriov_preparing-to-install-with-agent-based-installer[Example: Bonds and SR-IOV dual-nic node network configuration] - -* xref:../../installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#configuring-host-dual-network-interfaces-in-the-install-config-yaml-file_ipi-install-installation-workflow[Optional: Configuring host network interfaces for dual port NIC] - -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#bonding-multiple-sriov-network-interfaces-to-dual-port_installing-bare-metal[Bonding multiple SR-IOV network interfaces to a dual port NIC interface] - -[id="choosing-a-method-to-install-ocp-on-bare-metal"] -== Choosing a method to install {product-title} on bare metal - -The {product-title} installation program offers four methods for deploying a cluster: - -* *Interactive*: You can deploy a cluster with the web-based link:https://access.redhat.com/documentation/en-us/assisted_installer_for_openshift_container_platform/2022/html-single/assisted_installer_for_openshift_container_platform/index[{ai-full}]. This is the recommended approach for clusters with networks connected to the internet. The {ai-full} is the easiest way to install {product-title}, it provides smart defaults, and it performs pre-flight validations before installing the cluster. 
It also provides a RESTful API for automation and advanced configuration scenarios. - -* *Local Agent-based*: You can deploy a cluster locally with the xref:../../installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc#preparing-to-install-with-agent-based-installer[agent-based installer] for air-gapped or restricted networks. It provides many of the benefits of the {ai-full}, but you must download and configure the link:https://console.redhat.com/openshift/install/metal/agent-based[agent-based installer] first. Configuration is done with a command-line interface. - -* *Automated*: You can xref:../../installing/installing_bare_metal_ipi/ipi-install-overview.adoc#ipi-install-overview[deploy a cluster on installer-provisioned infrastructure], which the cluster then maintains. The installer uses each cluster host's baseboard management controller (BMC) for provisioning. You can deploy clusters in connected environments or in air-gapped or restricted networks. - -* *Full control*: You can deploy a cluster on xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[infrastructure that you prepare and maintain], which provides maximum customizability. You can deploy clusters in connected environments or in air-gapped or restricted networks. - -The clusters have the following characteristics: - -* Highly available infrastructure with no single points of failure is available by default. -* Administrators maintain control over what updates are applied and when. - -See xref:../../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process] for more information about installer-provisioned and user-provisioned installation processes. - -[id="choosing-a-method-to-install-ocp-on-bare-metal-installer-provisioned"] -=== Installing a cluster on installer-provisioned infrastructure - -You can install a cluster on bare metal infrastructure that is provisioned by the {product-title} installation program, by using the following method: - -* **xref:../../installing/installing_bare_metal_ipi/ipi-install-overview.adoc#ipi-install-overview[Installing an installer-provisioned cluster on bare metal]**: You can install {product-title} on bare metal by using installer provisioning. - -[id="choosing-a-method-to-install-ocp-on-bare-metal-user-provisioned"] -=== Installing a cluster on user-provisioned infrastructure - -You can install a cluster on bare metal infrastructure that you provision, by using one of the following methods: - -* **xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[Installing a user-provisioned cluster on bare metal]**: You can install {product-title} on bare metal infrastructure that you provision. For a cluster that contains user-provisioned infrastructure, you must deploy all of the required machines. - -* **xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installing-bare-metal-network-customizations[Installing a user-provisioned bare metal cluster with network customizations]**: You can install a bare metal cluster on user-provisioned infrastructure with network customizations. By customizing your network configuration, your cluster can coexist with existing IP address allocations in your environment and integrate with existing MTU and VXLAN configurations. Most of the network customizations must be applied at the installation stage. 
- -* **xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[Installing a user-provisioned bare metal cluster on a restricted network]**: You can install a user-provisioned bare metal cluster on a restricted or disconnected network by using a mirror registry. You can also use this installation method to ensure that your clusters only use container images that satisfy your organizational controls on external content. diff --git a/installing/installing_bare_metal/scaling-a-user-provisioned-cluster-with-the-bare-metal-operator.adoc b/installing/installing_bare_metal/scaling-a-user-provisioned-cluster-with-the-bare-metal-operator.adoc deleted file mode 100644 index 907fdb418584..000000000000 --- a/installing/installing_bare_metal/scaling-a-user-provisioned-cluster-with-the-bare-metal-operator.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_content-type: ASSEMBLY -[id="scaling-a-user-provisioned-cluster-with-the-bare-metal-operator"] -= Scaling a user-provisioned cluster with the Bare Metal Operator -include::_attributes/common-attributes.adoc[] -:context: scaling-a-user-provisioned-cluster-with-the-bare-metal-operator - -toc::[] - -After deploying a user-provisioned infrastructure cluster, you can use the Bare Metal Operator (BMO) and other metal3 components to scale bare-metal hosts in the cluster. This approach helps you to scale a user-provisioned cluster in a more automated way. - -include::modules/about-scaling-a-user-provisioned-installation-with-the-bare-metal-operator.adoc[leveloffset=+1] -include::modules/upi-prerequisites-for-scaling-a-upi-cluster.adoc[leveloffset=+2] -include::modules/upi-limitations-for-scaling-a-upi-cluster.adoc[leveloffset=+2] -include::modules/configuring-a-provisioning-resource-to-scale-user-provisioned-clusters.adoc[leveloffset=+1] -include::modules/upi-provisioning-new-hosts-in-a-upi-cluster.adoc[leveloffset=+1] -include::modules/upi-managing-existing-hosts-in-a-upi-cluster.adoc[leveloffset=+1] -include::modules/upi-removing-hosts-from-a-upi-cluster.adoc[leveloffset=+1] - - diff --git a/installing/installing_bare_metal/snippets b/installing/installing_bare_metal/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_bare_metal/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_bare_metal_ipi/_attributes b/installing/installing_bare_metal_ipi/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_bare_metal_ipi/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_bare_metal_ipi/images b/installing/installing_bare_metal_ipi/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/installing/installing_bare_metal_ipi/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/installing/installing_bare_metal_ipi/ipi-install-expanding-the-cluster.adoc b/installing/installing_bare_metal_ipi/ipi-install-expanding-the-cluster.adoc deleted file mode 100644 index 7d9217fd1f34..000000000000 --- a/installing/installing_bare_metal_ipi/ipi-install-expanding-the-cluster.adoc +++ /dev/null @@ -1,41 +0,0 @@ -:_content-type: ASSEMBLY -[id="ipi-install-expanding-the-cluster"] -= Expanding the cluster -include::_attributes/common-attributes.adoc[] -:context: ipi-install-expanding - -toc::[] - -After deploying an installer-provisioned {product-title} 
cluster, you can use the following procedures to expand the number of worker nodes. Ensure that each prospective worker node meets the prerequisites. - -[NOTE] -==== -Expanding the cluster using RedFish Virtual Media involves meeting minimum firmware requirements. See *Firmware requirements for installing with virtual media* in the *Prerequisites* section for additional details when expanding the cluster using RedFish Virtual Media. -==== - -include::modules/ipi-install-preparing-the-bare-metal-node.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#configuring-host-network-interfaces-in-the-install-config-yaml-file_ipi-install-installation-workflow[Optional: Configuring host network interfaces in the install-config.yaml file] for details on configuring the NMState syntax. -* See xref:../../scalability_and_performance/managing-bare-metal-hosts.adoc#automatically-scaling-machines-to-available-bare-metal-hosts_managing-bare-metal-hosts[Automatically scaling machines to the number of available bare metal hosts] for details on automatically scaling machines. - -include::modules/ipi-install-replacing-a-bare-metal-control-plane-node.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc#replacing-the-unhealthy-etcd-member[Replacing an unhealthy etcd member] - -* xref:../../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[Backing up etcd] - -* xref:../../post_installation_configuration/bare-metal-configuration.adoc#post-install-bare-metal-configuration[Bare metal configuration] - -* xref:../installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#bmc-addressing_ipi-install-installation-workflow[BMC addressing] - -include::modules/ipi-install-preparing-to-deploy-with-virtual-media-on-the-baremetal-network.adoc[leveloffset=+1] - -include::modules/ipi-install-diagnosing-duplicate-mac-address.adoc[leveloffset=+1] - -include::modules/ipi-install-provisioning-the-bare-metal-node.adoc[leveloffset=+1] diff --git a/installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc b/installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc deleted file mode 100644 index 857b3f5074ee..000000000000 --- a/installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc +++ /dev/null @@ -1,121 +0,0 @@ -:_content-type: ASSEMBLY -[id="ipi-install-installation-workflow"] -= Setting up the environment for an OpenShift installation -include::_attributes/common-attributes.adoc[] -:context: ipi-install-installation-workflow - -toc::[] - -include::modules/ipi-install-installing-rhel-on-the-provisioner-node.adoc[leveloffset=+1] - -include::modules/ipi-install-preparing-the-provisioner-node-for-openshift-install.adoc[leveloffset=+1] - -include::modules/ipi-install-configuring-networking.adoc[leveloffset=+1] - -include::modules/ipi-install-establishing-communication-between-subnets.adoc[leveloffset=+1] - -include::modules/ipi-install-retrieving-the-openshift-installer.adoc[leveloffset=+1] - -include::modules/ipi-install-extracting-the-openshift-installer.adoc[leveloffset=+1] - -include::modules/ipi-install-creating-an-rhcos-images-cache.adoc[leveloffset=+1] - -[id="ipi-install-configuration-files"] -[id="additional-resources_config"] -== Configuring the install-config.yaml 
file - -include::modules/ipi-install-configuring-the-install-config-file.adoc[leveloffset=+2] - -include::modules/ipi-install-additional-install-config-parameters.adoc[leveloffset=+2] - -include::modules/ipi-install-bmc-addressing.adoc[leveloffset=+2] - -include::modules/ipi-install-bmc-addressing-for-dell-idrac.adoc[leveloffset=+2] - -include::modules/ipi-install-bmc-addressing-for-hpe-ilo.adoc[leveloffset=+2] - -include::modules/ipi-install-bmc-addressing-for-fujitsu-irmc.adoc[leveloffset=+2] - -include::modules/ipi-install-root-device-hints.adoc[leveloffset=+2] - -include::modules/ipi-install-setting-proxy-settings-within-install-config.adoc[leveloffset=+2] - -include::modules/ipi-install-modifying-install-config-for-no-provisioning-network.adoc[leveloffset=+2] - -include::modules/ipi-install-modifying-install-config-for-dual-stack-network.adoc[leveloffset=+2] - -include::modules/ipi-install-configuring-host-network-interfaces-in-the-install-config.yaml-file.adoc[leveloffset=+2] - -include::modules/ipi-install-configuring-host-network-interfaces-for-subnets.adoc[leveloffset=+2] - -include::modules/ipi-install-configuring-host-dual-network-interfaces-in-the-install-config.yaml-file.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/configuring_and_managing_networking/configuring-network-bonding_configuring-and-managing-networking[Configuring network bonding] - -include::modules/ipi-install-configure-multiple-cluster-nodes.adoc[leveloffset=+2] - -include::modules/ipi-install-configuring-managed-secure-boot-in-the-install-config-file.adoc[leveloffset=+2] - -[id="ipi-install-manifest-configuration-files"] -== Manifest configuration files - -include::modules/ipi-install-creating-the-openshift-manifests.adoc[leveloffset=+2] - -include::modules/ipi-install-configuring-ntp-for-disconnected-clusters.adoc[leveloffset=+2] - -include::modules/ipi-install-configure-network-components-to-run-on-the-control-plane.adoc[leveloffset=+2] - -include::modules/ipi-install-deploying-routers-on-worker-nodes.adoc[leveloffset=+2] - -include::modules/ipi-install-configuring-the-bios.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_bare_metal_config"] -.Additional resources - -* xref:../../post_installation_configuration/bare-metal-configuration.adoc#post-install-bare-metal-configuration[Bare metal configuration] - -include::modules/ipi-install-configuring-the-raid.adoc[leveloffset=+2] - -include::modules/ipi-install-configuring-storage-on-nodes.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_raid_config"] -.Additional resources - -* xref:../../post_installation_configuration/bare-metal-configuration.adoc#post-install-bare-metal-configuration[Bare metal configuration] - -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html-single/managing_storage_devices/index#partition-naming-scheme_disk-partitions[Partition naming scheme] - -include::modules/ipi-install-creating-a-disconnected-registry.adoc[leveloffset=+1] - -[discrete] -[id="prerequisites_ipi-disconnected-registry"] -=== Prerequisites - -* If you have already prepared a mirror registry for xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#prerequisites_installing-mirroring-installation-images[Mirroring images for a disconnected installation], you can skip directly to 
xref:../../installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#ipi-modify-install-config-for-a-disconnected-registry_ipi-install-installation-workflow[Modify the install-config.yaml file to use the disconnected registry]. - -include::modules/ipi-install-preparing-a-disconnected-registry.adoc[leveloffset=+2] - -include::modules/ipi-install-mirroring-for-disconnected-registry.adoc[leveloffset=+2] - -include::modules/ipi-modify-install-config-for-a-disconnected-registry.adoc[leveloffset=+2] - -include::modules/ipi-install-validation-checklist-for-installation.adoc[leveloffset=+1] - -include::modules/ipi-install-deploying-the-cluster-via-the-openshift-installer.adoc[leveloffset=+1] - -include::modules/ipi-install-following-the-installation.adoc[leveloffset=+1] - -include::modules/ipi-install-verifying-static-ip-address-configuration.adoc[leveloffset=+1] - -include::modules/ipi-preparing-reinstall-cluster-bare-metal.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_creating_manifest_ignition"] -== Additional resources -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-user-infra-generate-k8s-manifest-ignition_installing-bare-metal[{product-title} Creating the Kubernetes manifest and Ignition config files] -* xref:../../updating/understanding_updates/understanding-update-channels-release.adoc#understanding-update-channels-releases[Understanding update channels and releases] diff --git a/installing/installing_bare_metal_ipi/ipi-install-overview.adoc b/installing/installing_bare_metal_ipi/ipi-install-overview.adoc deleted file mode 100644 index 734d0349e999..000000000000 --- a/installing/installing_bare_metal_ipi/ipi-install-overview.adoc +++ /dev/null @@ -1,30 +0,0 @@ -:_content-type: ASSEMBLY -[id="ipi-install-overview"] -= Overview -include::_attributes/common-attributes.adoc[] -:context: ipi-install - -toc::[] - -Installer-provisioned installation on bare metal nodes deploys and configures the infrastructure that a {product-title} cluster runs on. This guide provides a methodology for achieving a successful installer-provisioned bare-metal installation. The following diagram illustrates the installation environment in phase 1 of deployment: - -image::210_OpenShift_Baremetal_IPI_Deployment_updates_0122_1.png[Deployment phase one] - -For the installation, the key elements in the previous diagram are: - -- **Provisioner**: A physical machine that runs the installation program and hosts the bootstrap VM that deploys the controller of a new {product-title} cluster. -- **Bootstrap VM**: A virtual machine used in the process of deploying an {product-title} cluster. -- **Network bridges**: The bootstrap VM connects to the bare metal network and to the provisioning network, if present, via network bridges, `eno1` and `eno2`. - -In phase 2 of the deployment, the provisioner destroys the bootstrap VM automatically and moves the virtual IP addresses (VIPs) to the appropriate nodes. The API VIP moves to the control plane nodes and the Ingress VIP moves to the worker nodes. - -The following diagram illustrates phase 2 of deployment: - -image::210_OpenShift_Baremetal_IPI_Deployment_updates_0122_2.png[Deployment phase two] - -After this point, the node used by the provisioner can be removed or repurposed. From here, all additional provisioning tasks are carried out by controllers. - -[IMPORTANT] -==== -The provisioning network is optional, but it is required for PXE booting. 
If you deploy without a provisioning network, you must use a virtual media baseboard management controller (BMC) addressing option such as `redfish-virtualmedia` or `idrac-virtualmedia`. -==== diff --git a/installing/installing_bare_metal_ipi/ipi-install-post-installation-configuration.adoc b/installing/installing_bare_metal_ipi/ipi-install-post-installation-configuration.adoc deleted file mode 100644 index ae35096b1bfe..000000000000 --- a/installing/installing_bare_metal_ipi/ipi-install-post-installation-configuration.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_content-type: ASSEMBLY -[id="ipi-install-post-installation-configuration"] -= Installer-provisioned post-installation configuration -include::_attributes/common-attributes.adoc[] -:context: ipi-install-post-installation-configuration - -toc::[] - -After successfully deploying an installer-provisioned cluster, consider the following post-installation procedures. - -include::modules/ipi-install-configuring-ntp-for-disconnected-clusters.adoc[leveloffset=+1] - -include::modules/nw-enabling-a-provisioning-network-after-installation.adoc[leveloffset=+1] - -include::modules/nw-osp-configuring-external-load-balancer.adoc[leveloffset=+1] \ No newline at end of file diff --git a/installing/installing_bare_metal_ipi/ipi-install-prerequisites.adoc b/installing/installing_bare_metal_ipi/ipi-install-prerequisites.adoc deleted file mode 100644 index 1c4c9c5a7a85..000000000000 --- a/installing/installing_bare_metal_ipi/ipi-install-prerequisites.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: ASSEMBLY -[id="ipi-install-prerequisites"] -= Prerequisites -include::_attributes/common-attributes.adoc[] -:context: ipi-install-prerequisites - -toc::[] - -Installer-provisioned installation of {product-title} requires: - -ifdef::openshift-origin[. One provisioner node with {op-system-first} installed. The provisioner can be removed after installation.] -ifndef::openshift-origin[. One provisioner node with {op-system-base-full} 8.x installed. The provisioner can be removed after installation.] -. Three control plane nodes -. Baseboard management controller (BMC) access to each node -. At least one network: -.. One required routable network -.. One optional provisioning network -.. One optional management network - -Before starting an installer-provisioned installation of {product-title}, ensure the hardware environment meets the following requirements. 
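The prerequisites above call for baseboard management controller (BMC) access to each node, and the overview notes that deployments without a provisioning network must use a virtual media addressing option such as `redfish-virtualmedia` or `idrac-virtualmedia`. As a hedged sketch of where those BMC details end up, the host entries in `install-config.yaml` for an installer-provisioned installation look roughly like the following; the host name, address path, credentials, and MAC address are placeholders:

[source,yaml]
----
platform:
  baremetal:
    hosts:
    - name: openshift-worker-0                  # placeholder host name
      role: worker
      bmc:
        # Redfish virtual media addressing; idrac-virtualmedia:// is the Dell iDRAC equivalent
        address: redfish-virtualmedia://192.0.2.10/redfish/v1/Systems/1
        username: admin                         # placeholder credentials
        password: changeme
      bootMACAddress: 52:54:00:00:00:01         # placeholder MAC address
----

The exact Redfish system path varies by vendor; the BMC addressing modules in the installation workflow assembly are the authoritative reference, and the requirement modules that follow define the supported hardware and firmware details.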
- -include::modules/ipi-install-node-requirements.adoc[leveloffset=+1] - -include::modules/virt-planning-bare-metal-cluster-for-ocp-virt.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../virt/install/preparing-cluster-for-virt.adoc#preparing-cluster-for-virt[Preparing your cluster for {VirtProductName}] -* xref:../../networking/hardware_networks/about-sriov.adoc#about-sriov[About Single Root I/O Virtualization (SR-IOV) hardware networks] -* xref:../../virt/virtual_machines/vm_networking/virt-attaching-vm-to-sriov-network.adoc#virt-attaching-vm-to-sriov-network[Connecting a virtual machine to an SR-IOV network] - -include::modules/ipi-install-firmware-requirements-for-installing-with-virtual-media.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -xref:../../installing/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc#unable-to-discover-new-bare-metal-hosts-using-the-bmc_ipi-install-troubleshooting[Unable to discover new bare metal hosts using the BMC] - -include::modules/ipi-install-network-requirements.adoc[leveloffset=+1] - -include::modules/ipi-install-configuring-nodes.adoc[leveloffset=+1] - -include::modules/ipi-install-out-of-band-management.adoc[leveloffset=+1] - -include::modules/ipi-install-required-data-for-installation.adoc[leveloffset=+1] - -include::modules/ipi-install-validation-checklist-for-nodes.adoc[leveloffset=+1] diff --git a/installing/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc b/installing/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc deleted file mode 100644 index 7f5548f1e1b7..000000000000 --- a/installing/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc +++ /dev/null @@ -1,46 +0,0 @@ -:_content-type: ASSEMBLY -[id="ipi-install-troubleshooting"] -= Troubleshooting -include::_attributes/common-attributes.adoc[] -:context: ipi-install-troubleshooting - -toc::[] - - -== Troubleshooting the installer workflow - -Prior to troubleshooting the installation environment, it is critical to understand the overall flow of the installer-provisioned installation on bare metal. The diagrams below provide a troubleshooting flow with a step-by-step breakdown for the environment. - -image:flow1.png[Flow-Diagram-1] - -_Workflow 1 of 4_ illustrates a troubleshooting workflow when the `install-config.yaml` file has errors or the {op-system-first} images are inaccessible. Troubleshooting suggestions can be found at xref:ipi-install-troubleshooting-install-config_{context}[Troubleshooting `install-config.yaml`]. - -image:flow2.png[Flow-Diagram-2] - -_Workflow 2 of 4_ illustrates a troubleshooting workflow for xref:ipi-install-troubleshooting-bootstrap-vm_{context}[ bootstrap VM issues], xref:ipi-install-troubleshooting-bootstrap-vm-cannot-boot_{context}[ bootstrap VMs that cannot boot up the cluster nodes], and xref:ipi-install-troubleshooting-bootstrap-vm-inspecting-logs_{context}[ inspecting logs]. When installing an {product-title} cluster without the `provisioning` network, this workflow does not apply. - -image:flow3.png[Flow-Diagram-3] - -_Workflow 3 of 4_ illustrates a troubleshooting workflow for xref:ipi-install-troubleshooting-cluster-nodes-will-not-pxe_{context}[ cluster nodes that will not PXE boot]. If installing using RedFish Virtual Media, each node must meet minimum firmware requirements for the installer to deploy the node. See *Firmware requirements for installing with virtual media* in the *Prerequisites* section for additional details. 
- -image:flow4.png[Flow-Diagram-4] - -_Workflow 4 of 4_ illustrates a troubleshooting workflow from -xref:ipi-install-troubleshooting-api-not-accessible_{context}[ a non-accessible API] to a xref:ipi-install-troubleshooting-reviewing-the-installation_{context}[validated installation]. - - -include::modules/ipi-install-troubleshooting-install-config.adoc[leveloffset=+1] -include::modules/ipi-install-troubleshooting-bootstrap-vm.adoc[leveloffset=+1] -include::modules/ipi-install-troubleshooting-bootstrap-vm-cannot-boot.adoc[leveloffset=+2] -include::modules/ipi-install-troubleshooting-bootstrap-vm-inspecting-logs.adoc[leveloffset=+2] -include::modules/ipi-install-troubleshooting-cluster-nodes-will-not-pxe.adoc[leveloffset=+1] -include::modules/ipi-install-troubleshooting_unable-to-discover-new-bare-metal-hosts-using-the-bmc.adoc[leveloffset=+1] -include::modules/ipi-install-troubleshooting-api-not-accessible.adoc[leveloffset=+1] -include::modules/ipi-install-troubleshooting_proc_worker-nodes-cannot-join-the-cluster.adoc[leveloffset=+1] -include::modules/ipi-install-troubleshooting-cleaning-up-previous-installations.adoc[leveloffset=+1] -include::modules/ipi-install-troubleshooting-registry-issues.adoc[leveloffset=+1] -include::modules/ipi-install-troubleshooting-misc-issues.adoc[leveloffset=+1] -include::modules/ipi-install-troubleshooting-failed-ignition-during-firstboot.adoc[leveloffset=+2] -include::modules/ipi-install-troubleshooting-ntp-out-of-sync.adoc[leveloffset=+2] -include::modules/ipi-install-troubleshooting-reviewing-the-installation.adoc[leveloffset=+1] - diff --git a/installing/installing_bare_metal_ipi/modules b/installing/installing_bare_metal_ipi/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_bare_metal_ipi/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_bare_metal_ipi/snippets b/installing/installing_bare_metal_ipi/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_bare_metal_ipi/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_gcp/_attributes b/installing/installing_gcp/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_gcp/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_gcp/images b/installing/installing_gcp/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_gcp/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_gcp/installation-config-parameters-gcp.adoc b/installing/installing_gcp/installation-config-parameters-gcp.adoc deleted file mode 100644 index d1f6c11dda7f..000000000000 --- a/installing/installing_gcp/installation-config-parameters-gcp.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_content-type: ASSEMBLY -[id="installation-config-parameters-gcp"] -= Installation configuration parameters for GCP -include::_attributes/common-attributes.adoc[] -:context: installation-config-parameters-gcp -:platform: GCP - -toc::[] - -Before you deploy an {product-title} cluster on Google Cloud Platform (GCP), you provide parameters to customize your cluster and the platform that hosts it. When you create the `install-config.yaml` file, you provide values for the required parameters through the command line. 
You can then modify the `install-config.yaml` file to customize your cluster further. - -include::modules/installation-configuration-parameters.adoc[leveloffset=+1] diff --git a/installing/installing_gcp/installing-gcp-account.adoc b/installing/installing_gcp/installing-gcp-account.adoc deleted file mode 100644 index d8344c97ab56..000000000000 --- a/installing/installing_gcp/installing-gcp-account.adoc +++ /dev/null @@ -1,40 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-gcp-account"] -= Configuring a GCP project -include::_attributes/common-attributes.adoc[] -:context: installing-gcp-account - -toc::[] - -Before you can install {product-title}, you must configure a -Google Cloud Platform (GCP) project to host it. - -include::modules/installation-gcp-project.adoc[leveloffset=+1] - -include::modules/installation-gcp-enabling-api-services.adoc[leveloffset=+1] - -include::modules/installation-gcp-dns.adoc[leveloffset=+1] - -include::modules/installation-gcp-limits.adoc[leveloffset=+1] - -include::modules/installation-gcp-service-account.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-create-iam_manually-creating-iam-gcp[Manually creating IAM] for more details about using manual credentials mode. - -include::modules/installation-gcp-permissions.adoc[leveloffset=+2] - -include::modules/minimum-required-permissions-ipi-gcp.adoc[leveloffset=+2] - -include::modules/minimum-required-permissions-ipi-gcp-xpn.adoc[leveloffset=+2] - -include::modules/installation-gcp-regions.adoc[leveloffset=+1] - -== Next steps - -* Install an {product-title} cluster on GCP. You can -xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[install a customized cluster] -or xref:../../installing/installing_gcp/installing-gcp-default.adoc#installing-gcp-default[quickly install a cluster] -with default options. diff --git a/installing/installing_gcp/installing-gcp-customizations.adoc b/installing/installing_gcp/installing-gcp-customizations.adoc deleted file mode 100644 index e5b7c678f52a..000000000000 --- a/installing/installing_gcp/installing-gcp-customizations.adoc +++ /dev/null @@ -1,78 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-gcp-customizations"] -= Installing a cluster on GCP with customizations -include::_attributes/common-attributes.adoc[] -:context: installing-gcp-customizations -:platform: GCP - -toc::[] - -In {product-title} version {product-version}, you can install a customized -cluster on infrastructure that the installation program provisions on -Google Cloud Platform (GCP). To customize the installation, you modify -parameters in the `install-config.yaml` file before you install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a GCP project] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. 
-* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually create and maintain IAM credentials]. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for GCP] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-gcp-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-using-gcp-custom-machine-types.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-config-yaml.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../machine_management/creating_machinesets/creating-machineset-gcp.adoc#machineset-enabling-customer-managed-encryption_creating-machineset-gcp[Enabling customer-managed encryption keys for a compute machine set] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-gcp-marketplace.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_gcp/installing-gcp-default.adoc b/installing/installing_gcp/installing-gcp-default.adoc deleted file mode 100644 index ba1b623354c8..000000000000 --- a/installing/installing_gcp/installing-gcp-default.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-gcp-default"] -= Installing a cluster quickly on GCP -include::_attributes/common-attributes.adoc[] -:context: installing-gcp-default - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -Google Cloud Platform (GCP) that uses the default configuration options. 
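The customizations assembly above edits `install-config.yaml` before running the installation, whereas the quick-install flow in this assembly relies on the installer prompting for the same values. For orientation only, a minimal GCP `install-config.yaml` generally carries fields along these lines; every value shown is a placeholder assumption:

[source,yaml]
----
apiVersion: v1
baseDomain: example.com                 # placeholder public DNS zone in your GCP project
metadata:
  name: mycluster                       # placeholder cluster name
platform:
  gcp:
    projectID: example-project          # placeholder GCP project ID
    region: us-central1                 # placeholder region
pullSecret: '{"auths": ...}'            # elided; obtain your pull secret from the Red Hat console
sshKey: ssh-ed25519 AAAA... user@example.com    # placeholder public SSH key
----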
- -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a GCP project] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually create and maintain IAM credentials]. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_gcp/installing-gcp-network-customizations.adoc b/installing/installing_gcp/installing-gcp-network-customizations.adoc deleted file mode 100644 index 812be29cb8c4..000000000000 --- a/installing/installing_gcp/installing-gcp-network-customizations.adoc +++ /dev/null @@ -1,87 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-gcp-network-customizations"] -= Installing a cluster on GCP with network customizations -include::_attributes/common-attributes.adoc[] -:context: installing-gcp-network-customizations - -toc::[] - -In {product-title} version {product-version}, you can install a cluster with a -customized network configuration on infrastructure that the installation program -provisions on Google Cloud Platform (GCP). By customizing your network -configuration, your cluster can coexist with existing IP address allocations in -your environment and integrate with existing MTU and VXLAN configurations. To -customize the installation, you modify parameters in the `install-config.yaml` -file before you install the cluster. 
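The network customizations described above are expressed in the `networking` stanza of `install-config.yaml`. The following sketch uses commonly cited default-style values purely as assumptions; your CIDR choices must not collide with existing allocations in your environment:

[source,yaml]
----
networking:
  networkType: OVNKubernetes        # cluster network plugin
  clusterNetwork:
  - cidr: 10.128.0.0/14             # pod address space (assumed value)
    hostPrefix: 23
  serviceNetwork:
  - 172.30.0.0/16                   # service address space (assumed value)
  machineNetwork:
  - cidr: 10.0.0.0/16               # CIDR of the machine subnet (assumed value)
----

Finer-grained settings, such as the `kubeProxy` parameters noted in the next paragraph, belong to the Cluster Network Operator configuration rather than to `install-config.yaml`.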
- -You must set most of the network configuration parameters during installation, -and you can modify only `kubeProxy` configuration parameters in a running -cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a GCP project] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually create and maintain IAM credentials]. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for GCP] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-gcp-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-using-gcp-custom-machine-types.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-config-yaml.adoc[leveloffset=+2] - -[role="_additional-resources"] -== Additional resources - -* xref:../../machine_management/creating_machinesets/creating-machineset-gcp.adoc#machineset-enabling-customer-managed-encryption_creating-machineset-gcp[Enabling customer-managed encryption keys for a compute machine set] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -// Network Operator specific configuration -include::modules/nw-network-config.adoc[leveloffset=+1] -include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. 
- -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_gcp/installing-gcp-private.adoc b/installing/installing_gcp/installing-gcp-private.adoc deleted file mode 100644 index dd22ac4d3f7e..000000000000 --- a/installing/installing_gcp/installing-gcp-private.adoc +++ /dev/null @@ -1,82 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-gcp-private"] -= Installing a private cluster on GCP -include::_attributes/common-attributes.adoc[] -:context: installing-gcp-private - -toc::[] - -In {product-title} version {product-version}, you can install a private cluster into an existing VPC on Google Cloud Platform (GCP). The installation program provisions the rest of the required infrastructure, which you can further customize. To customize the installation, you modify -parameters in the `install-config.yaml` file before you install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a GCP project] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually create and maintain IAM credentials]. 
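For the private-cluster installation that this assembly describes, the parts of `install-config.yaml` that differ from a standard GCP install are the references to the existing VPC and the publishing strategy. A hedged sketch with placeholder names:

[source,yaml]
----
publish: Internal                           # keep the API and Ingress endpoints off the public internet
platform:
  gcp:
    projectID: example-project              # placeholder project that hosts the cluster
    region: us-central1                     # placeholder region
    network: example-vpc                    # name of the existing VPC (assumed)
    controlPlaneSubnet: example-subnet-cp   # existing subnet for control plane machines (assumed)
    computeSubnet: example-subnet-compute   # existing subnet for compute machines (assumed)
----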
- -include::modules/private-clusters-default.adoc[leveloffset=+1] - -include::modules/private-clusters-about-gcp.adoc[leveloffset=+2] - -include::modules/installation-about-custom-gcp-vpc.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for GCP] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-gcp-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-using-gcp-custom-machine-types.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-config-yaml.adoc[leveloffset=+2] - -include::modules/nw-gcp-installing-global-access-configuration.adoc[leveloffset=+2] - - -[role="_additional-resources"] -== Additional resources - -* xref:../../machine_management/creating_machinesets/creating-machineset-gcp.adoc#machineset-enabling-customer-managed-encryption_creating-machineset-gcp[Enabling customer-managed encryption keys for a compute machine set] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_gcp/installing-gcp-shared-vpc.adoc b/installing/installing_gcp/installing-gcp-shared-vpc.adoc deleted file mode 100644 index 662d13b91425..000000000000 --- a/installing/installing_gcp/installing-gcp-shared-vpc.adoc +++ /dev/null @@ -1,70 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-gcp-shared-vpc"] -= Installing a cluster on GCP into a shared VPC -include::_attributes/common-attributes.adoc[] -:context: installing-gcp-shared-vpc -:FeatureName: Installing a cluster on GCP into a shared VPC - -toc::[] - -In {product-title} version {product-version}, you can install a cluster into a shared Virtual Private Cloud (VPC) on Google Cloud Platform (GCP). In this installation method, the cluster is configured to use a VPC from a different GCP project. 
A shared VPC enables an organization to connect resources from multiple projects to a common VPC network. You can communicate within the organization securely and efficiently by using internal IP addresses from that network. For more information about shared VPC, see link:https://cloud.google.com/vpc/docs/shared-vpc[Shared VPC overview in the GCP documentation]. - -The installation program provisions the rest of the required infrastructure, which you can further customize. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -[id="installation-gcp-shared-vpc-prerequisites_{context}"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually create and maintain IAM credentials]. -* You have a GCP host project which contains a shared VPC network. -* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a GCP project] to host the cluster. This project, known as the service project, must be attached to the host project. For more information, see link:https://cloud.google.com/vpc/docs/provisioning-shared-vpc#create-shared[Attaching service projects in the GCP documentation]. -* You have a GCP service account that has the xref:../../installing/installing_gcp/installing-gcp-account.adoc#minimum-required-permissions-ipi-gcp-xpn[required GCP permissions] in both the host and service projects. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for GCP] - -include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-shared-vpc-config.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. 
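For the shared-VPC installation that this assembly covers, the service project's `install-config.yaml` additionally identifies the host project that owns the network. The following sketch is illustrative only and all names are assumptions:

[source,yaml]
----
platform:
  gcp:
    projectID: example-service-project      # project in which the cluster is deployed (assumed)
    region: us-central1                     # placeholder region
    networkProjectID: example-host-project  # host project that owns the shared VPC (assumed)
    network: example-shared-vpc             # shared VPC network name (assumed)
    controlPlaneSubnet: example-subnet-1    # subnet in the shared VPC (assumed)
    computeSubnet: example-subnet-2         # subnet in the shared VPC (assumed)
----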
- -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -[id="installation-gcp-shared-vpc-next-steps_{context}"] -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_gcp/installing-gcp-three-node.adoc b/installing/installing_gcp/installing-gcp-three-node.adoc deleted file mode 100644 index 63d7298e7ed6..000000000000 --- a/installing/installing_gcp/installing-gcp-three-node.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-gcp-three-node"] -= Installing a three-node cluster on GCP -include::_attributes/common-attributes.adoc[] -:context: installing-gcp-three-node - -toc::[] - -In {product-title} version {product-version}, you can install a three-node cluster on Google Cloud Platform (GCP). A three-node cluster consists of three control plane machines, which also act as compute machines. This type of cluster provides a smaller, more resource-efficient cluster for cluster administrators and developers to use for testing, development, and production. - -You can install a three-node cluster using either installer-provisioned or user-provisioned infrastructure. - -include::modules/installation-three-node-cluster-cloud-provider.adoc[leveloffset=+1] - -== Next steps -* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Installing a cluster on GCP with customizations] -* xref:../../installing/installing_gcp/installing-gcp-user-infra.adoc#installing-gcp-user-infra[Installing a cluster on user-provisioned infrastructure in GCP by using Deployment Manager templates] diff --git a/installing/installing_gcp/installing-gcp-user-infra-vpc.adoc b/installing/installing_gcp/installing-gcp-user-infra-vpc.adoc deleted file mode 100644 index ddfd93948cb0..000000000000 --- a/installing/installing_gcp/installing-gcp-user-infra-vpc.adoc +++ /dev/null @@ -1,172 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-gcp-user-infra-vpc"] -= Installing a cluster into a shared VPC on GCP using Deployment Manager templates -include::_attributes/common-attributes.adoc[] -:context: installing-gcp-user-infra-vpc - -toc::[] - -In {product-title} version {product-version}, you can install a cluster into a shared Virtual Private Cloud (VPC) on Google Cloud Platform (GCP) that uses infrastructure that you provide. In this context, a cluster installed into a shared VPC is a cluster that is configured to use a VPC from a project different from where the cluster is being deployed. - -A shared VPC enables an organization to connect resources from multiple projects to a common VPC network. You can communicate within the organization securely and efficiently by using internal IPs from that network. For more information about shared VPC, see link:https://cloud.google.com/vpc/docs/shared-vpc[Shared VPC overview] in the GCP documentation. - -The steps for performing a user-provisioned infrastructure installation into a shared VPC are outlined here. 
Several -link:https://cloud.google.com/deployment-manager/docs[Deployment Manager] templates are provided to assist in -completing these steps or to help model your own. You are also free to create -the required resources through other methods. - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the cloud provider and the installation process of {product-title}. Several Deployment Manager templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods; the templates are just an example. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually create and maintain IAM credentials]. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/csr-management.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-gcp-user-infra-config-project-vpc"] -== Configuring the GCP project that hosts your cluster - -Before you can install {product-title}, you must configure a Google Cloud -Platform (GCP) project to host it. - -include::modules/installation-gcp-project.adoc[leveloffset=+2] -include::modules/installation-gcp-enabling-api-services.adoc[leveloffset=+2] -include::modules/installation-gcp-limits.adoc[leveloffset=+2] -include::modules/installation-gcp-service-account.adoc[leveloffset=+2] -include::modules/installation-gcp-permissions.adoc[leveloffset=+3] -include::modules/installation-gcp-regions.adoc[leveloffset=+2] -include::modules/installation-gcp-install-cli.adoc[leveloffset=+2] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. 
- -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-gcp-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-using-gcp-custom-machine-types.adoc[leveloffset=+2] - -include::modules/installation-gcp-user-infra-config-host-project-vpc.adoc[leveloffset=+1] -include::modules/installation-gcp-dns.adoc[leveloffset=+2] -include::modules/installation-creating-gcp-vpc.adoc[leveloffset=+2] -include::modules/installation-deployment-manager-vpc.adoc[leveloffset=+3] - -include::modules/installation-user-infra-generate.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for GCP] - -include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] -include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] -include::modules/installation-gcp-user-infra-shared-vpc-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -//include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+2] - -//// -[role="_additional-resources"] -.Additional resources -//// - -[id="installation-gcp-user-infra-exporting-common-variables-vpc"] -== Exporting common variables - -include::modules/installation-extracting-infraid.adoc[leveloffset=+2] -include::modules/installation-user-infra-exporting-common-variables.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+1] - -include::modules/installation-creating-gcp-lb.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-ext-lb.adoc[leveloffset=+2] -include::modules/installation-deployment-manager-int-lb.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-private-dns.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-private-dns.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-firewall-rules-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-firewall-rules.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-iam-shared-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-iam-shared-vpc.adoc[leveloffset=+2] - -include::modules/installation-gcp-user-infra-rhcos.adoc[leveloffset=+1] - -include::modules/installation-creating-gcp-bootstrap.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-bootstrap.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-control-plane.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-control-plane.adoc[leveloffset=+2] - -include::modules/installation-gcp-user-infra-wait-for-bootstrap.adoc[leveloffset=+1] - -include::modules/installation-creating-gcp-worker.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-worker.adoc[leveloffset=+2] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-gcp-user-infra-adding-ingress.adoc[leveloffset=+1] - 
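As a quick illustration of the ingress DNS step covered by the preceding module, you can typically find the external IP address of the default Ingress Controller with a command similar to the following; the output shown is illustrative only:

[source,terminal]
----
$ oc -n openshift-ingress get service router-default
----

.Example output
[source,terminal]
----
NAME             TYPE           CLUSTER-IP      EXTERNAL-IP     PORT(S)                      AGE
router-default   LoadBalancer   172.30.18.154   35.233.157.184  80:32288/TCP,443:31215/TCP   98m
----

You then create DNS records, such as `*.apps.<cluster_name>.<base_domain>`, that resolve to this address.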
-[id="installation-gcp-user-infra-vpc-adding-firewall-rules"] -== Adding ingress firewall rules -The cluster requires several firewall rules. If you do not use a shared VPC, these rules are created by the Ingress Controller via the GCP cloud provider. When you use a shared VPC, you can either create cluster-wide firewall rules for all services now or create each rule based on events, when the cluster requests access. By creating each rule when the cluster requests access, you know exactly which firewall rules are required. By creating cluster-wide firewall rules, you can apply the same rule set across multiple clusters. - -If you choose to create each rule based on events, you must create firewall rules after you provision the cluster and during the life of the cluster when the console notifies you that rules are missing. Events that are similar to the following event are displayed, and you must add the firewall rules that are required: - -[source,terminal] ----- -$ oc get events -n openshift-ingress --field-selector="reason=LoadBalancerManualChange" ----- - -.Example output -[source,terminal] ----- -Firewall change required by security admin: `gcloud compute firewall-rules create k8s-fw-a26e631036a3f46cba28f8df67266d55 --network example-network --description "{\"kubernetes.io/service-name\":\"openshift-ingress/router-default\", \"kubernetes.io/service-ip\":\"35.237.236.234\"}\" --allow tcp:443,tcp:80 --source-ranges 0.0.0.0/0 --target-tags exampl-fqzq7-master,exampl-fqzq7-worker --project example-project` ----- - -If you encounter issues when creating these rule-based events, you can configure the cluster-wide firewall rules while your cluster is running. - -include::modules/installation-creating-gcp-shared-vpc-cluster-wide-firewall-rules.adoc[leveloffset=+2] - -//include::modules/installation-creating-gcp-shared-vpc-ingress-firewall-rules.adoc[leveloffset=+1] - -include::modules/installation-gcp-user-infra-completing.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_gcp/installing-gcp-user-infra.adoc b/installing/installing_gcp/installing-gcp-user-infra.adoc deleted file mode 100644 index d1d1f5cd49df..000000000000 --- a/installing/installing_gcp/installing-gcp-user-infra.adoc +++ /dev/null @@ -1,140 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-gcp-user-infra"] -= Installing a cluster on user-provisioned infrastructure in GCP by using Deployment Manager templates -include::_attributes/common-attributes.adoc[] -:context: installing-gcp-user-infra -:platform: GCP - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on Google Cloud Platform (GCP) that uses infrastructure that you provide. - -The steps for performing a user-provided infrastructure install are outlined here. 
Several link:https://cloud.google.com/deployment-manager/docs[Deployment Manager] templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods. - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the cloud provider and the installation process of {product-title}. Several Deployment Manager templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods; the templates are just an example. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually create and maintain IAM credentials]. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/csr-management.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-gcp-user-infra-config-project"] -== Configuring your GCP project - -Before you can install {product-title}, you must configure a Google Cloud Platform (GCP) project to host it. - -include::modules/installation-gcp-project.adoc[leveloffset=+2] -include::modules/installation-gcp-enabling-api-services.adoc[leveloffset=+2] -include::modules/installation-gcp-dns.adoc[leveloffset=+2] -include::modules/installation-gcp-limits.adoc[leveloffset=+2] -include::modules/installation-gcp-service-account.adoc[leveloffset=+2] -include::modules/installation-gcp-permissions.adoc[leveloffset=+2] -include::modules/minimum-required-permissions-upi-gcp.adoc[leveloffset=+2] -include::modules/installation-gcp-regions.adoc[leveloffset=+2] -include::modules/installation-gcp-install-cli.adoc[leveloffset=+2] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. 
- -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-gcp-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-using-gcp-custom-machine-types.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate.adoc[leveloffset=+1] -include::modules/installation-disk-partitioning-upi-templates.adoc[leveloffset=+2] -include::modules/installation-initializing.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for GCP] - -include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] -include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] -include::modules/installation-configure-proxy.adoc[leveloffset=+2] -//include::modules/installation-three-node-cluster.adoc[leveloffset=+2] -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_gcp/installing-gcp-user-infra.adoc#installation-gcp-user-infra-adding-ingress_installing-gcp-user-infra[Optional: Adding the ingress DNS records] - -[id="installation-gcp-user-infra-exporting-common-variables"] -== Exporting common variables - -include::modules/installation-extracting-infraid.adoc[leveloffset=+2] -include::modules/installation-user-infra-exporting-common-variables.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-vpc.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+1] - -include::modules/installation-creating-gcp-lb.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-ext-lb.adoc[leveloffset=+2] -include::modules/installation-deployment-manager-int-lb.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-private-dns.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-private-dns.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-firewall-rules-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-firewall-rules.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-iam-shared-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-iam-shared-vpc.adoc[leveloffset=+2] - -include::modules/installation-gcp-user-infra-rhcos.adoc[leveloffset=+1] - -include::modules/installation-creating-gcp-bootstrap.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-bootstrap.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-control-plane.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-control-plane.adoc[leveloffset=+2] - -include::modules/installation-gcp-user-infra-wait-for-bootstrap.adoc[leveloffset=+1] - -include::modules/installation-creating-gcp-worker.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-worker.adoc[leveloffset=+2] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-gcp-user-infra-adding-ingress.adoc[leveloffset=+1] - 
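Anticipating the completion module that follows, monitoring the installation to completion typically uses a command of the following form; `<installation_directory>` is a placeholder:

[source,terminal]
----
$ ./openshift-install --dir <installation_directory> wait-for install-complete --log-level=info
----

When the command finishes, it prints the web console URL and the `kubeadmin` credentials for the cluster.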
-include::modules/installation-gcp-user-infra-completing.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../networking/ingress-operator.adoc#nw-ingress-controller-configuration-gcp-global-access_configuring-ingress[Configure Global Access for an Ingress Controller on GCP]. diff --git a/installing/installing_gcp/installing-gcp-vpc.adoc b/installing/installing_gcp/installing-gcp-vpc.adoc deleted file mode 100644 index 58e5b738f11f..000000000000 --- a/installing/installing_gcp/installing-gcp-vpc.adoc +++ /dev/null @@ -1,78 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-gcp-vpc"] -= Installing a cluster on GCP into an existing VPC -include::_attributes/common-attributes.adoc[] -:context: installing-gcp-vpc - -toc::[] - -In {product-title} version {product-version}, you can install a cluster into an existing Virtual Private Cloud (VPC) on Google Cloud Platform (GCP). The installation program provisions the rest of the required infrastructure, which you can further customize. To customize the installation, you modify -parameters in the `install-config.yaml` file before you install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a GCP project] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually create and maintain IAM credentials]. 
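To illustrate the kind of `install-config.yaml` customization described in the introduction above, the following is a minimal, hedged sketch of the GCP platform stanza for an existing VPC; the project, region, network, and subnet names are placeholders, and a complete file contains many more fields:

[source,yaml]
----
platform:
  gcp:
    projectID: example-project
    region: us-central1
    network: example-vpc
    controlPlaneSubnet: example-control-plane-subnet
    computeSubnet: example-compute-subnet
----

The `network`, `controlPlaneSubnet`, and `computeSubnet` values identify the existing VPC and subnets that the cluster uses instead of installer-created networking.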
- -include::modules/installation-custom-gcp-vpc.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for GCP] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-gcp-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-using-gcp-custom-machine-types.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-config-yaml.adoc[leveloffset=+2] - -include::modules/nw-gcp-installing-global-access-configuration.adoc[leveloffset=+2] - - -[role="_additional-resources"] -== Additional resources - -* xref:../../machine_management/creating_machinesets/creating-machineset-gcp.adoc#machineset-enabling-customer-managed-encryption_creating-machineset-gcp[Enabling customer-managed encryption keys for a compute machine set] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc b/installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc deleted file mode 100644 index 247aec7a8cb3..000000000000 --- a/installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc +++ /dev/null @@ -1,85 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-gcp-installer-provisioned"] -= Installing a cluster on GCP in a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-gcp-installer-provisioned - -toc::[] - -In {product-title} {product-version}, you can install a cluster on Google Cloud Platform (GCP) in a restricted network by creating an internal mirror of the installation release content on an existing Google Virtual Private Cloud (VPC). 
- -[IMPORTANT] -==== -You can install an {product-title} cluster by using mirrored installation release content, but your cluster will require internet access to use the GCP APIs. -==== - -[id="prerequisites_installing-restricted-networks-gcp-installer-provisioned"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a GCP project] to host the cluster. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installation-about-mirror-registry_installing-mirroring-installation-images[mirrored the images for a disconnected installation] to your registry and obtained the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps. -==== -* You have an existing VPC in GCP. While installing a cluster in a restricted network that uses installer-provisioned infrastructure, you cannot use the installer-provisioned VPC. You must use a user-provisioned VPC that satisfies one of the following requirements: -** Contains the mirror registry -** Has firewall rules or a peering connection to access the mirror registry hosted elsewhere -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. While you might need to grant access to more sites, you must grant access to `*.googleapis.com` and `accounts.google.com`. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually create and maintain IAM credentials]. 
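As an illustration of the mirror-related settings that a restricted network installation typically adds to `install-config.yaml`, here is a minimal sketch; the registry host name, repository paths, and certificate contents are placeholders:

[source,yaml]
----
additionalTrustBundle: |
  -----BEGIN CERTIFICATE-----
  <mirror_registry_certificate>
  -----END CERTIFICATE-----
imageContentSources:
- mirrors:
  - mirror.example.com:5000/ocp4/openshift4
  source: quay.io/openshift-release-dev/ocp-release
- mirrors:
  - mirror.example.com:5000/ocp4/openshift4
  source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
----

The `imageContentSources` values come from the output of the mirroring procedure referenced in the prerequisites.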
- -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for GCP] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-gcp-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-using-gcp-custom-machine-types.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-config-yaml.adoc[leveloffset=+2] - -include::modules/nw-gcp-installing-global-access-configuration.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -[id="next-steps_installing-restricted-networks-gcp-installer-provisioned"] -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validate an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-must-gather-disconnected[Configure image streams] for the Cluster Samples Operator and the `must-gather` tool. -* Learn how to xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[use Operator Lifecycle Manager (OLM) on restricted networks]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. 
diff --git a/installing/installing_gcp/installing-restricted-networks-gcp.adoc b/installing/installing_gcp/installing-restricted-networks-gcp.adoc deleted file mode 100644 index aa280a3df0d6..000000000000 --- a/installing/installing_gcp/installing-restricted-networks-gcp.adoc +++ /dev/null @@ -1,149 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-gcp"] -= Installing a cluster on GCP in a restricted network with user-provisioned infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-gcp - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on Google Cloud Platform (GCP) that uses infrastructure that you provide and an internal mirror of the installation release content. - -[IMPORTANT] -==== -While you can install an {product-title} cluster by using mirrored installation release content, your cluster still requires internet access to use the GCP APIs. -==== - -The steps for performing a user-provided infrastructure install are outlined here. Several link:https://cloud.google.com/deployment-manager/docs[Deployment Manager] templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods. - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the cloud provider and the installation process of {product-title}. Several Deployment Manager templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods; the templates are just an example. -==== - - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a registry on your mirror host] and obtained the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps. -==== -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. While you might need to grant access to more sites, you must grant access to `*.googleapis.com` and `accounts.google.com`. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually create and maintain IAM credentials]. 
- -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-restricted-networks-gcp-user-infra-config-project"] -== Configuring your GCP project - -Before you can install {product-title}, you must configure a Google Cloud Platform (GCP) project to host it. - -include::modules/installation-gcp-project.adoc[leveloffset=+2] -include::modules/installation-gcp-enabling-api-services.adoc[leveloffset=+2] -include::modules/installation-gcp-dns.adoc[leveloffset=+2] -include::modules/installation-gcp-limits.adoc[leveloffset=+2] -include::modules/installation-gcp-service-account.adoc[leveloffset=+2] -include::modules/installation-gcp-permissions.adoc[leveloffset=+2] -include::modules/minimum-required-permissions-upi-gcp.adoc[leveloffset=+2] -include::modules/installation-gcp-regions.adoc[leveloffset=+2] -include::modules/installation-gcp-install-cli.adoc[leveloffset=+2] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-gcp-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-using-gcp-custom-machine-types.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate.adoc[leveloffset=+1] -include::modules/installation-disk-partitioning-upi-templates.adoc[leveloffset=+2] -include::modules/installation-initializing.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for GCP] - -include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] -include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] -include::modules/installation-configure-proxy.adoc[leveloffset=+2] -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_gcp/installing-gcp-user-infra.adoc#installation-gcp-user-infra-adding-ingress_installing-gcp-user-infra[Optional: Adding the ingress DNS records] - -[id="installation-restricted-networks-gcp-user-infra-exporting-common-variables"] -== Exporting common variables - -include::modules/installation-extracting-infraid.adoc[leveloffset=+2] -include::modules/installation-user-infra-exporting-common-variables.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-vpc.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+1] - -include::modules/installation-creating-gcp-lb.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-ext-lb.adoc[leveloffset=+2] -include::modules/installation-deployment-manager-int-lb.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-private-dns.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-private-dns.adoc[leveloffset=+2] - 
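The Deployment Manager templates referenced by the preceding modules are typically launched with commands of the following general form; the deployment name and configuration file are placeholders:

[source,terminal]
----
$ gcloud deployment-manager deployments create <deployment_name> --config <config_file>.yaml
$ gcloud deployment-manager deployments describe <deployment_name>
----

The `describe` command is one way to confirm that the resources in a template deployed successfully.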
-include::modules/installation-creating-gcp-firewall-rules-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-firewall-rules.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-iam-shared-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-iam-shared-vpc.adoc[leveloffset=+2] - -include::modules/installation-gcp-user-infra-rhcos.adoc[leveloffset=+1] - -include::modules/installation-creating-gcp-bootstrap.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-bootstrap.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-control-plane.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-control-plane.adoc[leveloffset=+2] - -include::modules/installation-gcp-user-infra-wait-for-bootstrap.adoc[leveloffset=+1] - -include::modules/installation-creating-gcp-worker.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-worker.adoc[leveloffset=+2] - -//You install the CLI on the mirror host. - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-gcp-user-infra-adding-ingress.adoc[leveloffset=+1] - -include::modules/installation-gcp-user-infra-completing.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-must-gather-disconnected[Configure image streams] for the Cluster Samples Operator and the `must-gather` tool. -* Learn how to xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[use Operator Lifecycle Manager (OLM) on restricted networks]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_gcp/manually-creating-iam-gcp.adoc b/installing/installing_gcp/manually-creating-iam-gcp.adoc deleted file mode 100644 index 3108dbca1079..000000000000 --- a/installing/installing_gcp/manually-creating-iam-gcp.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="manually-creating-iam-gcp"] -= Manually creating IAM for GCP -include::_attributes/common-attributes.adoc[] -:context: manually-creating-iam-gcp - -toc::[] - -In environments where the cloud identity and access management (IAM) APIs are not reachable, or the administrator prefers not to store an administrator-level credential secret in the cluster `kube-system` namespace, you can put the Cloud Credential Operator (CCO) into manual mode before you install the cluster. 
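A minimal sketch of how manual mode is typically expressed in the `install-config.yaml` file before installation; only the relevant key is shown, and the other values are placeholders:

[source,yaml]
----
apiVersion: v1
baseDomain: example.com
credentialsMode: Manual
# ... the rest of the installation configuration
----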
- -include::modules/alternatives-to-storing-admin-secrets-in-kube-system.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#cco-mode-gcp-workload-identity[Using manual mode with GCP Workload Identity] -* xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-rotate-remove-cloud-creds[Rotating or removing cloud provider credentials] - -For a detailed description of all available CCO credential modes and their supported platforms, see xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator[About the Cloud Credential Operator]. - -include::modules/manually-create-identity-access-management.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] - -include::modules/mint-mode.adoc[leveloffset=+1] - -include::modules/mint-mode-with-removal-of-admin-credential.adoc[leveloffset=+1] - -[id="manually-creating-iam-gcp-next-steps"] -== Next steps - -* Install an {product-title} cluster: -** xref:../../installing/installing_gcp/installing-gcp-default.adoc#installing-gcp-default[Installing a cluster quickly on GCP] with default options on installer-provisioned infrastructure -** xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Install a cluster with cloud customizations on installer-provisioned infrastructure] -** xref:../../installing/installing_gcp/installing-gcp-network-customizations.adoc#installing-gcp-network-customizations[Install a cluster with network customizations on installer-provisioned infrastructure] diff --git a/installing/installing_gcp/modules b/installing/installing_gcp/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_gcp/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_gcp/preparing-to-install-on-gcp.adoc b/installing/installing_gcp/preparing-to-install-on-gcp.adoc deleted file mode 100644 index 3f92abef99df..000000000000 --- a/installing/installing_gcp/preparing-to-install-on-gcp.adoc +++ /dev/null @@ -1,61 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-gcp"] -= Preparing to install on GCP -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-gcp - -toc::[] - -[id="{context}-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. - -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -[id="requirements-for-installing-ocp-on-gcp"] -== Requirements for installing {product-title} on GCP - -Before installing {product-title} on Google Cloud Platform (GCP), you must create a service account and configure a GCP project. See xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[Configuring a GCP project] for details about creating a project, enabling API services, configuring DNS, GCP account limits, and supported GCP regions. 
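As a rough illustration of the service account requirement mentioned above, creating and granting roles to a GCP service account generally looks similar to the following; the account ID, project ID, and role shown are placeholders, and the exact roles depend on your installation method:

[source,terminal]
----
$ gcloud iam service-accounts create example-installer \
    --display-name="OpenShift installer service account" \
    --project=example-project
$ gcloud projects add-iam-policy-binding example-project \
    --member="serviceAccount:example-installer@example-project.iam.gserviceaccount.com" \
    --role="roles/owner"
----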
- -If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, see xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[Manually creating IAM for GCP] for other options. - -[id="choosing-an-method-to-install-ocp-on-gcp"] -== Choosing a method to install {product-title} on GCP - -You can install {product-title} on installer-provisioned or user-provisioned infrastructure. The default installation type uses installer-provisioned infrastructure, where the installation program provisions the underlying infrastructure for the cluster. You can also install {product-title} on infrastructure that you provision. If you do not use infrastructure that the installation program provisions, you must manage and maintain the cluster resources yourself. - -See xref:../../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process] for more information about installer-provisioned and user-provisioned installation processes. - -[id="choosing-an-method-to-install-ocp-on-gcp-installer-provisioned"] -=== Installing a cluster on installer-provisioned infrastructure - -You can install a cluster on GCP infrastructure that is provisioned by the {product-title} installation program, by using one of the following methods: - -* **xref:../../installing/installing_gcp/installing-gcp-default.adoc#installing-gcp-default[Installing a cluster quickly on GCP]**: You can install {product-title} on GCP infrastructure that is provisioned by the {product-title} installation program. You can install a cluster quickly by using the default configuration options. - -* **xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Installing a customized cluster on GCP]**: You can install a customized cluster on GCP infrastructure that the installation program provisions. The installation program allows for some customization to be applied at the installation stage. Many other customization options are available xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-cluster-tasks[post-installation]. - -* **xref:../../installing/installing_gcp/installing-gcp-network-customizations.adoc#installing-gcp-network-customizations[Installing a cluster on GCP with network customizations]**: You can customize your {product-title} network configuration during installation, so that your cluster can coexist with your existing IP address allocations and adhere to your network requirements. - -* **xref:../../installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc#installing-restricted-networks-gcp-installer-provisioned[Installing a cluster on GCP in a restricted network]**: You can install {product-title} on GCP on installer-provisioned infrastructure by using an internal mirror of the installation release content. You can use this method to install a cluster that does not require an active internet connection to obtain the software components. While you can install {product-title} by using the mirrored content, your cluster still requires internet access to use the GCP APIs. - -* **xref:../../installing/installing_gcp/installing-gcp-vpc.adoc#installing-gcp-vpc[Installing a cluster into an existing Virtual Private Cloud]**: You can install {product-title} on an existing GCP Virtual Private Cloud (VPC). 
You can use this installation method if you have constraints set by the guidelines of your company, such as limits on creating new accounts or infrastructure. - -* **xref:../../installing/installing_gcp/installing-gcp-private.adoc#installing-gcp-private[Installing a private cluster on an existing VPC]**: You can install a private cluster on an existing GCP VPC. You can use this method to deploy {product-title} on an internal network that is not visible to the internet. - -[id="choosing-an-method-to-install-ocp-on-gcp-user-provisioned"] -=== Installing a cluster on user-provisioned infrastructure - -You can install a cluster on GCP infrastructure that you provision, by using one of the following methods: - -* **xref:../../installing/installing_gcp/installing-gcp-user-infra.adoc#installing-gcp-user-infra[Installing a cluster on GCP with user-provisioned infrastructure]**: You can install {product-title} on GCP infrastructure that you provide. You can use the provided Deployment Manager templates to assist with the installation. - -* **xref:../../installing/installing_gcp/installing-gcp-user-infra-vpc.adoc#installing-gcp-user-infra-vpc[Installing a cluster with shared VPC on user-provisioned infrastructure in GCP]**: You can use the provided Deployment Manager templates to create GCP resources in a shared VPC infrastructure. - -* **xref:../../installing/installing_gcp/installing-restricted-networks-gcp.adoc#installing-restricted-networks-gcp[Installing a cluster on GCP in a restricted network with user-provisioned infrastructure]**: You can install {product-title} on GCP in a restricted network with user-provisioned infrastructure. By creating an internal mirror of the installation release content, you can install a cluster that does not require an active internet connection to obtain the software components. You can also use this installation method to ensure that your clusters only use container images that satisfy your organizational controls on external content. - -[id="preparing-to-install-on-gcp-next-steps"] -== Next steps - -* xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[Configuring a GCP project] diff --git a/installing/installing_gcp/snippets b/installing/installing_gcp/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_gcp/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_gcp/uninstalling-cluster-gcp.adoc b/installing/installing_gcp/uninstalling-cluster-gcp.adoc deleted file mode 100644 index b8bcb4feaaeb..000000000000 --- a/installing/installing_gcp/uninstalling-cluster-gcp.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-gcp"] -= Uninstalling a cluster on GCP -include::_attributes/common-attributes.adoc[] -:context: uninstalling-cluster-gcp - -toc::[] - -You can remove a cluster that you deployed to Google Cloud Platform (GCP). 
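A minimal sketch of the removal flow that the following modules describe, assuming you still have the installation program and the directory of assets that were generated during installation:

[source,terminal]
----
$ ./openshift-install destroy cluster --dir <installation_directory> --log-level=info
----

The command removes the GCP resources that the installation program created for the cluster.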
- -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] - -include::modules/cco-ccoctl-deleting-sts-resources.adoc[leveloffset=+1] diff --git a/installing/installing_ibm_cloud/_attributes b/installing/installing_ibm_cloud/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_ibm_cloud/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_ibm_cloud/images b/installing/installing_ibm_cloud/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_ibm_cloud/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_ibm_cloud/install-ibm-cloud-installation-workflow.adoc b/installing/installing_ibm_cloud/install-ibm-cloud-installation-workflow.adoc deleted file mode 100644 index 56652129d615..000000000000 --- a/installing/installing_ibm_cloud/install-ibm-cloud-installation-workflow.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -[id="install-ibm-cloud-installation-workflow"] -= Setting up the environment for an {product-title} installation -include::_attributes/common-attributes.adoc[] -:context: install-ibm-cloud-installation-workflow - -toc::[] - -include::modules/install-ibm-cloud-preparing-the-provisioner-node.adoc[leveloffset=+1] - -include::modules/install-ibm-cloud-configuring-the-public-subnet.adoc[leveloffset=+1] - -include::modules/ipi-install-retrieving-the-openshift-installer.adoc[leveloffset=+1] - -include::modules/ipi-install-extracting-the-openshift-installer.adoc[leveloffset=+1] - -include::modules/install-ibm-cloud-configuring-the-install-config-file.adoc[leveloffset=+1] - -include::modules/ipi-install-additional-install-config-parameters.adoc[leveloffset=+1] - -include::modules/ipi-install-root-device-hints.adoc[leveloffset=+1] - -include::modules/ipi-install-creating-the-openshift-manifests.adoc[leveloffset=+1] - -include::modules/ipi-install-deploying-the-cluster-via-the-openshift-installer.adoc[leveloffset=+1] - -include::modules/ipi-install-following-the-installation.adoc[leveloffset=+1] diff --git a/installing/installing_ibm_cloud/install-ibm-cloud-prerequisites.adoc b/installing/installing_ibm_cloud/install-ibm-cloud-prerequisites.adoc deleted file mode 100644 index 0fe412742583..000000000000 --- a/installing/installing_ibm_cloud/install-ibm-cloud-prerequisites.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_content-type: ASSEMBLY -[id="install-ibm-cloud-prerequisites"] -= Prerequisites -include::_attributes/common-attributes.adoc[] -:context: install-ibm-cloud - -toc::[] - -You can use installer-provisioned installation to install {product-title} on {ibmcloudBMRegProductName} nodes. This document describes the prerequisites and procedures for installing {product-title} on IBM Cloud nodes. - -[IMPORTANT] -==== -Red Hat supports IPMI and PXE on the provisioning network only. Red Hat has not tested Redfish, virtual media, or other complementary technologies such as Secure Boot on IBM Cloud deployments. A provisioning network is required. -==== - -Installer-provisioned installation of {product-title} requires: - -* One node with {op-system-first} 8.x installed for running the provisioner -* Three control plane nodes -* One routable network -* One provisioning network - -Before starting an installer-provisioned installation of {product-title} on {ibmcloudBMProductName}, address the following prerequisites and requirements. 
- -include::modules/install-ibm-cloud-setting-up-ibm-cloud-infrastructure.adoc[leveloffset=+1] diff --git a/installing/installing_ibm_cloud/modules b/installing/installing_ibm_cloud/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_ibm_cloud/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_ibm_cloud/snippets b/installing/installing_ibm_cloud/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_ibm_cloud/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_ibm_cloud_public/_attributes b/installing/installing_ibm_cloud_public/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_ibm_cloud_public/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc b/installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc deleted file mode 100644 index ef4f6e2993a7..000000000000 --- a/installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-iam-ibm-cloud"] -= Configuring IAM for IBM Cloud VPC -include::_attributes/common-attributes.adoc[] -:context: configuring-iam-ibm-cloud - -toc::[] - -In environments where the cloud identity and access management (IAM) APIs are not reachable, you must put the Cloud Credential Operator (CCO) into manual mode before you install the cluster. - -include::modules/alternatives-to-storing-admin-secrets-in-kube-system.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_configuring-iam-ibm-cloud-about-cco"] -.Additional resources -* xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator[About the Cloud Credential Operator] - -include::modules/cco-ccoctl-configuring.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_configuring-iam-ibm-cloud-refreshing-ids"] -.Additional resources -* xref:../../post_installation_configuration/cluster-tasks.adoc#refreshing-service-ids-ibm-cloud_post-install-cluster-tasks[Rotating API keys for IBM Cloud VPC] - -[id="next-steps_configuring-iam-ibm-cloud"] -== Next steps -* xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc#installing-ibm-cloud-customizations[Installing a cluster on IBM Cloud VPC with customizations] - -[role="_additional-resources"] -[id="additional-resources_{context}"] -== Additional resources - -* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] \ No newline at end of file diff --git a/installing/installing_ibm_cloud_public/images b/installing/installing_ibm_cloud_public/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/installing/installing_ibm_cloud_public/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc b/installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc deleted file mode 100644 index 3af38d584212..000000000000 --- a/installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc +++ /dev/null @@ 
-1,36 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-cloud-account"] -= Configuring an IBM Cloud account -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-cloud-account - -toc::[] - -Before you can install {product-title}, you must configure an IBM Cloud account. - -[id="prerequisites_installing-ibm-cloud-account"] -== Prerequisites - -* You have an IBM Cloud account with a subscription. You cannot install {product-title} on a free or trial IBM Cloud account. - -include::modules/quotas-and-limits-ibm-cloud.adoc[leveloffset=+1] - -[id="configuring-dns-resolution"] -== Configuring DNS resolution - -How you configure DNS resolution depends on the type of {product-title} cluster you are installing: - -* If you are installing a public cluster, you use IBM Cloud Internet Services (CIS). -* If you are installing a private cluster, you use IBM Cloud DNS Services (DNS Services). - -include::modules/installation-cis-ibm-cloud.adoc[leveloffset=+2] -include::modules/installation-dns-ibm-cloud.adoc[leveloffset=+2] - -include::modules/installation-ibm-cloud-iam-policies-api-key.adoc[leveloffset=+1] -include::modules/installation-ibm-cloud-creating-api-key.adoc[leveloffset=+2] - -include::modules/installation-ibm-cloud-regions.adoc[leveloffset=+1] - -[id="next-steps_installing-ibm-cloud-account"] -== Next steps -* xref:../../installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for IBM Cloud VPC] diff --git a/installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc b/installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc deleted file mode 100644 index fd1d36d2e204..000000000000 --- a/installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc +++ /dev/null @@ -1,66 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-cloud-customizations"] -= Installing a cluster on IBM Cloud VPC with customizations -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-cloud-customizations - -toc::[] - -In {product-title} version {product-version}, you can install a customized cluster on infrastructure that the installation program provisions on IBM Cloud VPC. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -[id="prerequisites_installing-ibm-cloud-customizations"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc#installing-ibm-cloud-account[configured an IBM Cloud account] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You configured the `ccoctl` utility before you installed the cluster. For more information, see xref:../../installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for IBM Cloud VPC]. 
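To give a sense of the customization described in the introduction above, a minimal, illustrative sketch of an `install-config.yaml` file for IBM Cloud VPC; the domain, cluster name, and region are placeholders, and a real file contains additional sections:

[source,yaml]
----
apiVersion: v1
baseDomain: example.com
metadata:
  name: example-cluster
credentialsMode: Manual
platform:
  ibmcloud:
    region: us-south
----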
- -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-ibm-cloud-export-variables.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-ibm-cloud-config-yaml.adoc[leveloffset=+2] - -//.Additional resources - -//* ../../machine_management/creating_machinesets/creating-machineset-ibm-cloud.adoc#machineset-enabling-customer-managed-encryption_creating-machineset-ibm-cloud[Enabling customer-managed encryption keys for a compute machine set] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/manually-create-iam-ibm-cloud.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-cloud-customizations-console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-cloud-customizations-telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -[id="next-steps_installing-ibm-cloud-customizations"] -== Next steps -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc b/installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc deleted file mode 100644 index c71411a8717c..000000000000 --- a/installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc +++ /dev/null @@ -1,75 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-cloud-network-customizations"] -= Installing a cluster on IBM Cloud VPC with network customizations -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-cloud-network-customizations - -toc::[] - -In {product-title} version {product-version}, you can install a cluster with a -customized network configuration on infrastructure that the installation program provisions on IBM Cloud VPC. By customizing your network configuration, your cluster can coexist with existing IP address allocations in your environment and integrate with existing MTU and VXLAN configurations. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -You must set most of the network configuration parameters during installation, and you can modify only `kubeProxy` configuration parameters in a running cluster. 
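For example, most of those parameters live in the `networking` stanza of `install-config.yaml`. The following sketch shows the commonly customized fields with placeholder CIDR ranges; choose ranges that do not overlap your existing allocations:

[source,yaml]
----
networking:
  networkType: OVNKubernetes       # cluster network plugin
  clusterNetwork:
  - cidr: 10.128.0.0/14            # pod network
    hostPrefix: 23                 # size of the per-node pod IP block
  machineNetwork:
  - cidr: 10.0.0.0/16              # CIDR that the cluster machines use
  serviceNetwork:
  - 172.30.0.0/16                  # service (ClusterIP) network
----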
- -[id="prerequisites_installing-ibm-cloud-network-customizations"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc#installing-ibm-cloud-account[configured an IBM Cloud account] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You configured the `ccoctl` utility before you installed the cluster. For more information, see xref:../../installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for IBM Cloud VPC]. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-ibm-cloud-export-variables.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-ibm-cloud-config-yaml.adoc[leveloffset=+2] - -//.Additional resources - -//* ../../machine_management/creating_machinesets/creating-machineset-ibm-cloud.adoc#machineset-enabling-customer-managed-encryption_creating-machineset-ibm-cloud[Enabling customer-managed encryption keys for a compute machine set] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/manually-create-iam-ibm-cloud.adoc[leveloffset=+1] - -// Network Operator specific configuration -include::modules/nw-network-config.adoc[leveloffset=+1] -include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-cloud-network-customizations-console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-cloud-network-customizations-telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -[id="next-steps_installing-ibm-cloud-network-customizations"] -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. 
diff --git a/installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc b/installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc deleted file mode 100644 index 00f4a1b208a2..000000000000 --- a/installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc +++ /dev/null @@ -1,67 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-cloud-private"] -= Installing a private cluster on IBM Cloud VPC -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-cloud-private - -toc::[] - -In {product-title} version {product-version}, you can install a private cluster into an existing VPC. The installation program provisions the rest of the required infrastructure, which you can further customize. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -[id="prerequisites_installing-ibm-cloud-private"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc#installing-ibm-cloud-account[configured an IBM Cloud account] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You configured the `ccoctl` utility before you installed the cluster. For more information, see xref:../../installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for IBM Cloud VPC]. 
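A private installation is selected in `install-config.yaml` rather than at the command line. The fragment below is a sketch: `publish: Internal` is the standard switch for a private cluster, while the existing-VPC field names under `platform.ibmcloud` are assumptions here and should be taken from the sample customized file included later in this assembly:

[source,yaml]
----
publish: Internal                  # expose the API and Ingress endpoints only inside the network
platform:
  ibmcloud:
    region: us-south
    vpcName: example-vpc           # assumed field name for the existing VPC
    controlPlaneSubnets:           # assumed field names for the existing subnets
    - example-subnet-cp-1
    computeSubnets:
    - example-subnet-compute-1
----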
- -include::modules/private-clusters-default.adoc[leveloffset=+1] - -include::modules/private-clusters-about-ibm-cloud.adoc[leveloffset=+1] - -include::modules/installation-custom-ibm-cloud-vpc.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-ibm-cloud-export-variables.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-ibm-cloud-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/manually-create-iam-ibm-cloud.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-cloud-private-console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-cloud-private-telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -[id="next-steps_installing-ibm-cloud-private"] -== Next steps -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc b/installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc deleted file mode 100644 index f4acc0477718..000000000000 --- a/installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc +++ /dev/null @@ -1,63 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-cloud-vpc"] -= Installing a cluster on IBM Cloud VPC into an existing VPC -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-cloud-vpc - -toc::[] - -In {product-title} version {product-version}, you can install a cluster into an existing Virtual Private Cloud (VPC) on IBM Cloud VPC. The installation program provisions the rest of the required infrastructure, which you can then further customize. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -[id="prerequisites_installing-ibm-cloud-vpc"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. 
-* You xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc#installing-ibm-cloud-account[configured an IBM Cloud account] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You configured the `ccoctl` utility before you installed the cluster. For more information, see xref:../../installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for IBM Cloud VPC]. - -include::modules/installation-custom-ibm-cloud-vpc.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-ibm-cloud-export-variables.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-ibm-cloud-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/manually-create-iam-ibm-cloud.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-cloud-vpc-console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-cloud-vpc-telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -[id="next-steps_installing-ibm-cloud-vpc"] -== Next steps -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* Optional: xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[Opt out of remote health reporting]. diff --git a/installing/installing_ibm_cloud_public/modules b/installing/installing_ibm_cloud_public/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/installing/installing_ibm_cloud_public/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/installing/installing_ibm_cloud_public/preparing-to-install-on-ibm-cloud.adoc b/installing/installing_ibm_cloud_public/preparing-to-install-on-ibm-cloud.adoc deleted file mode 100644 index 1907b825a6f8..000000000000 --- a/installing/installing_ibm_cloud_public/preparing-to-install-on-ibm-cloud.adoc +++ /dev/null @@ -1,47 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-ibm-cloud"] -= Preparing to install on IBM Cloud VPC -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-ibm-cloud - -toc::[] - -The installation workflows documented in this section are for IBM Cloud VPC infrastructure environments. IBM Cloud Classic is not supported at this time. 
For more information about the difference between Classic and VPC infrastructures, see the IBM link:https://cloud.ibm.com/docs/cloud-infrastructure?topic=cloud-infrastructure-compare-infrastructure[documentation]. - -[id="prerequisites_preparing-to-install-on-ibm-cloud"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. - -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -[id="requirements-for-installing-ocp-on-ibm-cloud"] -== Requirements for installing {product-title} on IBM Cloud VPC - -Before installing {product-title} on IBM Cloud VPC, you must create a service account and configure an IBM Cloud account. See xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc#installing-ibm-cloud-account[Configuring an IBM Cloud account] for details about creating an account, enabling API services, configuring DNS, IBM Cloud account limits, and supported IBM Cloud VPC regions. - -You must manually manage your cloud credentials when installing a cluster to IBM Cloud VPC. Do this by configuring the Cloud Credential Operator (CCO) for manual mode before you install the cluster. For more information, see xref:../../installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for IBM Cloud VPC]. - -[id="choosing-a-method-to-install-ocp-on-ibm-cloud"] -== Choosing a method to install {product-title} on IBM Cloud VPC - -You can install {product-title} on IBM Cloud VPC using installer-provisioned infrastructure. This process involves using an installation program to provision the underlying infrastructure for your cluster. Installing {product-title} on IBM Cloud VPC using user-provisioned infrastructure is not supported at this time. - -See xref:../../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process] for more information about installer-provisioned installation processes. - -[id="choosing-an-method-to-install-ocp-on-ibm-cloud-installer-provisioned"] -=== Installing a cluster on installer-provisioned infrastructure - -You can install a cluster on IBM Cloud VPC infrastructure that is provisioned by the {product-title} installation program by using one of the following methods: - -* **xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc#installing-ibm-cloud-customizations[Installing a customized cluster on IBM Cloud VPC]**: You can install a customized cluster on IBM Cloud VPC infrastructure that the installation program provisions. The installation program allows for some customization to be applied at the installation stage. Many other customization options are available xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-cluster-tasks[post-installation]. - -* **xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc#installing-ibm-cloud-network-customizations[Installing a cluster on IBM Cloud VPC with network customizations]**: You can customize your {product-title} network configuration during installation, so that your cluster can coexist with your existing IP address allocations and adhere to your network requirements. 
- -* **xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc#installing-ibm-cloud-vpc[Installing a cluster on IBM Cloud VPC into an existing VPC]**: You can install {product-title} on an existing IBM Virtual Private Cloud (VPC). You can use this installation method if you have constraints set by the guidelines of your company, such as limits when creating new accounts or infrastructure. - -* **xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc#installing-ibm-cloud-private[Installing a private cluster on an existing VPC]**: You can install a private cluster on an existing Virtual Private Cloud (VPC). You can use this method to deploy {product-title} on an internal network that is not visible to the internet. - -[id="next-steps_preparing-to-install-on-ibm-cloud"] -== Next steps -* xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc#installing-ibm-cloud-account[Configuring an IBM Cloud account] diff --git a/installing/installing_ibm_cloud_public/snippets b/installing/installing_ibm_cloud_public/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_ibm_cloud_public/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_ibm_cloud_public/uninstalling-cluster-ibm-cloud.adoc b/installing/installing_ibm_cloud_public/uninstalling-cluster-ibm-cloud.adoc deleted file mode 100644 index 3cec6a033333..000000000000 --- a/installing/installing_ibm_cloud_public/uninstalling-cluster-ibm-cloud.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-ibm-cloud"] -= Uninstalling a cluster on IBM Cloud VPC -include::_attributes/common-attributes.adoc[] -:context: uninstalling-cluster-ibm-cloud - -toc::[] - -You can remove a cluster that you deployed to IBM Cloud VPC. - -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] diff --git a/installing/installing_ibm_power/_attributes b/installing/installing_ibm_power/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_ibm_power/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_ibm_power/images b/installing/installing_ibm_power/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_ibm_power/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_ibm_power/installing-ibm-power.adoc b/installing/installing_ibm_power/installing-ibm-power.adoc deleted file mode 100644 index 35256a6de020..000000000000 --- a/installing/installing_ibm_power/installing-ibm-power.adoc +++ /dev/null @@ -1,125 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-power"] -= Installing a cluster on IBM Power -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-power - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -IBM Power infrastructure that you provision. - -[IMPORTANT] -==== -Additional considerations exist for non-bare metal platforms. Review the information in the -link:https://access.redhat.com/articles/4207611[guidelines for deploying {product-title} on non-tested platforms] before you install an {product-title} cluster. 
-==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* Before you begin the installation process, you must clean the installation directory. This ensures that the required installation files are created and updated during the installation process. -* You provisioned xref:../../storage/persistent_storage/persistent-storage-ocs.adoc#persistent-storage-ocs[persistent storage using {rh-storage}] or other supported storage protocols for your cluster. To deploy a private image registry, you must set up persistent storage with `ReadWriteMany` access. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/minimum-ibm-power-system-requirements.adoc[leveloffset=+2] -include::modules/recommended-ibm-power-system-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -[id="creating-machines-bare-metal-power"] -== Installing {op-system} and starting the {product-title} bootstrap process - -To install {product-title} on IBM Power infrastructure that you provision, you must install {op-system-first} on the machines. 
When you install {op-system}, you must provide the Ignition config file that was generated by the {product-title} installation program for the type of machine you are installing. If you have configured suitable networking, DNS, and load balancing infrastructure, the {product-title} bootstrap process begins automatically after the {op-system} machines have rebooted. - -Follow either the steps to use an ISO image or network PXE booting to install {op-system} on the machines. - -include::modules/installation-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-static-network.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-pxe.adoc[leveloffset=+2] - -include::modules/rhcos-enabling-multipath-day-1-power.adoc[leveloffset=+2] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-baremetal.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/machine-configuration-tasks.adoc#rhcos-enabling-multipath_post-install-machine-configuration-tasks[Enabling multipathing with kernel arguments on {op-system}]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc b/installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc deleted file mode 100644 index 99d092efd7a0..000000000000 --- a/installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc +++ /dev/null @@ -1,129 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-ibm-power"] -= Installing a cluster on IBM Power in a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-ibm-power - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -IBM Power infrastructure that you provision in a restricted network. - -[IMPORTANT] -==== -Additional considerations exist for non-bare metal platforms. Review the information in the -link:https://access.redhat.com/articles/4207611[guidelines for deploying {product-title} on non-tested platforms] before you install an {product-title} cluster. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. 
-* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a mirror registry for installation in a restricted network] and obtained the `imageContentSources` data for your version of {product-title}. -* Before you begin the installation process, you must move or remove any existing installation files. This ensures that the required installation files are created and updated during the installation process. -+ -[IMPORTANT] -==== -Ensure that installation steps are performed on a machine with access to the installation media. -==== -* You provisioned xref:../../storage/persistent_storage/persistent-storage-ocs.adoc#persistent-storage-ocs[persistent storage using {rh-storage}] or other supported storage protocols for your cluster. To deploy a private image registry, you must set up persistent storage with `ReadWriteMany` access. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/minimum-ibm-power-system-requirements.adoc[leveloffset=+2] -include::modules/recommended-ibm-power-system-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -//You extract the installation program from the mirrored content. - -//You install the CLI on the mirror host. 
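Before generating the Ignition configs, it can help to spot-check the user-provisioned DNS records described in the modules above. The record names follow the documented requirements; the nameserver, cluster name, and addresses below are placeholders:

[source,terminal]
----
$ dig +noall +answer @<nameserver_ip> api.<cluster_name>.<base_domain> A
$ dig +noall +answer @<nameserver_ip> test.apps.<cluster_name>.<base_domain> A   # checks the wildcard *.apps record
$ dig +noall +answer @<nameserver_ip> -x <api_ip_address>                        # reverse lookup
----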
- -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -[id="creating-machines-ibm-power-restricted-network"] -== Installing {op-system} and starting the {product-title} bootstrap process - -To install {product-title} on IBM Power infrastructure that you provision, you must install {op-system-first} on the machines. When you install {op-system}, you must provide the Ignition config file that was generated by the {product-title} installation program for the type of machine you are installing. If you have configured suitable networking, DNS, and load balancing infrastructure, the {product-title} bootstrap process begins automatically after the {op-system} machines have rebooted. - -Follow either the steps to use an ISO image or network PXE booting to install {op-system} on the machines. - -include::modules/installation-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-static-network.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-pxe.adoc[leveloffset=+2] - -include::modules/rhcos-enabling-multipath-day-1-power.adoc[leveloffset=+2] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-change-management-state.adoc[leveloffset=+3] - -include::modules/registry-configuring-storage-baremetal.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -== Next steps - -* xref:../../post_installation_configuration/machine-configuration-tasks.adoc#rhcos-enabling-multipath_post-install-machine-configuration-tasks[Enabling multipathing with kernel arguments on {op-system}]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. 
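On the `install-config.yaml` side, the mirror registry referenced throughout this assembly typically appears as an `imageContentSources` mapping and, when the registry uses a private CA, an `additionalTrustBundle`. The values below are placeholders; the mirror entries must match the output of your mirroring procedure:

[source,yaml]
----
additionalTrustBundle: |           # CA certificate of the mirror registry
  -----BEGIN CERTIFICATE-----
  <certificate_contents>
  -----END CERTIFICATE-----
imageContentSources:
- mirrors:
  - mirror.example.com:8443/ocp4/openshift4
  source: quay.io/openshift-release-dev/ocp-release
- mirrors:
  - mirror.example.com:8443/ocp4/openshift4
  source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
----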
diff --git a/installing/installing_ibm_power/modules b/installing/installing_ibm_power/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_ibm_power/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_ibm_power/preparing-to-install-on-ibm-power.adoc b/installing/installing_ibm_power/preparing-to-install-on-ibm-power.adoc deleted file mode 100644 index fbc5222b7c09..000000000000 --- a/installing/installing_ibm_power/preparing-to-install-on-ibm-power.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-ibm-power"] -= Preparing to install on IBM Power -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-ibm-power - -toc::[] - -[id="preparing-to-install-on-ibm-power-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -[id="choosing-an-method-to-install-ocp-on-ibm-power"] -== Choosing a method to install {product-title} on IBM Power - -You can install a cluster on IBM Power infrastructure that you provision, by using one of the following methods: - -* **xref:../../installing/installing_ibm_power/installing-ibm-power.adoc#installing-ibm-power[Installing a cluster on IBM Power]**: You can install {product-title} on IBM Power infrastructure that you provision. - -* **xref:../../installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc#installing-restricted-networks-ibm-power[Installing a cluster on IBM Power in a restricted network]**: You can install {product-title} on IBM Power infrastructure that you provision in a restricted or disconnected network, by using an internal mirror of the installation release content. You can use this method to install a cluster that does not require an active internet connection to obtain the software components. You can also use this installation method to ensure that your clusters only use container images that satisfy your organizational controls on external content. 
diff --git a/installing/installing_ibm_power/snippets b/installing/installing_ibm_power/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_ibm_power/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_ibm_powervs/_attributes b/installing/installing_ibm_powervs/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_ibm_powervs/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_ibm_powervs/creating-ibm-power-vs-workspace.adoc b/installing/installing_ibm_powervs/creating-ibm-power-vs-workspace.adoc deleted file mode 100644 index 9c21524a0e22..000000000000 --- a/installing/installing_ibm_powervs/creating-ibm-power-vs-workspace.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-ibm-power-vs-workspace"] -= Creating an {ibmpowerProductName} Virtual Server workspace -include::_attributes/common-attributes.adoc[] -:context: creating-ibm-power-vs-workspace - -:FeatureName: {ibmpowerProductName} Virtual Server using installer-provisioned infrastructure -include::snippets/technology-preview.adoc[] - -include::modules/creating-ibm-power-vs-workspace-procedure.adoc[leveloffset=+1] - - -[id="next-steps_creating-ibm-power-vs-workspace"] -== Next steps -* xref:../../installing/installing_ibm_powervs/installing-ibm-power-vs-customizations.adoc#installing-ibm-power-vs-customizations[Installing a cluster on {ibmpowerProductName} Virtual Server with customizations] diff --git a/installing/installing_ibm_powervs/images b/installing/installing_ibm_powervs/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_ibm_powervs/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc b/installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc deleted file mode 100644 index 3aaa32c147b6..000000000000 --- a/installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc +++ /dev/null @@ -1,39 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-cloud-account-power-vs"] -= Configuring an IBM Cloud account -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-cloud-account-power-vs - -toc::[] - -Before you can install {product-title}, you must configure an IBM Cloud account. - -:FeatureName: {ibmpowerProductName} Virtual Server using installer-provisioned infrastructure -include::snippets/technology-preview.adoc[] - -[id="prerequisites_installing-ibm-cloud-account-power-vs"] -== Prerequisites - -* You have an IBM Cloud account with a subscription. You cannot install {product-title} on a free or on a trial IBM Cloud account. - -include::modules/quotas-and-limits-ibm-power-vs.adoc[leveloffset=+1] - -[id="configuring-dns-resolution-powervs"] -== Configuring DNS resolution - -How you configure DNS resolution depends on the type of {product-title} cluster you are installing: - -* If you are installing a public cluster, you use IBM Cloud Internet Services (CIS). -* If you are installing a private cluster, you use IBM Cloud DNS Services (DNS Services). 
- -include::modules/installation-cis-ibm-cloud.adoc[leveloffset=+1] - -include::modules/installation-ibm-cloud-iam-policies-api-key.adoc[leveloffset=+1] - -include::modules/installation-ibm-cloud-creating-api-key.adoc[leveloffset=+2] - -include::modules/installation-ibm-cloud-regions.adoc[leveloffset=+1] - -[id="next-steps_installing-ibm-cloud-account-power-vs"] -== Next steps -* xref:../../installing/installing_ibm_powervs/creating-ibm-power-vs-workspace.adoc#creating-ibm-power-vs-workspace[Creating an {ibmpowerProductName} Virtual Server workspace] diff --git a/installing/installing_ibm_powervs/installing-ibm-power-vs-customizations.adoc b/installing/installing_ibm_powervs/installing-ibm-power-vs-customizations.adoc deleted file mode 100644 index 0fb0e4bcedd8..000000000000 --- a/installing/installing_ibm_powervs/installing-ibm-power-vs-customizations.adoc +++ /dev/null @@ -1,63 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-power-vs-customizations"] -= Installing a cluster on {ibmpowerProductName} Virtual Server with customizations -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-power-vs-customizations - -toc::[] - -In {product-title} version {product-version}, you can install a customized cluster on infrastructure that the installation program provisions on {ibmpowerProductName} Virtual Server. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -:FeatureName: {ibmpowerProductName} Virtual Server using installer-provisioned infrastructure -include::snippets/technology-preview.adoc[] - -[id="prerequisites_installing-ibm-powervs-customizations"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc#installing-ibm-cloud-account-power-vs[configured an IBM Cloud account] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You configured the `ccoctl` utility before you installed the cluster. For more information, see xref:../../installing/installing_ibm_powervs/preparing-to-install-on-ibm-power-vs.adoc#cco-ccoctl-configuring_preparing-to-install-on-ibm-power-vs[Configuring the Cloud Credential Operator utility]. 
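The `ccoctl` prerequisite above generally comes down to extracting the `CredentialsRequest` objects from the release image and generating IBM Cloud service IDs from them. The following is a sketch only; the subcommand and flags shown are assumed from the general manual-mode workflow, and the authoritative invocation is in the Cloud Credential Operator utility documentation linked above:

[source,terminal]
----
$ RELEASE_IMAGE=$(openshift-install version | awk '/release image/ {print $3}')
$ oc adm release extract --credentials-requests --to=credrequests ${RELEASE_IMAGE}   # extract the CredentialsRequest objects
$ ccoctl ibmcloud create-service-id \
    --credentials-requests-dir=credrequests \
    --name=<cluster_name> \
    --resource-group-name=<resource_group_name>   # assumed flags; writes secret manifests for the installer
----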
- -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-ibm-cloud-export-variables.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-ibm-power-vs-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/manually-create-iam-ibm-cloud.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-power-vs-customizations-console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-power-vs-customizations-telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -[id="next-steps_installing-ibm-power-vs-customizations"] -== Next steps -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster] -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting] diff --git a/installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc b/installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc deleted file mode 100644 index b06b4d76a556..000000000000 --- a/installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc +++ /dev/null @@ -1,71 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-power-vs-private-cluster"] -= Installing a private cluster on {ibmpowerProductName} Virtual Server -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-power-vs-private-cluster - -toc::[] - -In {product-title} version {product-version}, you can install a private cluster into an existing VPC and {ibmpowerProductName} Virtual Server Workspace. The installation program provisions the rest of the required infrastructure, which you can further customize. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -:FeatureName: {ibmpowerProductName} Virtual Server using installer-provisioned infrastructure -include::snippets/technology-preview.adoc[] - -[id="prerequisites_installing-ibm-power-vs-private-cluster"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc#installing-ibm-cloud-account-power-vs[configured an IBM Cloud account] to host the cluster. 
-* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You configured the `ccoctl` utility before you installed the cluster. For more information, see xref:../../installing/installing_ibm_powervs/preparing-to-install-on-ibm-power-vs.adoc#cco-ccoctl-configuring_preparing-to-install-on-ibm-power-vs[Configuring the Cloud Credential Operator utility]. - -include::modules/private-clusters-default.adoc[leveloffset=+1] - -include::modules/private-clusters-about-ibm-power-vs.adoc[leveloffset=+1] - -include::modules/installation-custom-ibm-power-vs.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-ibm-cloud-export-variables.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-ibm-power-vs-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/manually-create-iam-ibm-cloud.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-power-vs-private-console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-power-vs-private-telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - - -[id="next-steps_installing-ibm-power-vs-private-cluster"] -== Next steps -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster] -* Optional: xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[Opt out of remote health reporting] diff --git a/installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc b/installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc deleted file mode 100644 index 31daad2354b2..000000000000 --- a/installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc +++ /dev/null @@ -1,66 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-powervs-vpc"] -= Installing a cluster on {ibmpowerProductName} Virtual Server into an existing VPC -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-powervs-vpc - -toc::[] - -In {product-title} version {product-version}, you can install a cluster into an existing Virtual Private Cloud (VPC) on IBM Cloud VPC. The installation program provisions the rest of the required infrastructure, which you can then further customize. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. 
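The customize-then-install flow described above usually looks like the following; the installation directory is a placeholder and the generated `install-config.yaml` is edited before the final step:

[source,terminal]
----
$ openshift-install create install-config --dir <installation_directory>   # generate a starting install-config.yaml
$ vi <installation_directory>/install-config.yaml                          # adjust parameters before installing
$ openshift-install create cluster --dir <installation_directory> --log-level=info
----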
- -:FeatureName: {ibmpowerProductName} Virtual Server using installer-provisioned infrastructure -include::snippets/technology-preview.adoc[] - -[id="prerequisites_installing-ibm-powervs-vpc"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc#installing-ibm-cloud-account-power-vs[configured an IBM Cloud account] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You configured the `ccoctl` utility before you installed the cluster. For more information, see xref:../../installing/installing_ibm_powervs/preparing-to-install-on-ibm-power-vs.adoc#cco-ccoctl-configuring_preparing-to-install-on-ibm-power-vs[Configuring the Cloud Credential Operator utility]. - -include::modules/installation-custom-ibm-power-vs.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-ibm-cloud-export-variables.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-ibm-power-vs-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/manually-create-iam-ibm-cloud.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-powervs-vpc-console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-powervs-vpc-telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -[id="next-steps_installing-ibm-powervs-vpc"] -== Next steps -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster] -* Optional: xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[Opt out of remote health reporting] diff --git a/installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc b/installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc deleted file mode 100644 index d603c9e2c5cd..000000000000 --- a/installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc +++ /dev/null @@ -1,77 +0,0 @@ -:_content-type: ASSEMBLY 
-[id="installing-restricted-networks-ibm-power-vs"] -= Installing a cluster on {ibmpowerProductName} Virtual Server in a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-ibm-power-vs - -toc::[] - -In {product-title} {product-version}, you can install a cluster on IBM Cloud VPC in a restricted network by creating an internal mirror of the installation release content on an existing Virtual Private Cloud (VPC) on IBM Cloud VPC. - -:FeatureName: {ibmpowerProductName} Virtual Server using installer-provisioned infrastructure -include::snippets/technology-preview.adoc[] - -[id="prerequisites_installing-ibm-power-vs-restricted"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc#installing-ibm-cloud-account-power-vs[configured an IBM Cloud account] to host the cluster. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installation-about-mirror-registry_installing-mirroring-installation-images[mirrored the images for a disconnected installation] to your registry and obtained the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps. -==== -* You have an existing VPC in IBM Cloud VPC. When installing a cluster in a restricted network, you cannot use the installer-provisioned VPC. You must use a user-provisioned VPC that satisfies one of the following requirements: -** Contains the mirror registry -** Has firewall rules or a peering connection to access the mirror registry hosted elsewhere -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You configured the `ccoctl` utility before you installed the cluster. For more information, see xref:../../installing/installing_ibm_powervs/preparing-to-install-on-ibm-power-vs.adoc#cco-ccoctl-configuring_preparing-to-install-on-ibm-power-vs[Configuring the Cloud Credential Operator utility]. 
- -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/installation-custom-ibm-power-vs.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-ibm-cloud-export-variables.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-ibm-power-vs-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/manually-create-iam-ibm-cloud.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-power-vs-restricted-console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-power-vs-restricted-telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -[id="next-steps_installing-ibm-power-vs-restricted"] -== Next steps -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster] -* Optional: xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[Opt out of remote health reporting] \ No newline at end of file diff --git a/installing/installing_ibm_powervs/modules b/installing/installing_ibm_powervs/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_ibm_powervs/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_ibm_powervs/preparing-to-install-on-ibm-power-vs.adoc b/installing/installing_ibm_powervs/preparing-to-install-on-ibm-power-vs.adoc deleted file mode 100644 index 75542226e3ac..000000000000 --- a/installing/installing_ibm_powervs/preparing-to-install-on-ibm-power-vs.adoc +++ /dev/null @@ -1,58 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-ibm-power-vs"] -= Preparing to install on {ibmpowerProductName} Virtual Server -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-ibm-power-vs - -toc::[] - -The installation workflows documented in this section are for {ibmpowerProductName} Virtual Server infrastructure environments. - -[id="prerequisites_preparing-to-install-on-ibm-power-vs"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. - -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. 
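As a brief, hedged illustration of the cluster login step that the preceding assembly covers through its modules: a successful installation writes a kubeconfig file that you can export before running `oc` commands. The directory name below is a placeholder.

[source,terminal]
----
$ export KUBECONFIG=<installation_directory>/auth/kubeconfig
$ oc whoami
----

The second command simply confirms that the exported credentials work by printing the current user.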
- -:FeatureName: {ibmpowerProductName} Virtual Server using installer-provisioned infrastructure -include::snippets/technology-preview.adoc[] - -[id="requirements-for-installing-ocp-on-ibm-power-vs"] -== Requirements for installing {product-title} on {ibmpowerProductName} Virtual Server - -Before installing {product-title} on {ibmpowerProductName} Virtual Server, you must create a service account and configure an IBM Cloud account. See xref:../../installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc#installing-ibm-cloud-account-power-vs[Configuring an IBM Cloud account] for details about creating an account, configuring DNS and supported {ibmpowerProductName} Virtual Server regions. - -You must manually manage your cloud credentials when installing a cluster to {ibmpowerProductName} Virtual Server. Do this by configuring the Cloud Credential Operator (CCO) for manual mode before you install the cluster. - -[id="choosing-a-method-to-install-ocp-on-ibm-power-vs"] -== Choosing a method to install {product-title} on {ibmpowerProductName} Virtual Server - -You can install {product-title} on {ibmpowerProductName} Virtual Server using installer-provisioned infrastructure. This process involves using an installation program to provision the underlying infrastructure for your cluster. Installing {product-title} on {ibmpowerProductName} Virtual Server using user-provisioned infrastructure is not supported at this time. - -See xref:../../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process] for more information about installer-provisioned installation processes. - -[id="choosing-an-method-to-install-ocp-on-power-vs-installer-provisioned"] -=== Installing a cluster on installer-provisioned infrastructure - -You can install a cluster on {ibmpowerProductName} Virtual Server infrastructure that is provisioned by the {product-title} installation program by using one of the following methods: - -* **xref:../../installing/installing_ibm_powervs/installing-ibm-power-vs-customizations.adoc#installing-ibm-power-vs-customizations[Installing a customized cluster on {ibmpowerProductName} Virtual Server]**: You can install a customized cluster on {ibmpowerProductName} Virtual Server infrastructure that the installation program provisions. The installation program allows for some customization to be applied at the installation stage. Many other customization options are available xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-cluster-tasks[post-installation]. - -* **xref:../../installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc#installing-ibm-powervs-vpc[Installing a cluster on {ibmpowerProductName} Virtual Server into an existing VPC]**: You can install {product-title} on {ibmpowerProductName} Virtual Server into an existing Virtual Private Cloud (VPC). You can use this installation method if you have constraints set by the guidelines of your company, such as limits when creating new accounts or infrastructure. - -* **xref:../../installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc#installing-ibm-power-vs-private-cluster[Installing a private cluster on {ibmpowerProductName} Virtual Server]**: You can install a private cluster on {ibmpowerProductName} Virtual Server. You can use this method to deploy {product-title} on an internal network that is not visible to the internet. 
- -* **xref:../../installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc#installing-restricted-networks-ibm-power-vs[Installing a cluster on {ibmpowerProductName} Virtual Server in a restricted network]**: You can install {product-title} on {ibmpowerProductName} Virtual Server on installer-provisioned infrastructure by using an internal mirror of the installation release content. You can use this method to install a cluster that does not require an active internet connection to obtain the software components. - -include::modules/cco-ccoctl-configuring.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_configuring-ibm-cloud-refreshing-ids"] - -.Additional resources -* xref:../../post_installation_configuration/cluster-tasks.adoc#refreshing-service-ids-ibm-cloud_post-install-cluster-tasks[Rotating API keys] - -[id="next-steps_preparing-to-install-on-ibm-power-vs"] -== Next steps -* xref:../../installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc#installing-ibm-cloud-account-power-vs[Configuring an IBM Cloud account] \ No newline at end of file diff --git a/installing/installing_ibm_powervs/snippets b/installing/installing_ibm_powervs/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_ibm_powervs/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_ibm_powervs/uninstalling-cluster-ibm-power-vs.adoc b/installing/installing_ibm_powervs/uninstalling-cluster-ibm-power-vs.adoc deleted file mode 100644 index ffcbdb93a46a..000000000000 --- a/installing/installing_ibm_powervs/uninstalling-cluster-ibm-power-vs.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-ibm-power-vs"] -= Uninstalling a cluster on {ibmpowerProductName} Virtual Server -include::_attributes/common-attributes.adoc[] -:context: uninstalling-cluster-ibm-power-vs - -toc::[] - -You can remove a cluster that you deployed to {ibmpowerProductName} Virtual Server. - -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] \ No newline at end of file diff --git a/installing/installing_ibm_z/_attributes b/installing/installing_ibm_z/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_ibm_z/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_ibm_z/images b/installing/installing_ibm_z/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_ibm_z/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_ibm_z/installing-ibm-z-kvm.adoc b/installing/installing_ibm_z/installing-ibm-z-kvm.adoc deleted file mode 100644 index 46e24611c254..000000000000 --- a/installing/installing_ibm_z/installing-ibm-z-kvm.adoc +++ /dev/null @@ -1,144 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-z-kvm"] -= Installing a cluster with {op-system-base} KVM on {ibmzProductName} and {linuxoneProductName} -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-z-kvm - -toc::[] - -[role="_abstract"] -In {product-title} version {product-version}, you can install a cluster on -{ibmzProductName} or {linuxoneProductName} infrastructure that you provision. - -[NOTE] -==== -While this document refers only to {ibmzProductName}, all information in it also applies to {linuxoneProductName}. 
-==== - -[IMPORTANT] -==== -Additional considerations exist for non-bare metal platforms. Review the information in the -link:https://access.redhat.com/articles/4207611[guidelines for deploying {product-title} on non-tested platforms] before you install an {product-title} cluster. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* Before you begin the installation process, you must clean the installation directory. This ensures that the required installation files are created and updated during the installation process. -* You provisioned xref:../../storage/persistent_storage/persistent-storage-ocs.adoc#persistent-storage-ocs[persistent storage using {rh-storage}] or other supported storage protocols for your cluster. To deploy a private image registry, you must set up persistent storage with `ReadWriteMany` access. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== -* You provisioned a {op-system-base} Kernel Virtual Machine (KVM) system that is hosted on the logical partition (LPAR) and based on {op-system-base} 8.6 or later. See link:https://access.redhat.com/support/policy/updates/errata#RHEL8_and_9_Life_Cycle[Red Hat Enterprise Linux 8 and 9 Life Cycle]. - - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-requirements-user-infra-ibm-z-kvm.adoc[leveloffset=+1] -include::modules/csr-management.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_ibmz-kvm-recommended-host-practices"] -.Additional resources - -* xref:../../scalability_and_performance/ibm-z-recommended-host-practices.adoc#ibm-z-recommended-host-practices[Recommended host practices for {ibmzProductName} & {linuxoneProductName} environments] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_ibmz-kvm-chrony-time-service"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/nw-operator-cr.adoc[leveloffset=+1] - 
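The module that follows this point describes generating the Kubernetes manifests and Ignition config files in full. As a rough sketch of the general shape of that step, with the directory name as a placeholder:

[source,terminal]
----
$ ./openshift-install create manifests --dir <installation_directory>
$ ./openshift-install create ignition-configs --dir <installation_directory>
----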
-include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -include::modules/installation-ibm-z-kvm-user-infra-installing-rhcos.adoc[leveloffset=+1] - -include::modules/ibm-z-secure-execution.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_Linux-as-an-IBM-Secure-Execution-host-or-guest"] -.Additional resources - -* link:https://www.ibm.com/docs/en/linux-on-systems?topic=virtualization-secure-execution[Introducing IBM Secure Execution for Linux] - -* link:https://www.ibm.com/docs/en/linux-on-systems?topic=ibmz-secure-execution[Linux as an IBM Secure Execution host or guest] - -include::modules/ibmz-configure-nbde-with-static-ip.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_configure-nbde-ibm-z-kvm"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-butane_installing-customizing[Creating machine configs with Butane] - -include::modules/installation-ibm-z-kvm-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/installation-full-ibm-z-kvm-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-static-network.adoc[leveloffset=+2] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-baremetal.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_ibmz-kvm-remote-health-monitoring"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -* link:https://access.redhat.com/solutions/4387261[How to generate SOSREPORT within OpenShift4 nodes without SSH]. - -[id="next-steps_ibmz-kvm"] -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. - -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_ibm_z/installing-ibm-z.adoc b/installing/installing_ibm_z/installing-ibm-z.adoc deleted file mode 100644 index efef77e3ae95..000000000000 --- a/installing/installing_ibm_z/installing-ibm-z.adoc +++ /dev/null @@ -1,147 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-z"] -= Installing a cluster with z/VM on {ibmzProductName} and {linuxoneProductName} -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-z - -toc::[] - -[role="_abstract"] -In {product-title} version {product-version}, you can install a cluster on -{ibmzProductName} or {linuxoneProductName} infrastructure that you provision. - -[NOTE] -==== -While this document refers only to {ibmzProductName}, all information in it also applies to {linuxoneProductName}. 
-==== - -[IMPORTANT] -==== -Additional considerations exist for non-bare metal platforms. Review the information in the -link:https://access.redhat.com/articles/4207611[guidelines for deploying {product-title} on non-tested platforms] before you install an {product-title} cluster. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* Before you begin the installation process, you must clean the installation directory. This ensures that the required installation files are created and updated during the installation process. -* You provisioned xref:../../storage/persistent_storage/persistent-storage-ocs.adoc#persistent-storage-ocs[persistent storage using {rh-storage}] or other supported storage protocols for your cluster. To deploy a private image registry, you must set up persistent storage with `ReadWriteMany` access. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. - -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/minimum-ibm-z-system-requirements.adoc[leveloffset=+2] -include::modules/preferred-ibm-z-system-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_ibmz-requirements"] -.Additional resources - -* See link:https://www.ibm.com/docs/en/zvm/7.1?topic=networks-bridging-hipersockets-lan-zvm-virtual-switch[Bridging a HiperSockets LAN with a z/VM Virtual Switch] in IBM Documentation. - -* See link:http://public.dhe.ibm.com/software/dw/linux390/perf/zvm_hpav00.pdf[Scaling HyperPAV alias devices on Linux guests on z/VM] for performance optimization. - -* See link:https://www.vm.ibm.com/library/presentations/lparperf.pdf[Topics in LPAR performance] for LPAR weight management and entitlements. 
- -* xref:../../scalability_and_performance/ibm-z-recommended-host-practices.adoc#ibm-z-recommended-host-practices[Recommended host practices for {ibmzProductName} & {linuxoneProductName} environments] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_ibmz-chrony-time-service"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -include::modules/ibmz-configure-nbde-with-static-ip.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_configure-nbde-ibm-z"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-butane_installing-customizing[Creating machine configs with Butane] - -include::modules/installation-ibm-z-user-infra-machines-iso.adoc[leveloffset=+1] - -include::modules/installation-user-infra-machines-static-network.adoc[leveloffset=+2] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-baremetal.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_ibmz-remote-health-monitoring"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -* link:https://access.redhat.com/solutions/4387261[How to generate SOSREPORT within OpenShift4 nodes without SSH]. - -[id="next-steps_ibmz-vm"] -== Next steps - -* xref:../../post_installation_configuration/machine-configuration-tasks.adoc#rhcos-enabling-multipath_post-install-machine-configuration-tasks[Enabling multipathing with kernel arguments on {op-system}]. 
- -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. - -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc b/installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc deleted file mode 100644 index 69529ec1d7ab..000000000000 --- a/installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc +++ /dev/null @@ -1,147 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-ibm-z-kvm"] -= Installing a cluster with {op-system-base} KVM on {ibmzProductName} and {linuxoneProductName} in a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-ibm-z-kvm - -toc::[] - -[role="_abstract"] -In {product-title} version {product-version}, you can install a cluster on -{ibmzProductName} or {linuxoneProductName} infrastructure that you provision in a restricted network. - -[NOTE] -==== -While this document refers to only {ibmzProductName}, all information in it also applies to {linuxoneProductName}. -==== - -[IMPORTANT] -==== -Additional considerations exist for non-bare metal platforms. Review the information in the -link:https://access.redhat.com/articles/4207611[guidelines for deploying {product-title} on non-tested platforms] before you install an {product-title} cluster. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a registry on your mirror host] and obtained the `imageContentSources` data for your version of {product-title}. -* You must move or remove any existing installation files, before you begin the installation process. This ensures that the required installation files are created and updated during the installation process. -+ -[IMPORTANT] -==== -Ensure that installation steps are done from a machine with access to the installation media. -==== -* You provisioned xref:../../storage/persistent_storage/persistent-storage-ocs.adoc#persistent-storage-ocs[persistent storage using {rh-storage}] or other supported storage protocols for your cluster. To deploy a private image registry, you must set up persistent storage with `ReadWriteMany` access. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== -* You provisioned a {op-system-base} Kernel Virtual Machine (KVM) system that is hosted on the logical partition (LPAR) and based on {op-system-base} 8.6 or later. See link:https://access.redhat.com/support/policy/updates/errata#RHEL8_and_9_Life_Cycle[Red Hat Enterprise Linux 8 and 9 Life Cycle]. 
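The final prerequisite above assumes a working {op-system-base} KVM host on the LPAR. Purely as a hedged sanity check, and not as part of the documented procedure, you can confirm that the virtualization tooling on that host responds before you continue:

[source,terminal]
----
$ sudo systemctl is-active libvirtd
$ virsh list --all
----

The first command reports whether the libvirt daemon is active; the second lists defined guests and confirms that the KVM tooling responds.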
- -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-requirements-user-infra-ibm-z-kvm.adoc[leveloffset=+1] -include::modules/csr-management.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_ibmz-kvm-restricted-recommended-host-practices"] -.Additional resources - -* xref:../../scalability_and_performance/ibm-z-recommended-host-practices.adoc#ibm-z-recommended-host-practices[Recommended host practices for {ibmzProductName} & {linuxoneProductName} environments] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_ibmz-network-user-infra"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -//You extract the installation program from the mirrored content. - -//You install the CLI on the mirror host. - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -include::modules/installation-ibm-z-kvm-user-infra-installing-rhcos.adoc[leveloffset=+1] - -include::modules/ibm-z-secure-execution.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_Linux-as-an-IBM-Secure-Execution-host-or-guest-restricted"] -.Additional resources - -* link:https://www.ibm.com/docs/en/linux-on-systems?topic=virtualization-secure-execution[Introducing IBM Secure Execution for Linux] - -* link:https://www.ibm.com/docs/en/linux-on-systems?topic=ibmz-secure-execution[Linux as an IBM Secure Execution host or guest] - -include::modules/ibmz-configure-nbde-with-static-ip.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_configure-nbde-ibm-z-kvm-restricted"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-butane_installing-customizing[Creating machine configs with Butane] - -include::modules/installation-ibm-z-kvm-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/installation-full-ibm-z-kvm-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-static-network.adoc[leveloffset=+2] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+2] - 
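The `olm-restricted-networks-configuring-operatorhub` module included immediately above documents the supported procedure for disabling the default catalog sources in a restricted network. For orientation only, that step generally takes the shape of a patch such as the following:

[source,terminal]
----
$ oc patch OperatorHub cluster --type json \
    -p '[{"op": "add", "path": "/spec/disableAllDefaultSources", "value": true}]'
----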
-include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-baremetal.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_ibmz-kvm-restricted-sosreport"] -.Additional resources - -* link:https://access.redhat.com/solutions/4387261[How to generate SOSREPORT within {product-title} version 4 nodes without SSH]. - -[id="next-steps_ibmz-kvm-restricted"] -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. diff --git a/installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc b/installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc deleted file mode 100644 index 68550f8a3746..000000000000 --- a/installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc +++ /dev/null @@ -1,146 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-ibm-z"] -= Installing a cluster with z/VM on {ibmzProductName} and {linuxoneProductName} in a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-ibm-z - -toc::[] - -[role="_abstract"] -In {product-title} version {product-version}, you can install a cluster on -{ibmzProductName} or {linuxoneProductName} infrastructure that you provision in a restricted network. - -[NOTE] -==== -While this document refers to only {ibmzProductName}, all information in it also applies to {linuxoneProductName}. -==== - -[IMPORTANT] -==== -Additional considerations exist for non-bare metal platforms. Review the information in the -link:https://access.redhat.com/articles/4207611[guidelines for deploying {product-title} on non-tested platforms] before you install an {product-title} cluster. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a mirror registry for installation in a restricted network] and obtained the `imageContentSources` data for your version of {product-title}. -* Before you begin the installation process, you must move or remove any existing installation files. This ensures that the required installation files are created and updated during the installation process. -+ -[IMPORTANT] -==== -Ensure that installation steps are done from a machine with access to the installation media. -==== -* You provisioned xref:../../storage/persistent_storage/persistent-storage-ocs.adoc#persistent-storage-ocs[persistent storage using {rh-storage}] or other supported storage protocols for your cluster. To deploy a private image registry, you must set up persistent storage with `ReadWriteMany` access. 
-* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/minimum-ibm-z-system-requirements.adoc[leveloffset=+2] -include::modules/preferred-ibm-z-system-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See link:https://www.ibm.com/docs/en/zvm/7.1?topic=networks-bridging-hipersockets-lan-zvm-virtual-switch[Bridging a HiperSockets LAN with a z/VM Virtual Switch] in IBM Documentation. - -* See link:http://public.dhe.ibm.com/software/dw/linux390/perf/zvm_hpav00.pdf[Scaling HyperPAV alias devices on Linux guests on z/VM] for performance optimization. - -* See link:https://www.vm.ibm.com/library/presentations/lparperf.pdf[Topics in LPAR performance] for LPAR weight management and entitlements. - -* xref:../../scalability_and_performance/ibm-z-recommended-host-practices.adoc#ibm-z-recommended-host-practices[Recommended host practices for {ibmzProductName} & {linuxoneProductName} environments] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -//You extract the installation program from the mirrored content. - -//You install the CLI on the mirror host. 
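The DNS validation module included earlier in this assembly covers the required forward and reverse lookups in detail. As a minimal, hedged sketch, you can spot-check resolution of the API endpoint with `dig`; the names and addresses are placeholders:

[source,terminal]
----
$ dig +noall +answer @<nameserver_ip> api.<cluster_name>.<base_domain>
$ dig +noall +answer @<nameserver_ip> -x <api_server_ip>
----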
- -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -include::modules/ibmz-configure-nbde-with-static-ip.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_Configure-nbde-ibm-z-restricted"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-butane_installing-customizing[Creating machine configs with Butane] - -include::modules/installation-ibm-z-user-infra-machines-iso.adoc[leveloffset=+1] - -include::modules/installation-user-infra-machines-static-network.adoc[leveloffset=+2] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-baremetal.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/solutions/4387261[How to generate SOSREPORT within {product-title} version 4 nodes without SSH]. - -[id="next-steps_ibmz-restricted"] -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. diff --git a/installing/installing_ibm_z/modules b/installing/installing_ibm_z/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_ibm_z/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_ibm_z/preparing-to-install-on-ibm-z-kvm.adoc b/installing/installing_ibm_z/preparing-to-install-on-ibm-z-kvm.adoc deleted file mode 100644 index b06398172c81..000000000000 --- a/installing/installing_ibm_z/preparing-to-install-on-ibm-z-kvm.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-ibm-z-kvm"] -= Preparing to install with {op-system-base} KVM on {ibmzProductName} and {linuxoneProductName} -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-ibm-z-kvm - -toc::[] - -[id="preparing-to-install-on-ibm-z-kvm-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. 
-* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -[id="choosing-an-method-to-install-ocp-on-ibm-z-kvm"] -== Choosing a method to install {product-title} with {op-system-base} KVM on {ibmzProductName} or {linuxoneProductName} - -You can install a cluster with {op-system-base} KVM on {ibmzProductName} or {linuxoneProductName} infrastructure that you provision, by using one of the following methods: - -* **xref:../../installing/installing_ibm_z/installing-ibm-z-kvm.adoc#installing-ibm-z-kvm[Installing a cluster with RHEL KVM on {ibmzProductName} and {linuxoneProductName}]**: You can install {product-title} with KVM on {ibmzProductName} or {linuxoneProductName} infrastructure that you provision. - -* **xref:../../installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc#installing-restricted-networks-ibm-z-kvm[Installing a cluster with {op-system-base} KVM on {ibmzProductName} and {linuxoneProductName} in a restricted network]**: You can install {product-title} with {op-system-base} KVM on {ibmzProductName} or {linuxoneProductName} infrastructure that you provision in a restricted or disconnected network, by using an internal mirror of the installation release content. You can use this method to install a cluster that does not require an active internet connection to obtain the software components. You can also use this installation method to ensure that your clusters only use container images that satisfy your organizational controls on external content. diff --git a/installing/installing_ibm_z/preparing-to-install-on-ibm-z.adoc b/installing/installing_ibm_z/preparing-to-install-on-ibm-z.adoc deleted file mode 100644 index 336a00f1fe74..000000000000 --- a/installing/installing_ibm_z/preparing-to-install-on-ibm-z.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-ibm-z"] -= Preparing to install with z/VM on {ibmzProductName} and {linuxoneProductName} -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-ibm-z - -toc::[] - -[id="preparing-to-install-on-ibm-z-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -[id="choosing-an-method-to-install-ocp-on-ibm-z"] -== Choosing a method to install {product-title} with z/VM on {ibmzProductName} or {linuxoneProductName} - -You can install a cluster with z/VM on {ibmzProductName} or {linuxoneProductName} infrastructure that you provision, by using one of the following methods: - -* **xref:../../installing/installing_ibm_z/installing-ibm-z.adoc#installing-ibm-z[Installing a cluster with z/VM on {ibmzProductName} and {linuxoneProductName}]**: You can install {product-title} with z/VM on {ibmzProductName} or {linuxoneProductName} infrastructure that you provision. 
- -* **xref:../../installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc#installing-restricted-networks-ibm-z[Installing a cluster with z/VM on {ibmzProductName} and {linuxoneProductName} in a restricted network]**: You can install {product-title} with z/VM on {ibmzProductName} or {linuxoneProductName} infrastructure that you provision in a restricted or disconnected network, by using an internal mirror of the installation release content. You can use this method to install a cluster that does not require an active internet connection to obtain the software components. You can also use this installation method to ensure that your clusters only use container images that satisfy your organizational controls on external content. diff --git a/installing/installing_ibm_z/snippets b/installing/installing_ibm_z/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_ibm_z/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_nutanix/_attributes b/installing/installing_nutanix/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_nutanix/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_nutanix/images b/installing/installing_nutanix/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_nutanix/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc b/installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc deleted file mode 100644 index b9185806c1fa..000000000000 --- a/installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc +++ /dev/null @@ -1,64 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-nutanix-installer-provisioned"] -= Installing a cluster on Nutanix -include::_attributes/common-attributes.adoc[] -:context: installing-nutanix-installer-provisioned -:platform: Nutanix - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on your Nutanix instance with two methods: - -* Using the link:https://access.redhat.com/documentation/en-us/assisted_installer_for_openshift_container_platform/2022/html-single/assisted_installer_for_openshift_container_platform/index[{ai-full}] hosted at link:http://console.redhat.com[console.redhat.com]. This method requires no setup for the installer, and is ideal for connected environments like Nutanix. Installing with the {ai-full} also provides integration with Nutanix, enabling autoscaling. See xref:../../installing/installing_on_prem_assisted/installing-on-prem-assisted.adoc#installing-on-prem-assisted[Installing an on-premise cluster using the {ai-full}] for additional details. - -* Using installer-provisioned infrastructure. Use the procedures in the following sections to use installer-provisioned infrastructure. Installer-provisioned infrastructure is ideal for installing in environments with air-gapped/restricted networks. - -== Prerequisites - -* You have reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* The installation program requires access to port 9440 on Prism Central and Prism Element. You verified that port 9440 is accessible. 
-* If you use a firewall, you have met these prerequisites: -** You confirmed that port 9440 is accessible. Control plane nodes must be able to reach Prism Central and Prism Element on port 9440 for the installation to succeed. -** You configured the firewall to xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[grant access] to the sites that {product-title} requires. This includes the use of Telemetry. -* If your Nutanix environment is using the default self-signed SSL certificate, replace it with a certificate that is signed by a CA. The installation program requires a valid CA-signed certificate to access the Prism Central API. For more information about replacing the self-signed certificate, see the https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Security-Guide-v6_1:mul-security-ssl-certificate-pc-t.html[Nutanix AOS Security Guide]. -+ -[IMPORTANT] -==== -Use 2048-bit certificates. The installation fails if you use 4096-bit certificates with Prism Central 2022.x. -==== -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/nutanix-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-adding-nutanix-root-certificates.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] -include::modules/installation-nutanix-config-yaml.adoc[leveloffset=+2] -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/manually-configure-iam-nutanix.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -== Configuring the default storage container -After you install the cluster, you must install the Nutanix CSI Operator and configure the default storage container for the cluster. - -For more information, see the Nutanix documentation for link:https://opendocs.nutanix.com/openshift/operators/csi/[installing the CSI Operator] and link:https://opendocs.nutanix.com/openshift/install/ipi/#openshift-image-registry-configuration[configuring registry storage]. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -== Next steps -* xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[Opt out of remote health reporting] -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster] diff --git a/installing/installing_nutanix/installing-nutanix-three-node.adoc b/installing/installing_nutanix/installing-nutanix-three-node.adoc deleted file mode 100644 index 611d2d75be0b..000000000000 --- a/installing/installing_nutanix/installing-nutanix-three-node.adoc +++ /dev/null @@ -1,14 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-nutanix-three-node"] -= Installing a three-node cluster on Nutanix -include::_attributes/common-attributes.adoc[] -:context: installing-nutanix-three-node - -toc::[] - -In {product-title} version {product-version}, you can install a three-node cluster on Nutanix.
A three-node cluster consists of three control plane machines, which also act as compute machines. This type of cluster provides a smaller, more resource-efficient option for cluster administrators and developers to use for testing, development, and production. - -include::modules/installation-three-node-cluster-cloud-provider.adoc[leveloffset=+1] - -== Next steps -* xref:../../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#installing-nutanix-installer-provisioned[Installing a cluster on Nutanix] diff --git a/installing/installing_nutanix/installing-restricted-networks-nutanix-installer-provisioned.adoc b/installing/installing_nutanix/installing-restricted-networks-nutanix-installer-provisioned.adoc deleted file mode 100644 index b5d46d457fa0..000000000000 --- a/installing/installing_nutanix/installing-restricted-networks-nutanix-installer-provisioned.adoc +++ /dev/null @@ -1,67 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-nutanix-installer-provisioned"] -= Installing a cluster on Nutanix in a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-nutanix-installer-provisioned - -toc::[] - -In {product-title} {product-version}, you can install a cluster on Nutanix infrastructure in a restricted network by creating an internal mirror of the installation release content. - -== Prerequisites - -* You have reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* The installation program requires access to port 9440 on Prism Central and Prism Element. You verified that port 9440 is accessible. -* If you use a firewall, you have met these prerequisites: -** You confirmed that port 9440 is accessible. Control plane nodes must be able to reach Prism Central and Prism Element on port 9440 for the installation to succeed. -** You configured the firewall to xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[grant access] to the sites that {product-title} requires. This includes the use of Telemetry. -* If your Nutanix environment is using the default self-signed SSL/TLS certificate, replace it with a certificate that is signed by a CA. The installation program requires a valid CA-signed certificate to access the Prism Central API. For more information about replacing the self-signed certificate, see the https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Security-Guide-v6_1:mul-security-ssl-certificate-pc-t.html[Nutanix AOS Security Guide]. -+ -[IMPORTANT] -==== -Use 2048-bit certificates. The installation fails if you use 4096-bit certificates with Prism Central 2022.x. -==== -* You have a container image registry, such as Red Hat Quay. If you do not already have a registry, you can create a mirror registry using xref:../../installing/disconnected_install/installing-mirroring-creating-registry.adoc#installing-mirroring-creating-registry[_mirror registry for Red Hat OpenShift_]. -* You have used the xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#installing-mirroring-disconnected[oc-mirror OpenShift CLI (oc) plugin] to mirror all of the required {product-title} content and other images, including the Nutanix CSI Operator, to your mirror registry. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps.
-==== - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-adding-nutanix-root-certificates.adoc[leveloffset=+1] - -include::modules/installation-nutanix-download-rhcos.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] -include::modules/installation-nutanix-config-yaml.adoc[leveloffset=+2] -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/manually-configure-iam-nutanix.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -== Post installation -Complete the following steps to complete the configuration of your cluster. - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+2] -include::modules/oc-mirror-updating-restricted-cluster-manifests.adoc[leveloffset=+2] -include::modules/registry-configuring-storage-nutanix.adoc[leveloffset=+2] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -== Next steps -* xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[Opt out of remote health reporting] -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster] diff --git a/installing/installing_nutanix/modules b/installing/installing_nutanix/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_nutanix/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_nutanix/preparing-to-install-on-nutanix.adoc b/installing/installing_nutanix/preparing-to-install-on-nutanix.adoc deleted file mode 100644 index a5a4596b86b1..000000000000 --- a/installing/installing_nutanix/preparing-to-install-on-nutanix.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-nutanix"] -= Preparing to install on Nutanix -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-nutanix - -toc::[] - -Before you install an {product-title} cluster, be sure that your Nutanix environment meets the following requirements. 
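One requirement called out repeatedly in the Nutanix assemblies in this section is network access to Prism Central and Prism Element on port 9440. Purely as a hedged illustration, a quick reachability check from the installation host might look like the following; the hostname is a placeholder:

[source,terminal]
----
$ curl -k -o /dev/null -w '%{http_code}\n' https://<prism_central_address>:9440/
----

Any HTTP status code in the response indicates that the port is reachable; a connection error or timeout suggests a network or firewall problem.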
- -include::modules/installation-nutanix-infrastructure.adoc[leveloffset=+1] -include::modules/installation-nutanix-installer-infra-reqs.adoc[leveloffset=+1] -include::modules/cco-ccoctl-configuring.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] \ No newline at end of file diff --git a/installing/installing_nutanix/snippets b/installing/installing_nutanix/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_nutanix/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_nutanix/uninstalling-cluster-nutanix.adoc b/installing/installing_nutanix/uninstalling-cluster-nutanix.adoc deleted file mode 100644 index c06ce809a46b..000000000000 --- a/installing/installing_nutanix/uninstalling-cluster-nutanix.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-nutanix"] -= Uninstalling a cluster on Nutanix -include::_attributes/common-attributes.adoc[] -:context: uninstalling-cluster-nutanix - -toc::[] - -You can remove a cluster that you deployed to Nutanix. - -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] diff --git a/installing/installing_on_prem_assisted/_attributes b/installing/installing_on_prem_assisted/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_on_prem_assisted/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_on_prem_assisted/assisted-installer-installing.adoc b/installing/installing_on_prem_assisted/assisted-installer-installing.adoc deleted file mode 100644 index bd6b65dcf992..000000000000 --- a/installing/installing_on_prem_assisted/assisted-installer-installing.adoc +++ /dev/null @@ -1,57 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-with-ai"] -= Installing with the Assisted Installer -include::_attributes/common-attributes.adoc[] -:context: assisted-installer-installing - -toc::[] - -After you ensure the cluster nodes and network requirements are met, you can begin installing the cluster. - -include::modules/assisted-installer-pre-installation-considerations.adoc[leveloffset=+1] - -include::modules/assisted-installer-setting-the-cluster-details.adoc[leveloffset=+1] - -include::modules/assisted-installer-configuring-host-network-interfaces.adoc[leveloffset=+1] - -[role="_additional_resources"] -.Additional resources -* xref:../installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#configuring-host-network-interfaces-in-the-install-config-yaml-file_ipi-install-installation-workflow[Configuring network interfaces] - -* link:http://nmstate.io[NMState version 2.1.4] - -include::modules/assisted-installer-adding-hosts-to-the-cluster.adoc[leveloffset=+1] - -include::modules/installing-with-usb-media.adoc[leveloffset=+1] - -include::modules/assisted-installer-booting-with-a-usb-drive.adoc[leveloffset=+1] - -include::modules/install-booting-from-an-iso-over-http-redfish.adoc[leveloffset=+1] - -[role="_additional_resources"] -.Additional resources - -* xref:../installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#bmc-addressing_ipi-install-installation-workflow[BMC addressing]. 
- -* xref:../installing_bare_metal_ipi/ipi-install-prerequisites.adoc#ipi-install-firmware-requirements-for-installing-with-virtual-media_ipi-install-prerequisites[Firmware requirements for installing with virtual media] - -include::modules/assisted-installer-configuring-hosts.adoc[leveloffset=+1] - -include::modules/assisted-installer-configuring-networking.adoc[leveloffset=+1] - -include::modules/assisted-installer-installing-the-cluster.adoc[leveloffset=+1] - -include::modules/assisted-installer-completing-the-installation.adoc[leveloffset=+1] - - -[role="_additional_resources"] -[id="ai-saas-installing-additional-resources_{context}"] -== Additional resources - -* xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#cli-installing-cli_cli-developer-commands[Installing the OpenShift CLI]. - -* xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#cli-logging-in_cli-developer-commands[Logging in to the OpenShift CLI] - -* xref:../../post_installation_configuration/preparing-for-users.adoc#creating-cluster-admin_post-install-preparing-for-users[Creating a cluster admin] - -* xref:../../post_installation_configuration/preparing-for-users.adoc#removing-kubeadmin_post-install-preparing-for-users[Removing the kubeadmin user] diff --git a/installing/installing_on_prem_assisted/assisted-installer-preparing-to-install.adoc b/installing/installing_on_prem_assisted/assisted-installer-preparing-to-install.adoc deleted file mode 100644 index 2af77e2eabcf..000000000000 --- a/installing/installing_on_prem_assisted/assisted-installer-preparing-to-install.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-with-ai"] -= Preparing to install with the Assisted Installer -include::_attributes/common-attributes.adoc[] -:context: assisted-installer-preparing-to-install - -toc::[] - -Before installing a cluster, you must ensure the cluster nodes and network meet the requirements. - -[id="assisted-installer-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* If you use a firewall, you must xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configure it] so that {ai-full} can access the resources it requires to function. 
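A quick way to sanity-check the firewall prerequisite is to confirm that hosts behind the firewall can reach the Red Hat hosted services at all. The following sketch uses two representative hostnames only; the linked firewall configuration documentation remains the authoritative list of URLs that {ai-full} requires.

[source,terminal]
----
# Representative reachability checks only; consult the firewall configuration
# documentation for the complete list of required URLs and ports.
$ curl -sI https://console.redhat.com | head -n 1
$ curl -sI https://api.openshift.com | head -n 1
----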
- -include::modules/assisted-installer-assisted-installer-prerequisites.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="ai-saas-preparing--to-install-additional-resources_{context}"] -== Additional resources - -* xref:../installing_bare_metal_ipi/ipi-install-prerequisites.adoc#ipi-install-firmware-requirements-for-installing-with-virtual-media_ipi-install-prerequisites[Firmware requirements for installing with virtual media] - -* xref:../installing_bare_metal_ipi/ipi-install-prerequisites.adoc#network-requirements-increase-mtu_ipi-install-prerequisites[Increase the network MTU] diff --git a/installing/installing_on_prem_assisted/images b/installing/installing_on_prem_assisted/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_on_prem_assisted/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_on_prem_assisted/installing-on-prem-assisted.adoc b/installing/installing_on_prem_assisted/installing-on-prem-assisted.adoc deleted file mode 100644 index 336ca53b4041..000000000000 --- a/installing/installing_on_prem_assisted/installing-on-prem-assisted.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-on-prem-assisted"] -= Installing an on-premise cluster using the {ai-full} -include::_attributes/common-attributes.adoc[] -:context: installing-on-prem-assisted - -toc::[] - -You can install {product-title} on on-premise hardware or on-premise VMs using the {ai-full}. Installing {product-title} using the {ai-full} supports x86_64, AArch64, ppc64le, and s390x CPU architectures. - -[NOTE] -==== -Installing {product-title} on {ibmzProductName} (s390x) is supported only with RHEL KVM installations. -==== - -include::modules/assisted-installer-using-the-assisted-installer.adoc[leveloffset=+1] - -[id="assisted-installer-api-support-policy"] -== API support for the {ai-full} - -Supported APIs for the {ai-full} are stable for a minimum of three months from the announcement of deprecation. 
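Because the {ai-full} is driven by a REST API, you can script against it in addition to using the web console. The following sketch is illustrative only: it assumes the hosted service endpoint `https://api.openshift.com/api/assisted-install/v2` and a bearer token obtained with the `ocm` CLI, both of which you should confirm against the {ai-full} API documentation for your environment.

[source,terminal]
----
# Assumed endpoint and token source; verify both against the Assisted Installer
# API documentation before relying on them.
$ export AI_API=https://api.openshift.com/api/assisted-install/v2
$ export TOKEN=$(ocm token)
$ curl -s -H "Authorization: Bearer ${TOKEN}" "${AI_API}/clusters" | jq -r '.[].name'
----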
diff --git a/installing/installing_on_prem_assisted/modules b/installing/installing_on_prem_assisted/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_on_prem_assisted/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_on_prem_assisted/snippets b/installing/installing_on_prem_assisted/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_on_prem_assisted/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_openstack/_attributes b/installing/installing_openstack/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_openstack/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_openstack/images b/installing/installing_openstack/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_openstack/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_openstack/installing-openstack-cloud-config-reference.adoc b/installing/installing_openstack/installing-openstack-cloud-config-reference.adoc deleted file mode 100644 index 83cb9f26fe91..000000000000 --- a/installing/installing_openstack/installing-openstack-cloud-config-reference.adoc +++ /dev/null @@ -1,10 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-cloud-config-reference"] -= OpenStack Cloud Controller Manager reference guide -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-cloud-config-reference - -toc::[] - -include::modules/nw-openstack-external-ccm.adoc[leveloffset=+1] -include::modules/cluster-cloud-controller-config-osp.adoc[leveloffset=+1] diff --git a/installing/installing_openstack/installing-openstack-installer-custom.adoc b/installing/installing_openstack/installing-openstack-installer-custom.adoc deleted file mode 100644 index 48c5ae2c18cb..000000000000 --- a/installing/installing_openstack/installing-openstack-installer-custom.adoc +++ /dev/null @@ -1,84 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-installer-custom"] -= Installing a cluster on OpenStack with customizations -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-installer-custom - -toc::[] - -In {product-title} version {product-version}, you can install a customized cluster on -{rh-openstack-first}. To customize the installation, modify parameters in the `install-config.yaml` before you install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You verified that {product-title} {product-version} is compatible with your {rh-openstack} version by using the xref:../../architecture/architecture-installation.adoc#supported-platforms-for-openshift-clusters_architecture-installation[Supported platforms for OpenShift clusters] section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. 
-* You have a storage service installed in {rh-openstack}, such as block storage (Cinder) or object storage (Swift). Object storage is the recommended storage technology for {product-title} registry cluster deployment. For more information, see xref:../../scalability_and_performance/optimization/optimizing-storage.adoc#optimizing-storage[Optimizing storage]. -* You understand performance and scalability practices for cluster scaling, control plane sizing, and etcd. For more information, see xref:../../scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc#recommended-host-practices[Recommended practices for scaling the cluster]. -* You have the metadata service enabled in {rh-openstack}. - -include::modules/installation-osp-default-deployment.adoc[leveloffset=+1] -include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] -include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] -include::modules/installation-registry-osp-creating-custom-pvc.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/installation-osp-setting-cloud-provider-options.adoc[leveloffset=+1] -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] -include::modules/installation-initializing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -See xref:../installing_openstack/installing-openstack-installer-custom.adoc#installation-configuration-parameters_installing-openstack-installer-custom[*Installation configuration parameters* section] for more information about the available parameters. - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] -include::modules/installation-configuration-parameters.adoc[leveloffset=+1] -include::modules/installation-osp-custom-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-deploying-bare-metal-machines.adoc[leveloffset=+2] -include::modules/installation-osp-provider-networks.adoc[leveloffset=+2] -include::modules/installation-osp-provider-network-preparation.adoc[leveloffset=+3] -include::modules/installation-osp-deploying-provider-networks-installer.adoc[leveloffset=+3] - -[TIP] -==== -You can add additional networks, including provider networks, to the `platform.openstack.additionalNetworkIDs` list. - -After you deploy your cluster, you can attach pods to additional networks. For more information, see xref:../../networking/multiple_networks/understanding-multiple-networks.adoc#understanding-multiple-networks[Understanding multiple networks]. 
-==== - -include::modules/installation-osp-config-yaml.adoc[leveloffset=+2] -include::modules/installation-osp-failure-domains-config.adoc[leveloffset=+2] -include::modules/installation-osp-external-lb-config.adoc[leveloffset=+2] -// include::modules/installation-osp-setting-worker-affinity.adoc[leveloffset=+1] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-launching-installer.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-cluster-status.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If you need to enable external access to node ports, xref:../../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc#nw-using-nodeport_configuring-ingress-cluster-traffic-nodeport[configure ingress cluster traffic by using a node port]. -* If you did not configure {rh-openstack} to accept application traffic over floating IP addresses, xref:../../post_installation_configuration/network-configuration.adoc#installation-osp-configuring-api-floating-ip_post-install-network-configuration[configure {rh-openstack} access with floating IP addresses]. diff --git a/installing/installing_openstack/installing-openstack-installer-kuryr.adoc b/installing/installing_openstack/installing-openstack-installer-kuryr.adoc deleted file mode 100644 index c8e26373a009..000000000000 --- a/installing/installing_openstack/installing-openstack-installer-kuryr.adoc +++ /dev/null @@ -1,88 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-installer-kuryr"] -= Installing a cluster on OpenStack with Kuryr -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-installer-kuryr - -toc::[] - -:FeatureName: Kuryr -include::modules/deprecated-feature.adoc[leveloffset=+1] - -In {product-title} version {product-version}, you can install a customized cluster on -{rh-openstack-first} that uses Kuryr SDN. To customize the installation, modify parameters in the `install-config.yaml` before you install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. 
-* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You verified that {product-title} {product-version} is compatible with your {rh-openstack} version by using the xref:../../architecture/architecture-installation.adoc#supported-platforms-for-openshift-clusters_architecture-installation[Supported platforms for OpenShift clusters] section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. -* You have a storage service installed in {rh-openstack}, such as block storage (Cinder) or object storage (Swift). Object storage is the recommended storage technology for {product-title} registry cluster deployment. For more information, see xref:../../scalability_and_performance/optimization/optimizing-storage.adoc#optimizing-storage[Optimizing storage]. -* You understand performance and scalability practices for cluster scaling, control plane sizing, and etcd. For more information, see xref:../../scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc#recommended-host-practices[Recommended practices for scaling the cluster]. - -include::modules/installation-osp-about-kuryr.adoc[leveloffset=+1] -include::modules/installation-osp-default-kuryr-deployment.adoc[leveloffset=+1] -include::modules/installation-osp-kuryr-increase-quota.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-neutron-configuration.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-octavia-configuration.adoc[leveloffset=+2] - -You can xref:../../networking/load-balancing-openstack.adoc#installation-osp-kuryr-octavia-configure[configure your cluster to use the Octavia OVN driver] after your {rh-openstack} cloud is upgraded from version 13 to version 16. 
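Before you switch to that driver, you can confirm which providers your upgraded Octavia service actually advertises. This is a minimal check and assumes that the `python-octaviaclient` plugin for the `openstack` CLI is installed:

[source,terminal]
----
# List the available Octavia providers; the "ovn" provider must be present
# before you configure the cluster to use the Octavia OVN driver.
$ openstack loadbalancer provider list
----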
- -include::modules/installation-osp-kuryr-known-limitations.adoc[leveloffset=+2] -include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] -include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/installation-osp-setting-cloud-provider-options.adoc[leveloffset=+1] -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] -include::modules/installation-initializing.adoc[leveloffset=+1] -include::modules/installation-configure-proxy.adoc[leveloffset=+2] -include::modules/installation-configuration-parameters.adoc[leveloffset=+1] -include::modules/installation-osp-custom-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-config-yaml.adoc[leveloffset=+2] -include::modules/installation-osp-failure-domains-config.adoc[leveloffset=+2] -include::modules/installation-osp-external-lb-config.adoc[leveloffset=+2] -include::modules/installation-osp-provider-networks.adoc[leveloffset=+2] -include::modules/installation-osp-provider-network-preparation.adoc[leveloffset=+3] -include::modules/installation-osp-deploying-provider-networks-installer.adoc[leveloffset=+3] - -[TIP] -==== -You can add additional networks, including provider networks, to the `platform.openstack.additionalNetworkIDs` list. - -After you deploy your cluster, you can attach pods to additional networks. For more information, see xref:../../networking/multiple_networks/understanding-multiple-networks.adoc#understanding-multiple-networks[Understanding multiple networks]. -==== - -include::modules/installation-osp-kuryr-port-pools.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-settings-installing.adoc[leveloffset=+2] -// include::modules/installation-osp-setting-worker-affinity.adoc[leveloffset=+1] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-launching-installer.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-cluster-status.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. 
-* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If you need to enable external access to node ports, xref:../../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc#nw-using-nodeport_configuring-ingress-cluster-traffic-nodeport[configure ingress cluster traffic by using a node port]. -* If you did not configure {rh-openstack} to accept application traffic over floating IP addresses, xref:../../post_installation_configuration/network-configuration.adoc#installation-osp-configuring-api-floating-ip_post-install-network-configuration[configure {rh-openstack} access with floating IP addresses]. diff --git a/installing/installing_openstack/installing-openstack-installer-ovs-dpdk.adoc b/installing/installing_openstack/installing-openstack-installer-ovs-dpdk.adoc deleted file mode 100644 index 702d8369c8b4..000000000000 --- a/installing/installing_openstack/installing-openstack-installer-ovs-dpdk.adoc +++ /dev/null @@ -1,84 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-installer-ovs-dpdk"] -= Installing a cluster on OpenStack that supports OVS-DPDK-connected compute machines -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-installer-ovs-dpdk - -toc::[] - -If your {rh-openstack-first} deployment has Open vSwitch with the Data Plane Development Kit (OVS-DPDK) enabled, you can install an {product-title} cluster on it. Clusters that run on such {rh-openstack} deployments use OVS-DPDK features by providing access to link:https://doc.dpdk.org/guides/prog_guide/poll_mode_drv.html[poll mode drivers]. - -== Prerequisites - -* Review details about the -xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] -processes. -** Verify that {product-title} {product-version} is compatible with your {rh-openstack} version by using the "Supported platforms for OpenShift clusters" section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. - -* Have a storage service installed in {rh-openstack}, like block storage (Cinder) or object storage (Swift). Object storage is the recommended storage technology for {product-registry} cluster deployment. For more information, see xref:../../scalability_and_performance/optimization/optimizing-storage.adoc#optimizing-storage[Optimizing storage]. - -* Have the metadata service enabled in {rh-openstack}. - -* Plan your {rh-openstack} OVS-DPDK deployment by referring to link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.2/html/network_functions_virtualization_planning_and_configuration_guide/assembly_ovsdpdk_parameters[Planning your OVS-DPDK deployment] in the Network Functions Virtualization Planning and Configuration Guide. - -* Configure your {rh-openstack} OVS-DPDK deployment according to link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.2/html/network_functions_virtualization_planning_and_configuration_guide/part-dpdk-configure[Configuring an OVS-DPDK deployment] in the Network Functions Virtualization Planning and Configuration Guide. 
- -** You must complete link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.2/html/network_functions_virtualization_planning_and_configuration_guide/part-dpdk-configure#p-ovs-dpdk-flavor-deploy-instance[Creating a flavor and deploying an instance for OVS-DPDK] before you install a cluster on {rh-openstack}. - -include::modules/installation-osp-default-deployment.adoc[leveloffset=+1] -include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] -include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] -include::modules/installation-initializing.adoc[leveloffset=+1] -include::modules/installation-configure-proxy.adoc[leveloffset=+2] -include::modules/installation-configuration-parameters.adoc[leveloffset=+1] -include::modules/installation-osp-custom-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-deploying-bare-metal-machines.adoc[leveloffset=+2] -include::modules/installation-osp-config-yaml.adoc[leveloffset=+2] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-osp-configuring-sr-iov.adoc[leveloffset=+1] -include::modules/installation-launching-installer.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-cluster-status.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -The cluster is operational. Before you can add OVS-DPDK compute machines though, you must perform additional tasks. - -include::modules/networking-osp-enabling-metadata.adoc[leveloffset=+1] -include::modules/networking-osp-enabling-vfio-noiommu.adoc[leveloffset=+1] -include::modules/installation-osp-dpdk-binding-vfio-pci.adoc[leveloffset=+1] -include::modules/installation-osp-dpdk-exposing-host-interface.adoc[leveloffset=+1] - -.Additional resources - -* xref:../../networking/multiple_networks/configuring-additional-network.adoc#nw-multus-host-device-object_configuring-additional-network[Creating an additional network attachment with the Cluster Network Operator] - -The cluster is installed and prepared for configuration. You must now perform the OVS-DPDK configuration tasks in <>. 
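After you apply the huge pages configuration that those tasks require, you can confirm that the pages were allocated on the affected nodes. This is a minimal verification sketch that assumes 1Gi huge pages; substitute `hugepages-2Mi` if that is what your configuration requests.

[source,terminal]
----
# Print each node name together with its allocatable 1Gi huge pages.
$ oc get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.allocatable.hugepages-1Gi}{"\n"}{end}'
----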
- -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -[role="_additional-resources"] -[id="additional-resources_installing-openstack-installer-ovs-dpdk"] -== Additional resources -* xref:../../scalability_and_performance/cnf-low-latency-tuning.adoc#cnf-understanding-low-latency_cnf-master[Low latency tuning of OpenShift Container Platform nodes] - -[id="next-steps_installing-openstack-installer-ovs-dpdk"] -== Next steps - -* To complete OVS-DPDK configuration for your cluster, xref:../../scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.adoc#what-huge-pages-do_huge-pages[Configure huge pages support]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If you need to enable external access to node ports, xref:../../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc#nw-using-nodeport_configuring-ingress-cluster-traffic-nodeport[configure ingress cluster traffic by using a node port]. -* If you did not configure {rh-openstack} to accept application traffic over floating IP addresses, xref:../../post_installation_configuration/network-configuration.adoc#installation-osp-configuring-api-floating-ip_post-install-network-configuration[configure {rh-openstack} access with floating IP addresses]. diff --git a/installing/installing_openstack/installing-openstack-installer-restricted.adoc b/installing/installing_openstack/installing-openstack-installer-restricted.adoc deleted file mode 100644 index 2fb483d74035..000000000000 --- a/installing/installing_openstack/installing-openstack-installer-restricted.adoc +++ /dev/null @@ -1,71 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-installer-restricted"] -= Installing a cluster on OpenStack in a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-installer-restricted - -toc::[] - -In {product-title} {product-version}, you can install a cluster on -{rh-openstack-first} in a restricted network by creating an internal mirror of the installation release content. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You verified that {product-title} {product-version} is compatible with your {rh-openstack} version by using the xref:../../architecture/architecture-installation.adoc#supported-platforms-for-openshift-clusters_architecture-installation[Supported platforms for OpenShift clusters] section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. 
-* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a registry on your mirror host] and obtained the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps. -==== -* You understand performance and scalability practices for cluster scaling, control plane sizing, and etcd. For more information, see xref:../../scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc#recommended-host-practices[Recommended practices for scaling the cluster]. -* You have the metadata service enabled in {rh-openstack}. - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] -include::modules/installation-osp-default-deployment.adoc[leveloffset=+1] -include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] -include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/installation-osp-failure-domains-config.adoc[leveloffset=+2] -include::modules/installation-osp-setting-cloud-provider-options.adoc[leveloffset=+1] -include::modules/installation-creating-image-restricted.adoc[leveloffset=+1] -include::modules/installation-initializing.adoc[leveloffset=+1] -include::modules/installation-configure-proxy.adoc[leveloffset=+2] -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] -include::modules/installation-osp-restricted-config-yaml.adoc[leveloffset=+2] -// include::modules/installation-osp-setting-worker-affinity.adoc[leveloffset=+1] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-launching-installer.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-cluster-status.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. 
-* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-must-gather-disconnected[Configure image streams] for the Cluster Samples Operator and the `must-gather` tool. -* Learn how to xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[use Operator Lifecycle Manager (OLM) on restricted networks]. -* If you did not configure {rh-openstack} to accept application traffic over floating IP addresses, xref:../../post_installation_configuration/network-configuration.adoc#installation-osp-configuring-api-floating-ip_post-install-network-configuration[configure {rh-openstack} access with floating IP addresses]. diff --git a/installing/installing_openstack/installing-openstack-installer.adoc b/installing/installing_openstack/installing-openstack-installer.adoc deleted file mode 100644 index 3dbb132d0a41..000000000000 --- a/installing/installing_openstack/installing-openstack-installer.adoc +++ /dev/null @@ -1,52 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-installer"] -= Installing a cluster on OpenStack -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-installer - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -{rh-openstack-first}. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* On {rh-openstack}, you have access to an external network that does not overlap these CIDR ranges: -** `10.0.0.0/16` -** `172.30.0.0/16` -** `10.128.0.0/14` -+ -If the external network overlaps these ranges, go to xref:./installing-openstack-installer-custom.adoc#installing-openstack-installer-custom[Installing a cluster on OpenStack with customizations] - -include::modules/installation-osp-default-deployment.adoc[leveloffset=+1] -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-launching-installer.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-cluster-status.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. 
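As a quick orientation after you log in with the `kubeadmin` credentials, you can print the web console URL directly from the CLI. This sketch assumes the default `auth/kubeconfig` file that the installation program writes to your installation directory; `<installation_directory>` is a placeholder.

[source,terminal]
----
# Use the credentials that the installation program generated, then print the
# web console URL for this cluster.
$ export KUBECONFIG=<installation_directory>/auth/kubeconfig
$ oc whoami --show-console
----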
- -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_openstack/installing-openstack-load-balancing.adoc b/installing/installing_openstack/installing-openstack-load-balancing.adoc deleted file mode 100644 index 6b1b879a9508..000000000000 --- a/installing/installing_openstack/installing-openstack-load-balancing.adoc +++ /dev/null @@ -1,9 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-load-balancing"] -= Load balancing deployments on OpenStack -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-load-balancing - -toc::[] - -include::modules/installation-osp-balancing-external-loads.adoc[leveloffset=+1] diff --git a/installing/installing_openstack/installing-openstack-nfv-preparing.adoc b/installing/installing_openstack/installing-openstack-nfv-preparing.adoc deleted file mode 100644 index 637fc7a2bced..000000000000 --- a/installing/installing_openstack/installing-openstack-nfv-preparing.adoc +++ /dev/null @@ -1,43 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-nfv-preparing"] -= Preparing to install a cluster that uses SR-IOV or OVS-DPDK on OpenStack -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-nfv-preparing - -toc::[] - -Before you install a {product-title} cluster that uses single-root I/O virtualization (SR-IOV) or Open vSwitch with the Data Plane Development Kit (OVS-DPDK) on {rh-openstack-first}, you must understand the requirements for each technology and then perform preparatory tasks. - -include::modules/installation-openstack-nfv-requirements.adoc[leveloffset=+1] -include::modules/installation-openstack-sr-iov-requirements.adoc[leveloffset=+2] -include::modules/installation-openstack-ovs-dpdk-requirements.adoc[leveloffset=+2] - -[id="installing-openstack-nfv-preparing-tasks-sr-iov"] -== Preparing to install a cluster that uses SR-IOV - -You must configure {rh-openstack} before you install a cluster that uses SR-IOV on it. - -include::modules/installation-osp-configuring-sr-iov.adoc[leveloffset=+2] - -[id="installing-openstack-nfv-preparing-tasks-ovs-dpdk"] -== Preparing to install a cluster that uses OVS-DPDK - -You must configure {rh-openstack} before you install a cluster that uses OVS-DPDK on it. - -* Complete link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.2/html/network_functions_virtualization_planning_and_configuration_guide/part-dpdk-configure#p-ovs-dpdk-flavor-deploy-instance[Creating a flavor and deploying an instance for OVS-DPDK] before you install a cluster on {rh-openstack}. - -After you perform pre-installation tasks, install your cluster by following the most relevant {product-title} on {rh-openstack} installation instructions. Then, perform the tasks under "Next steps" on this page.
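For the flavor work that the OVS-DPDK preparation calls for, the configuration usually comes down to standard Nova extra specs. The following sketch is illustrative only: the flavor name and sizes are placeholders, and the linked RHOSP guide remains the authoritative source for the properties that your deployment needs.

[source,terminal]
----
# Illustrative only: create a flavor and set common DPDK-oriented extra specs.
# The name, sizes, and exact property set depend on your RHOSP deployment.
$ openstack flavor create --ram 16384 --disk 25 --vcpus 8 ocp-dpdk-worker
$ openstack flavor set ocp-dpdk-worker \
    --property hw:cpu_policy=dedicated \
    --property hw:mem_page_size=large
----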
- -[id="next-steps_installing-openstack-nfv-preparing"] -== Next steps - -* For either type of deployment: -** xref:../../scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.adoc#what-huge-pages-do_huge-pages[Configure the Node Tuning Operator with huge pages support]. -* To complete SR-IOV configuration after you deploy your cluster: -** xref:../../networking/hardware_networks/installing-sriov-operator.adoc#installing-sr-iov-operator_installing-sriov-operator[Install the SR-IOV Operator]. -** xref:../../networking/hardware_networks/configuring-sriov-device.adoc#nw-sriov-networknodepolicy-object_configuring-sriov-device[Configure your SR-IOV network device]. -** xref:../../machine_management/creating_machinesets/creating-machineset-osp.adoc#machineset-yaml-osp-sr-iov_creating-machineset-osp[Create SR-IOV compute machines]. -* Consult the following references after you deploy your cluster to improve its performance: -** xref:../../networking/hardware_networks/using-dpdk-and-rdma.adoc#nw-openstack-ovs-dpdk-testpmd-pod_using-dpdk-and-rdma[A test pod template for clusters that use OVS-DPDK on OpenStack]. -** xref:../../networking/hardware_networks/add-pod.adoc#nw-openstack-sr-iov-testpmd-pod_add-pod[A test pod template for clusters that use SR-IOV on OpenStack]. -** xref:../../scalability_and_performance/cnf-create-performance-profiles.adoc#installation-openstack-ovs-dpdk-performance-profile_cnf-create-performance-profiles[A performance profile template for clusters that use OVS-DPDK on OpenStack]. diff --git a/installing/installing_openstack/installing-openstack-troubleshooting.adoc b/installing/installing_openstack/installing-openstack-troubleshooting.adoc deleted file mode 100644 index ed463008ca06..000000000000 --- a/installing/installing_openstack/installing-openstack-troubleshooting.adoc +++ /dev/null @@ -1,53 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-troubleshooting"] -= Troubleshooting -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-troubleshooting - -toc::[] - -//Very much a WIP. Chop up sections into mod docs as they're finalized. - -In the event of a failure in {product-title} on OpenStack installation, you can recover by understanding the likely failure modes and then starting to troubleshoot the problem. - -== View OpenStack instance logs - -== Prerequisites - -* OpenStack CLI tools are installed - -.Procedure - -. In a terminal window, run `openstack console log show ` - -The console logs appear. - -== SSH access to an instance - -== Prerequisites - -* OpenStack CLI tools are installed - -.Procedure - -. Get the IP address of the node on the private network: -+ -[source,terminal] ----- -$ openstack server list | grep master ----- -+ -.Example output -[source,terminal] ----- -| 0dcd756b-ad80-42f1-987a-1451b1ae95ba | cluster-wbzrr-master-1 | ACTIVE | cluster-wbzrr-openshift=172.24.0.21 | rhcos | m1.s2.xlarge | -| 3b455e43-729b-4e64-b3bd-1d4da9996f27 | cluster-wbzrr-master-2 | ACTIVE | cluster-wbzrr-openshift=172.24.0.18 | rhcos | m1.s2.xlarge | -| 775898c3-ecc2-41a4-b98b-a4cd5ae56fd0 | cluster-wbzrr-master-0 | ACTIVE | cluster-wbzrr-openshift=172.24.0.12 | rhcos | m1.s2.xlarge | ----- - -. 
Connect to the instance from the master that holds the API VIP (and API FIP) as a jumpbox: -+ -[source,terminal] ----- -$ ssh -J core@${FIP} core@ ----- diff --git a/installing/installing_openstack/installing-openstack-user-kuryr.adoc b/installing/installing_openstack/installing-openstack-user-kuryr.adoc deleted file mode 100644 index 00838b18c266..000000000000 --- a/installing/installing_openstack/installing-openstack-user-kuryr.adoc +++ /dev/null @@ -1,93 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-user-kuryr"] -= Installing a cluster on OpenStack with Kuryr on your own infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-user-kuryr - -toc::[] - -:FeatureName: Kuryr -include::modules/deprecated-feature.adoc[leveloffset=+1] - -In {product-title} version {product-version}, you can install a cluster on -{rh-openstack-first} that runs on user-provisioned infrastructure. - -Using your own infrastructure allows you to integrate your cluster with existing infrastructure and modifications. The process requires more labor on your part than installer-provisioned installations, because you must create all {rh-openstack} resources, like Nova servers, Neutron ports, and security groups. However, Red Hat provides Ansible playbooks to help you in the deployment process. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You verified that {product-title} {product-version} is compatible with your {rh-openstack} version by using the xref:../../architecture/architecture-installation.adoc#supported-platforms-for-openshift-clusters_architecture-installation[Supported platforms for OpenShift clusters] section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. -* You have an {rh-openstack} account where you want to install {product-title}. -* You understand performance and scalability practices for cluster scaling, control plane sizing, and etcd. For more information, see xref:../../scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc#recommended-host-practices[Recommended practices for scaling the cluster]. 
-* On the machine from which you run the installation program, you have: -** A single directory in which you can keep the files you create during the installation process -** Python 3 - -include::modules/installation-osp-about-kuryr.adoc[leveloffset=+1] -include::modules/installation-osp-default-kuryr-deployment.adoc[leveloffset=+1] -include::modules/installation-osp-kuryr-increase-quota.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-neutron-configuration.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-octavia-configuration.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-known-limitations.adoc[leveloffset=+2] -include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] -include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-downloading-modules.adoc[leveloffset=+1] -include::modules/installation-osp-downloading-playbooks.adoc[leveloffset=+1] -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -// include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] -include::modules/installation-osp-creating-image.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/installation-initializing.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+1] -include::modules/installation-osp-custom-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-config-yaml.adoc[leveloffset=+2] -include::modules/installation-osp-failure-domains-config.adoc[leveloffset=+2] -include::modules/installation-osp-provider-networks.adoc[leveloffset=+2] -include::modules/installation-osp-provider-network-preparation.adoc[leveloffset=+3] -include::modules/installation-osp-deploying-provider-networks-installer.adoc[leveloffset=+3] - -[TIP] -==== -You can add additional networks, including provider networks, to the `platform.openstack.additionalNetworkIDs` list. - -After you deploy your cluster, you can attach pods to additional networks. For more information, see xref:../../networking/multiple_networks/understanding-multiple-networks.adoc#understanding-multiple-networks[Understanding multiple networks]. 
-==== - -include::modules/installation-osp-kuryr-port-pools.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-settings-installing.adoc[leveloffset=+2] -include::modules/installation-osp-fixing-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-emptying-worker-pools.adoc[leveloffset=+2] -include::modules/installation-osp-modifying-networktype.adoc[leveloffset=+2] -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] -include::modules/installation-osp-converting-ignition-resources.adoc[leveloffset=+1] -include::modules/installation-osp-creating-control-plane-ignition.adoc[leveloffset=+1] -include::modules/installation-osp-creating-network-resources.adoc[leveloffset=+1] -include::modules/installation-osp-creating-bootstrap-machine.adoc[leveloffset=+1] -include::modules/installation-osp-creating-control-plane.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] -include::modules/installation-osp-deleting-bootstrap-resources.adoc[leveloffset=+1] -include::modules/installation-osp-creating-compute-machines.adoc[leveloffset=+1] -include::modules/installation-approve-csrs.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-installation.adoc[leveloffset=+1] -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If you need to enable external access to node ports, xref:../../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc#nw-using-nodeport_configuring-ingress-cluster-traffic-nodeport[configure ingress cluster traffic by using a node port]. -* If you did not configure {rh-openstack} to accept application traffic over floating IP addresses, xref:../../post_installation_configuration/network-configuration.adoc#installation-osp-configuring-api-floating-ip_post-install-network-configuration[configure {rh-openstack} access with floating IP addresses]. diff --git a/installing/installing_openstack/installing-openstack-user-sr-iov-kuryr.adoc b/installing/installing_openstack/installing-openstack-user-sr-iov-kuryr.adoc deleted file mode 100644 index 81ed4e91622d..000000000000 --- a/installing/installing_openstack/installing-openstack-user-sr-iov-kuryr.adoc +++ /dev/null @@ -1,85 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-user-sr-iov-kuryr"] -= Installing a cluster on OpenStack with Kuryr on your own SR-IOV infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-user-sr-iov-kuryr - -toc::[] - -In {product-title} {product-version}, you can install a cluster on -{rh-openstack-first} that runs on user-provisioned infrastructure and uses SR-IOV networks to run compute machines. - -Using your own infrastructure allows you to integrate your cluster with existing infrastructure and modifications. 
The process requires more labor on your part than installer-provisioned installations, because you must create all {rh-openstack} resources, such as Nova servers, Neutron ports, and security groups. However, Red Hat provides Ansible playbooks to help you in the deployment process. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You verified that {product-title} {product-version} is compatible with your {rh-openstack} version by using the xref:../../architecture/architecture-installation.adoc#supported-platforms-for-openshift-clusters_architecture-installation[Supported platforms for OpenShift clusters] section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. -* Your network configuration does not rely on a provider network. Provider networks are not supported. -* You have a {rh-openstack} account where you want to install {product-title}. -* You understand performance and scalability practices for cluster scaling, control plane sizing, and etcd. For more information, see xref:../../scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc#recommended-host-practices[Recommended practices for scaling the cluster]. -* On the machine where you run the installation program, you have: -** A single directory in which you can keep the files you create during the installation process -** Python 3 - -include::modules/installation-osp-about-kuryr.adoc[leveloffset=+1] -include::modules/installation-osp-default-kuryr-deployment.adoc[leveloffset=+1] -include::modules/installation-osp-kuryr-increase-quota.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-neutron-configuration.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-octavia-configuration.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-known-limitations.adoc[leveloffset=+2] -include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] -include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-downloading-modules.adoc[leveloffset=+1] -include::modules/installation-osp-downloading-playbooks.adoc[leveloffset=+1] -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -// include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] -include::modules/installation-osp-creating-image.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/installation-initializing.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+1] -include::modules/installation-osp-custom-subnet.adoc[leveloffset=+2] 
-include::modules/installation-osp-kuryr-config-yaml.adoc[leveloffset=+2] -include::modules/installation-osp-fixing-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-emptying-worker-pools.adoc[leveloffset=+2] -include::modules/installation-osp-modifying-networktype.adoc[leveloffset=+2] -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] -include::modules/installation-osp-converting-ignition-resources.adoc[leveloffset=+1] -include::modules/installation-osp-creating-control-plane-ignition.adoc[leveloffset=+1] -include::modules/installation-osp-creating-network-resources.adoc[leveloffset=+1] -include::modules/installation-osp-creating-bootstrap-machine.adoc[leveloffset=+1] -include::modules/installation-osp-creating-control-plane.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] -include::modules/installation-osp-deleting-bootstrap-resources.adoc[leveloffset=+1] -include::modules/installation-osp-configuring-sr-iov.adoc[leveloffset=+1] -include::modules/installation-osp-creating-sr-iov-compute-machines.adoc[leveloffset=+1] - -To finish configuring SR-IOV for your cluster, complete the SR-IOV-related "Next steps" that follow the installation process. - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-installation.adoc[leveloffset=+1] -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* To complete SR-IOV configuration for your cluster: -** xref:../../post_installation_configuration/network-configuration.adoc#networking-osp-preparing-for-sr-iov_post-install-network-configuration[Prepare the cluster for SR-IOV]. -** xref:../../scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.adoc#what-huge-pages-do_huge-pages[Install the performance operator with huge pages support]. -** xref:../../networking/hardware_networks/installing-sriov-operator.adoc#installing-sr-iov-operator_installing-sriov-operator[Install the SR-IOV Operator]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If you need to enable external access to node ports, xref:../../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc#nw-using-nodeport_configuring-ingress-cluster-traffic-nodeport[configure ingress cluster traffic by using a node port]. -* If you did not configure {rh-openstack} to accept application traffic over floating IP addresses, xref:../../post_installation_configuration/network-configuration.adoc#installation-osp-configuring-api-floating-ip_post-install-network-configuration[configure {rh-openstack} access with floating IP addresses]. 
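Because compute machines on user-provisioned infrastructure join the cluster through certificate signing requests, it helps to keep the approval commands close at hand while you work through these steps. This is a minimal sketch of the flow that the `installation-approve-csrs` module in this assembly documents in full; replace `<csr_name>` with the name of a pending request.

[source,terminal]
----
# List certificate signing requests and approve any that are still pending.
# Repeat until no new requests appear for the machines that you added.
$ oc get csr
$ oc adm certificate approve <csr_name>
----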
diff --git a/installing/installing_openstack/installing-openstack-user-sr-iov.adoc b/installing/installing_openstack/installing-openstack-user-sr-iov.adoc deleted file mode 100644 index 757828fc4c3b..000000000000 --- a/installing/installing_openstack/installing-openstack-user-sr-iov.adoc +++ /dev/null @@ -1,101 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-user-sr-iov"] -= Installing a cluster on OpenStack on your own SR-IOV infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-user-sr-iov - -toc::[] - -In {product-title} {product-version}, you can install a cluster on -{rh-openstack-first} that runs on user-provisioned infrastructure and uses single-root input/output virtualization (SR-IOV) networks to run compute machines. - -Using your own infrastructure allows you to integrate your cluster with existing infrastructure and modifications. The process requires more labor on your part than installer-provisioned installations, because you must create all {rh-openstack} resources, such as Nova servers, Neutron ports, and security groups. However, Red Hat provides Ansible playbooks to help you in the deployment process. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You verified that {product-title} {product-version} is compatible with your {rh-openstack} version by using the xref:../../architecture/architecture-installation.adoc#supported-platforms-for-openshift-clusters_architecture-installation[Supported platforms for OpenShift clusters] section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. -* Your network configuration does not rely on a provider network. Provider networks are not supported. -* You have an {rh-openstack} account where you want to install {product-title}. -* You understand performance and scalability practices for cluster scaling, control plane sizing, and etcd. For more information, see xref:../../scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc#recommended-host-practices[Recommended practices for scaling the cluster]. 
-* On the machine where you run the installation program, you have: -** A single directory in which you can keep the files you create during the installation process -** Python 3 - -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-default-deployment.adoc[leveloffset=+1] -include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] -include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] -include::modules/installation-osp-downloading-modules.adoc[leveloffset=+1] -include::modules/installation-osp-downloading-playbooks.adoc[leveloffset=+1] -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -include::modules/installation-osp-creating-image.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/installation-initializing.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+1] -include::modules/installation-osp-config-yaml.adoc[leveloffset=+2] -include::modules/installation-osp-custom-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-fixing-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-emptying-worker-pools.adoc[leveloffset=+2] -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] -include::modules/installation-osp-converting-ignition-resources.adoc[leveloffset=+1] -include::modules/installation-osp-creating-control-plane-ignition.adoc[leveloffset=+1] -include::modules/installation-osp-creating-network-resources.adoc[leveloffset=+1] -Optionally, you can use the `inventory.yaml` file that you created to customize your installation. For example, you can deploy a cluster that uses bare metal machines. - -include::modules/installation-osp-deploying-bare-metal-machines.adoc[leveloffset=+2] -include::modules/installation-osp-creating-bootstrap-machine.adoc[leveloffset=+1] -include::modules/installation-osp-creating-control-plane.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] -include::modules/installation-osp-deleting-bootstrap-resources.adoc[leveloffset=+1] -include::modules/installation-osp-configuring-sr-iov.adoc[leveloffset=+1] -include::modules/installation-osp-creating-sr-iov-compute-machines.adoc[leveloffset=+1] -include::modules/installation-approve-csrs.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-installation.adoc[leveloffset=+1] -The cluster is operational. Before you can configure it for SR-IOV networks though, you must perform additional tasks. - -include::modules/networking-osp-preparing-for-sr-iov.adoc[leveloffset=+1] -include::modules/networking-osp-enabling-metadata.adoc[leveloffset=+2] -include::modules/networking-osp-enabling-vfio-noiommu.adoc[leveloffset=+2] - -[NOTE] -==== -After you apply the machine config to the machine pool, you can xref:../../post_installation_configuration/machine-configuration-tasks.adoc#checking-mco-status_post-install-machine-configuration-tasks[watch the machine config pool status] to see when the machines are available. 
-==== - -// TODO: If bullet one of Next steps is truly required for this flow, these topics (in full or in part) could be added here rather than linked to. -// This document is quite long, however, and operator installation and configuration should arguably remain in their own assemblies. - -The cluster is installed and prepared for SR-IOV configuration. You must now perform the SR-IOV configuration tasks in "Next steps". - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_cluster-telemetry"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -[role="_additional-resources"] -[id="additional-resources_installing-openstack-user-sr-iov"] -== Additional resources -* xref:../../scalability_and_performance/cnf-low-latency-tuning.adoc#cnf-understanding-low-latency_cnf-master[Low latency tuning of OpenShift Container Platform nodes] - -[id="next-steps_installing-user-sr-iov"] -== Next steps - -* To complete SR-IOV configuration for your cluster: -** xref:../../scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.adoc#what-huge-pages-do_huge-pages[Configure huge pages support]. -** xref:../../networking/hardware_networks/installing-sriov-operator.adoc#installing-sr-iov-operator_installing-sriov-operator[Install the SR-IOV Operator]. -** xref:../../networking/hardware_networks/configuring-sriov-device.adoc#nw-sriov-networknodepolicy-object_configuring-sriov-device[Configure your SR-IOV network device]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If you need to enable external access to node ports, xref:../../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc#nw-using-nodeport_configuring-ingress-cluster-traffic-nodeport[configure ingress cluster traffic by using a node port]. -* If you did not configure {rh-openstack} to accept application traffic over floating IP addresses, xref:../../post_installation_configuration/network-configuration.adoc#installation-osp-configuring-api-floating-ip_post-install-network-configuration[configure {rh-openstack} access with floating IP addresses]. diff --git a/installing/installing_openstack/installing-openstack-user.adoc b/installing/installing_openstack/installing-openstack-user.adoc deleted file mode 100644 index 141336fa1b3f..000000000000 --- a/installing/installing_openstack/installing-openstack-user.adoc +++ /dev/null @@ -1,87 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-user"] -= Installing a cluster on OpenStack on your own infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-user - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -{rh-openstack-first} that runs on user-provisioned infrastructure. - -Using your own infrastructure allows you to integrate your cluster with existing infrastructure and modifications.
The process requires more labor on your part than installer-provisioned installations, because you must create all {rh-openstack} resources, like Nova servers, Neutron ports, and security groups. However, Red Hat provides Ansible playbooks to help you in the deployment process. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You verified that {product-title} {product-version} is compatible with your {rh-openstack} version by using the xref:../../architecture/architecture-installation.adoc#supported-platforms-for-openshift-clusters_architecture-installation[Supported platforms for OpenShift clusters] section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. -* You have an {rh-openstack} account where you want to install {product-title}. -* You understand performance and scalability practices for cluster scaling, control plane sizing, and etcd. For more information, see xref:../../scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc#recommended-host-practices[Recommended practices for scaling the cluster]. -* On the machine from which you run the installation program, you have: -** A single directory in which you can keep the files you create during the installation process -** Python 3 - -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-default-deployment.adoc[leveloffset=+1] -include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] -include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] -include::modules/installation-osp-downloading-modules.adoc[leveloffset=+1] -include::modules/installation-osp-downloading-playbooks.adoc[leveloffset=+1] -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -// include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] -include::modules/installation-osp-creating-image.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/installation-initializing.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+1] -include::modules/installation-osp-custom-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-config-yaml.adoc[leveloffset=+2] -include::modules/installation-osp-failure-domains-config.adoc[leveloffset=+2] -include::modules/installation-osp-fixing-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-emptying-worker-pools.adoc[leveloffset=+2] -include::modules/installation-osp-provider-networks.adoc[leveloffset=+2] -include::modules/installation-osp-provider-network-preparation.adoc[leveloffset=+3] 
-include::modules/installation-osp-deploying-provider-networks-installer.adoc[leveloffset=+3] - -[TIP] -==== -You can add additional networks, including provider networks, to the `platform.openstack.additionalNetworkIDs` list. - -After you deploy your cluster, you can attach pods to additional networks. For more information, see xref:../../networking/multiple_networks/understanding-multiple-networks.adoc#understanding-multiple-networks[Understanding multiple networks]. -==== - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] -include::modules/installation-osp-converting-ignition-resources.adoc[leveloffset=+1] -include::modules/installation-osp-creating-control-plane-ignition.adoc[leveloffset=+1] -include::modules/installation-osp-creating-network-resources.adoc[leveloffset=+1] - -Optionally, you can use the `inventory.yaml` file that you created to customize your installation. For example, you can deploy a cluster that uses bare metal machines. - -include::modules/installation-osp-deploying-bare-metal-machines.adoc[leveloffset=+2] -include::modules/installation-osp-creating-bootstrap-machine.adoc[leveloffset=+1] -include::modules/installation-osp-creating-control-plane.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] -include::modules/installation-osp-deleting-bootstrap-resources.adoc[leveloffset=+1] -include::modules/installation-osp-creating-compute-machines.adoc[leveloffset=+1] -include::modules/installation-approve-csrs.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-installation.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If you need to enable external access to node ports, xref:../../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc#nw-using-nodeport_configuring-ingress-cluster-traffic-nodeport[configure ingress cluster traffic by using a node port]. -* If you did not configure {rh-openstack} to accept application traffic over floating IP addresses, xref:../../post_installation_configuration/network-configuration.adoc#installation-osp-configuring-api-floating-ip_post-install-network-configuration[configure {rh-openstack} access with floating IP addresses]. 
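For reference, the certificate signing request (CSR) approval step included earlier in this assembly typically looks like the following. Treat these commands as a sketch of standard `oc` usage and follow the CSR approval module for the complete procedure.

[source,terminal]
----
# List certificate signing requests from the new machines.
$ oc get csr

# Approve a single pending request by name.
$ oc adm certificate approve <csr_name>

# Or approve every currently pending request in one pass.
$ oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve
----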
diff --git a/installing/installing_openstack/modules b/installing/installing_openstack/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_openstack/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_openstack/preparing-to-install-on-openstack.adoc b/installing/installing_openstack/preparing-to-install-on-openstack.adoc deleted file mode 100644 index 8ecf10b5b272..000000000000 --- a/installing/installing_openstack/preparing-to-install-on-openstack.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-openstack"] -= Preparing to install on OpenStack -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-openstack - -toc::[] - -You can install {product-title} on {rh-openstack-first}. -ifdef::openshift-origin[{product-title} version {product-version} supports OpenStack Train.] - -[id="preparing-to-install-on-openstack-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -[id="choosing-an-method-to-install-ocp-on-openstack"] -== Choosing a method to install {product-title} on OpenStack - -You can install {product-title} on installer-provisioned or user-provisioned infrastructure. The default installation type uses installer-provisioned infrastructure, where the installation program provisions the underlying infrastructure for the cluster. You can also install {product-title} on infrastructure that you provision. If you do not use infrastructure that the installation program provisions, you must manage and maintain the cluster resources yourself. - -See xref:../../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process] for more information about installer-provisioned and user-provisioned installation processes. - -[id="choosing-an-method-to-install-ocp-on-openstack-installer-provisioned"] -=== Installing a cluster on installer-provisioned infrastructure - -You can install a cluster on {rh-openstack-first} infrastructure that is provisioned by the {product-title} installation program, by using one of the following methods: - -* **xref:../../installing/installing_openstack/installing-openstack-installer-custom.adoc#installing-openstack-installer-custom[Installing a cluster on OpenStack with customizations]**: You can install a customized cluster on {rh-openstack}. The installation program allows for some customization to be applied at the installation stage. Many other customization options are available xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-cluster-tasks[post-installation]. - -* **xref:../../installing/installing_openstack/installing-openstack-installer-kuryr.adoc#installing-openstack-installer-kuryr[Installing a cluster on OpenStack with Kuryr]**: You can install a customized {product-title} cluster on {rh-openstack} that uses Kuryr SDN. Kuryr and {product-title} integration is primarily designed for {product-title} clusters running on {rh-openstack} VMs. Kuryr improves the network performance by plugging {product-title} pods into {rh-openstack} SDN. 
In addition, it provides interconnectivity between pods and {rh-openstack} virtual instances. - -* **xref:../../installing/installing_openstack/installing-openstack-installer-restricted.adoc#installing-openstack-installer-restricted[Installing a cluster on OpenStack in a restricted network]**: You can install {product-title} on {rh-openstack} in a restricted or disconnected network by creating an internal mirror of the installation release content. You can use this method to install a cluster that does not require an active internet connection to obtain the software components. You can also use this installation method to ensure that your clusters only use container images that satisfy your organizational controls on external content. - -[id="choosing-an-method-to-install-ocp-on-openstack-user-provisioned"] -=== Installing a cluster on user-provisioned infrastructure - -You can install a cluster on {rh-openstack} infrastructure that you provision, by using one of the following methods: - -* **xref:../../installing/installing_openstack/installing-openstack-user.adoc#installing-openstack-user[Installing a cluster on OpenStack on your own infrastructure]**: You can install {product-title} on user-provisioned {rh-openstack} infrastructure. By using this installation method, you can integrate your cluster with existing infrastructure and modifications. For installations on user-provisioned infrastructure, you must create all {rh-openstack} resources, like Nova servers, Neutron ports, and security groups. You can use the provided Ansible playbooks to assist with the deployment process. - -* **xref:../../installing/installing_openstack/installing-openstack-user-kuryr.adoc#installing-openstack-user-kuryr[Installing a cluster on OpenStack with Kuryr on your own infrastructure]**: You can install {product-title} on user-provisioned {rh-openstack} infrastructure that uses Kuryr SDN. - -include::modules/security-osp-validating-certificates.adoc[leveloffset=+1] - -include::modules/security-osp-validating-certificates-manually.adoc[leveloffset=+2] - diff --git a/installing/installing_openstack/snippets b/installing/installing_openstack/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_openstack/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_openstack/uninstalling-cluster-openstack.adoc b/installing/installing_openstack/uninstalling-cluster-openstack.adoc deleted file mode 100644 index 05b70252f927..000000000000 --- a/installing/installing_openstack/uninstalling-cluster-openstack.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-openstack"] -= Uninstalling a cluster on OpenStack -include::_attributes/common-attributes.adoc[] -:context: uninstalling-cluster-openstack - -toc::[] - -You can remove a cluster that you deployed to {rh-openstack-first}. 
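For clusters that the installation program provisioned, removal is normally a single command run against the original asset directory. The following example is illustrative; `<installation_directory>` is a placeholder for the directory that you used during installation.

[source,terminal]
----
# Destroy the cluster and the cloud resources that the installer created.
$ ./openshift-install destroy cluster --dir <installation_directory> --log-level info
----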
- -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] diff --git a/installing/installing_openstack/uninstalling-openstack-user.adoc b/installing/installing_openstack/uninstalling-openstack-user.adoc deleted file mode 100644 index e018cc6b1bf3..000000000000 --- a/installing/installing_openstack/uninstalling-openstack-user.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-openstack-user"] -= Uninstalling a cluster on {rh-openstack} from your own infrastructure -include::_attributes/common-attributes.adoc[] -:context: uninstalling-openstack-user - -toc::[] - -You can remove a cluster that you deployed to {rh-openstack-first} on user-provisioned infrastructure. - -// include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] -include::modules/installation-osp-downloading-modules.adoc[leveloffset=+1] -include::modules/installation-uninstall-infra.adoc[leveloffset=+1] diff --git a/installing/installing_platform_agnostic/_attributes b/installing/installing_platform_agnostic/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_platform_agnostic/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_platform_agnostic/images b/installing/installing_platform_agnostic/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_platform_agnostic/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_platform_agnostic/installing-platform-agnostic.adoc b/installing/installing_platform_agnostic/installing-platform-agnostic.adoc deleted file mode 100644 index 1a2386a03a63..000000000000 --- a/installing/installing_platform_agnostic/installing-platform-agnostic.adoc +++ /dev/null @@ -1,119 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-platform-agnostic"] -= Installing a cluster on any platform -include::_attributes/common-attributes.adoc[] -:context: installing-platform-agnostic - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -any infrastructure that you provision, including virtualization and cloud environments. - -[IMPORTANT] -==== -Review the information in the link:https://access.redhat.com/articles/4207611[guidelines for deploying {product-title} on non-tested platforms] before you attempt to install an {product-title} cluster in virtualized or cloud environments. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. 
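Several of the modules that follow rely on an SSH key that the installation program embeds in the cluster nodes. The commands below are a minimal sketch of that setup, assuming an `ed25519` key at an example path; the `ssh-agent` module describes the full procedure.

[source,terminal]
----
# Generate a key pair without a passphrase (the path is an example).
$ ssh-keygen -t ed25519 -N '' -f ~/.ssh/id_ed25519

# Start the agent and add the private key for use by the installation program.
$ eval "$(ssh-agent -s)"
$ ssh-add ~/.ssh/id_ed25519
----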
- -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -include::modules/creating-machines-bare-metal.adoc[leveloffset=+1] - -include::modules/installation-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-pxe.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-advanced.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-static-network.adoc[leveloffset=+3] - -include::modules/architecture-rhcos-updating-bootloader.adoc[leveloffset=+2] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+2] - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-baremetal.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout-bare-metal.adoc[leveloffset=+3] - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. 
-* xref:../../registry/configuring_registry_storage/configuring-registry-storage-baremetal.adoc#configuring-registry-storage-baremetal[Set up your registry and configure registry storage]. diff --git a/installing/installing_platform_agnostic/modules b/installing/installing_platform_agnostic/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_platform_agnostic/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_platform_agnostic/snippets b/installing/installing_platform_agnostic/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_platform_agnostic/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_rhv/_attributes b/installing/installing_rhv/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_rhv/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_rhv/images b/installing/installing_rhv/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_rhv/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_rhv/modules b/installing/installing_rhv/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_rhv/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_rhv/snippets b/installing/installing_rhv/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_rhv/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_sno/_attributes b/installing/installing_sno/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_sno/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_sno/images b/installing/installing_sno/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_sno/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_sno/install-sno-installing-sno.adoc b/installing/installing_sno/install-sno-installing-sno.adoc deleted file mode 100644 index 8fb7af3c0ab6..000000000000 --- a/installing/installing_sno/install-sno-installing-sno.adoc +++ /dev/null @@ -1,77 +0,0 @@ -:_content-type: ASSEMBLY -[id="install-sno-installing-sno"] -= Installing OpenShift on a single node -:context: install-sno-installing-sno-with-the-assisted-installer -include::_attributes/common-attributes.adoc[] - -toc::[] - -You can install {sno} using the web-based Assisted Installer and a discovery ISO that you generate using the Assisted Installer. You can also install {sno} by using `coreos-installer` to generate the installation ISO. - -ifndef::openshift-origin[] - -== Installing {sno} using the Assisted Installer - -To install {product-title} on a single node, use the web-based Assisted Installer wizard to guide you through the process and manage the installation. 
- -include::modules/install-sno-generating-the-discovery-iso-with-the-assisted-installer.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#persistent-storage-using-lvms_logical-volume-manager-storage[Persistent storage using logical volume manager storage] -* xref:../../virt/about_virt/about-virt.adoc#virt-what-you-can-do-with-virt_about-virt[What you can do with OpenShift Virtualization] - -include::modules/install-sno-installing-with-the-assisted-installer.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_sno/install-sno-installing-sno.adoc#installing-with-usb-media_install-sno-installing-sno-with-the-assisted-installer[Creating a bootable ISO image on a USB drive] - -* xref:../../installing/installing_sno/install-sno-installing-sno.adoc#install-booting-from-an-iso-over-http-redfish_install-sno-installing-sno-with-the-assisted-installer[Booting from an HTTP-hosted ISO image using the Redfish API] - -* xref:../../nodes/nodes/nodes-sno-worker-nodes.adoc#nodes-sno-worker-nodes[Adding worker nodes to {sno} clusters] - -endif::openshift-origin[] - -[id="install-sno-installing-sno-manually"] -== Installing {sno} manually - -To install {product-title} on a single node, first generate the installation ISO, and then boot the server from the ISO. You can monitor the installation using the `openshift-install` installation program. - -include::modules/install-sno-generating-the-install-iso-manually.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../post_installation_configuration/enabling-cluster-capabilities.adoc[Enabling cluster capabilities] for more information about enabling cluster capabilities that were disabled prior to installation. -* See xref:../../installing/cluster-capabilities.adoc#explanation_of_capabilities_cluster-capabilities[Optional cluster capabilities in {product-title} {product-version}] for more information about the features provided by each capability. 
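As an informal sketch of the manual flow described above, the following commands embed the generated Ignition configuration into the live ISO and then monitor the installation. The asset directory `ocp` and the Ignition file name are assumptions based on typical single-node output; follow the preceding module for the exact artifacts.

[source,terminal]
----
# Embed the single-node Ignition config into the live ISO (file names assumed).
$ coreos-installer iso ignition embed -fi ocp/bootstrap-in-place-for-live-iso.ign rhcos-live.iso

# After booting the target server from the ISO, wait for the installation to finish.
$ ./openshift-install --dir=ocp wait-for install-complete
----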
- -include::modules/install-sno-monitoring-the-installation-manually.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_sno/install-sno-installing-sno.adoc#installing-with-usb-media_install-sno-installing-sno-with-the-assisted-installer[Creating a bootable ISO image on a USB drive] - -* xref:../../installing/installing_sno/install-sno-installing-sno.adoc#install-booting-from-an-iso-over-http-redfish_install-sno-installing-sno-with-the-assisted-installer[Booting from an HTTP-hosted ISO image using the Redfish API] - -* xref:../../nodes/nodes/nodes-sno-worker-nodes.adoc#nodes-sno-worker-nodes[Adding worker nodes to {sno} clusters] - -== Installing {sno} on AWS - -include::modules/install-sno_additional-requirements-for-installing-on-a-single-node-on-aws.adoc[leveloffset=+2] - -include::modules/installation-aws_con_installing-sno-on-aws.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[Installing a cluster on AWS with customizations] - -include::modules/install-sno-installing-with-usb-media.adoc[leveloffset=+1] - -include::modules/install-booting-from-an-iso-over-http-redfish.adoc[leveloffset=+1] - -include::modules/creating-custom-live-rhcos-iso.adoc[leveloffset=+1] diff --git a/installing/installing_sno/install-sno-preparing-to-install-sno.adoc b/installing/installing_sno/install-sno-preparing-to-install-sno.adoc deleted file mode 100644 index e878cc67f6a7..000000000000 --- a/installing/installing_sno/install-sno-preparing-to-install-sno.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-sno"] -= Preparing to install on a single node -:context: install-sno-preparing -include::_attributes/common-attributes.adoc[] - -toc::[] - -[id="preparing-to-install-sno_{context}"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You have read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. 
- -include::modules/install-sno-about-installing-on-a-single-node.adoc[leveloffset=+1] - -include::modules/install-sno-requirements-for-installing-on-a-single-node.adoc[leveloffset=+1] diff --git a/installing/installing_sno/modules b/installing/installing_sno/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/installing/installing_sno/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/installing/installing_sno/snippets b/installing/installing_sno/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_sno/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_vmc/installing-restricted-networks-vmc-user-infra.adoc b/installing/installing_vmc/installing-restricted-networks-vmc-user-infra.adoc deleted file mode 100644 index 3e0c90912326..000000000000 --- a/installing/installing_vmc/installing-restricted-networks-vmc-user-infra.adoc +++ /dev/null @@ -1,159 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-vmc-user-infra"] -= Installing a cluster on VMC in a restricted network with user-provisioned infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-vmc-user-infra - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on VMware vSphere infrastructure that you provision in a restricted network by deploying it to link:https://cloud.vmware.com/vmc-aws[VMware Cloud (VMC) on AWS]. - -Once you configure your VMC environment for {product-title} deployment, you use the {product-title} installation program from the bastion management host, co-located in the VMC environment. The installation program and control plane automates the process of deploying and managing the resources needed for the {product-title} cluster. - -include::snippets/vcenter-support.adoc[] - -include::modules/setting-up-vmc-for-vsphere.adoc[leveloffset=+1] -include::modules/vmc-sizer-tool.adoc[leveloffset=+2] - -== vSphere prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a registry on your mirror host] and obtain the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer -to complete all installation steps. -==== -* You provisioned xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#installation-registry-storage-block-recreate-rollout_configuring-registry-storage-vsphere[block registry storage]. For more information on persistent storage, see xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Understanding persistent storage]. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. 
-==== - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. -* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere_creating-machineset-vsphere[Creating a compute machine set on vSphere] - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vmc/installing-restricted-networks-vmc-user-infra.adoc#installation-configuration-parameters-additional-vsphere_installing-restricted-networks-vmc-user-infra[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vmc/installing-restricted-networks-vmc-user-infra.adoc#deprecated-parameters-vsphere_installing-restricted-networks-vmc-user-infra[Deprecated VMware vSphere configuration parameters] - -//You extract the installation program from the mirrored content. - -//You can install the CLI on the mirror host. 
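Before you generate the installation configuration, you can spot-check the user-provisioned DNS records with `dig`. The following commands are an informal sketch; the name server address, cluster name, and base domain are placeholders, and the DNS validation module included above covers the complete checks.

[source,terminal]
----
# Forward lookups for the API, internal API, and wildcard application records.
$ dig +noall +answer @<nameserver_ip> api.<cluster_name>.<base_domain>
$ dig +noall +answer @<nameserver_ip> api-int.<cluster_name>.<base_domain>
$ dig +noall +answer @<nameserver_ip> random.apps.<cluster_name>.<base_domain>

# Reverse lookup for the API address.
$ dig +noall +answer @<nameserver_ip> -x <api_ip_address>
----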
- -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-vsphere-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2] - -//include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -include::modules/installation-extracting-infraid.adoc[leveloffset=+1] - -include::modules/installation-vsphere-machines.adoc[leveloffset=+1] - -include::modules/machine-vsphere-machines.adoc[leveloffset=+1] - -include::modules/installation-disk-partitioning.adoc[leveloffset=+1] - -include::modules/architecture-rhcos-updating-bootloader.adoc[leveloffset=+1] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-vsphere.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3] - -For instructions about configuring registry storage so that it references the correct PVC, see xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring registry storage for VMware vSphere]. - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -You can add extra compute machines after the cluster installation is completed by following xref:../../machine_management/user_infra/adding-vsphere-compute-user-infra.adoc#adding-vsphere-compute-user-infra[Adding compute machines to vSphere]. - -include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-must-gather-disconnected[Configure image streams] for the Cluster Samples Operator and the `must-gather` tool. -* Learn how to xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[use Operator Lifecycle Manager (OLM) on restricted networks]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. 
-* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues. diff --git a/installing/installing_vmc/installing-vmc-network-customizations-user-infra.adoc b/installing/installing_vmc/installing-vmc-network-customizations-user-infra.adoc deleted file mode 100644 index f707a1b2453d..000000000000 --- a/installing/installing_vmc/installing-vmc-network-customizations-user-infra.adoc +++ /dev/null @@ -1,142 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-vmc-network-customizations-user-infra"] -= Installing a cluster on VMC with user-provisioned infrastructure and network customizations -include::_attributes/common-attributes.adoc[] -:context: installing-vmc-network-customizations-user-infra - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on your VMware vSphere instance using infrastructure you provision with customized network configuration options by deploying it to link:https://cloud.vmware.com/vmc-aws[VMware Cloud (VMC) on AWS]. - -Once you configure your VMC environment for {product-title} deployment, you use the {product-title} installation program from the bastion management host, co-located in the VMC environment. The installation program and control plane automates the process of deploying and managing the resources needed for the {product-title} cluster. - -By customizing your network configuration, your cluster can coexist with existing IP address allocations in your environment and integrate with existing VXLAN configurations. You must set most of the network configuration parameters during installation, and you can modify only `kubeProxy` configuration parameters in a running cluster. - -include::snippets/vcenter-support.adoc[] - -include::modules/setting-up-vmc-for-vsphere.adoc[leveloffset=+1] -include::modules/vmc-sizer-tool.adoc[leveloffset=+2] - -== vSphere prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You provisioned xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#installation-registry-storage-block-recreate-rollout_configuring-registry-storage-vsphere[block registry storage]. For more information on persistent storage, see xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Understanding persistent storage]. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. 
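As a hedged illustration of the network customizations mentioned in the introduction, the following fragment shows a `networking` stanza that you might merge into `install-config.yaml` before you generate manifests. The CIDR values are common defaults rather than requirements, and the file name is arbitrary.

[source,terminal]
----
# Write an example networking fragment to merge into install-config.yaml.
$ cat <<'EOF' > networking-example.yaml
networking:
  networkType: OVNKubernetes
  clusterNetwork:
  - cidr: 10.128.0.0/14
    hostPrefix: 23
  serviceNetwork:
  - 172.30.0.0/16
  machineNetwork:
  - cidr: 10.0.0.0/16
EOF
----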
- -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. -* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere_creating-machineset-vsphere[Creating a compute machine set on vSphere] - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vmc/installing-vmc-network-customizations-user-infra.adoc#installation-configuration-parameters-additional-vsphere_installing-vmc-network-customizations-user-infra[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vmc/installing-vmc-network-customizations-user-infra.adoc#deprecated-parameters-vsphere_installing-vmc-network-customizations-user-infra[Deprecated VMware vSphere configuration parameters] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-vsphere-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2] - -// Network Operator specific configuration - -include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-generate-ignition-configs.adoc[leveloffset=+1] - -include::modules/installation-extracting-infraid.adoc[leveloffset=+1] - -include::modules/installation-vsphere-machines.adoc[leveloffset=+1] - 
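The infrastructure ID that the preceding modules reference can be read back from the installation metadata at any time. The following one-liner is a convenience sketch, with `<installation_directory>` as a placeholder.

[source,terminal]
----
# Print the infrastructure ID used to prefix the names of the vSphere VMs.
$ jq -r .infraID <installation_directory>/metadata.json
----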
-include::modules/machine-vsphere-machines.adoc[leveloffset=+1] - -include::modules/installation-disk-partitioning.adoc[leveloffset=+1] - -include::modules/architecture-rhcos-updating-bootloader.adoc[leveloffset=+1] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3] - -For instructions about configuring registry storage so that it references the correct PVC, see xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring the registry for vSphere]. - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -You can add extra compute machines after the cluster installation is completed by following xref:../../machine_management/user_infra/adding-vsphere-compute-user-infra.adoc#adding-vsphere-compute-user-infra[Adding compute machines to vSphere]. - -include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#configuring-registry-storage-vsphere[Set up your registry and configure registry storage]. -* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues. diff --git a/installing/installing_vmc/installing-vmc-user-infra.adoc b/installing/installing_vmc/installing-vmc-user-infra.adoc deleted file mode 100644 index 39e12390e130..000000000000 --- a/installing/installing_vmc/installing-vmc-user-infra.adoc +++ /dev/null @@ -1,149 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-vmc-user-infra"] -= Installing a cluster on VMC with user-provisioned infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-vmc-user-infra -:platform: VMC - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on VMware vSphere infrastructure that you provision by deploying it to link:https://cloud.vmware.com/vmc-aws[VMware Cloud (VMC) on AWS]. - -Once you configure your VMC environment for {product-title} deployment, you use the {product-title} installation program from the bastion management host, co-located in the VMC environment. 
The installation program and control plane automate the process of deploying and managing the resources needed for the {product-title} cluster.
-
-include::snippets/vcenter-support.adoc[]
-
-include::modules/setting-up-vmc-for-vsphere.adoc[leveloffset=+1]
-include::modules/vmc-sizer-tool.adoc[leveloffset=+2]
-
-== vSphere prerequisites
-
-* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes.
-* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users].
-* You provisioned xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#installation-registry-storage-block-recreate-rollout_configuring-registry-storage-vsphere[block registry storage]. For more information on persistent storage, see xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Understanding persistent storage].
-* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to.
-+
-[NOTE]
-====
-Be sure to also review this site list if you are configuring a proxy.
-====
-
-include::modules/cluster-entitlements.adoc[leveloffset=+1]
-
-include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1]
-
-include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-.Additional resources
-
-* To remove a third-party CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver].
-* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere].
-
-[id="installation-requirements-user-infra_{context}"]
-== Requirements for a cluster with user-provisioned infrastructure
-
-For a cluster that contains user-provisioned infrastructure, you must deploy all
-of the required machines.
-
-This section describes the requirements for deploying {product-title} on user-provisioned infrastructure.
- -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere_creating-machineset-vsphere[Creating a compute machine set on vSphere] - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vmc/installing-vmc-user-infra.adoc#installation-configuration-parameters-additional-vsphere_installing-vmc-user-infra[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vmc/installing-vmc-user-infra.adoc#deprecated-parameters-vsphere_installing-vmc-user-infra[Deprecated VMware vSphere configuration parameters] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-vsphere-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2] - -//include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -include::modules/installation-extracting-infraid.adoc[leveloffset=+1] - -include::modules/installation-vsphere-machines.adoc[leveloffset=+1] - -include::modules/machine-vsphere-machines.adoc[leveloffset=+1] - -include::modules/installation-disk-partitioning.adoc[leveloffset=+1] - -include::modules/architecture-rhcos-updating-bootloader.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-vsphere.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3] - -For instructions about configuring registry storage so that it references the correct PVC, see xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring the registry for vSphere]. 
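-
-The exact steps are covered in the linked procedure. As a rough, non-authoritative sketch only, an image registry configuration that pins the rollout strategy for block (RWO) storage and references a PVC can look similar to the following; the claim name `image-registry-storage` is an assumption used for illustration.
-
-[source,yaml]
-----
-apiVersion: imageregistry.operator.openshift.io/v1
-kind: Config
-metadata:
-  name: cluster
-spec:
-  managementState: Managed
-  replicas: 1                  # block storage supports only one registry replica
-  rolloutStrategy: Recreate    # avoid attaching an RWO volume to two pods at once
-  storage:
-    pvc:
-      claim: image-registry-storage   # assumed claim name; substitute your PVC
-----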
- -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -You can add extra compute machines after the cluster installation is completed by following xref:../../machine_management/user_infra/adding-vsphere-compute-user-infra.adoc#adding-vsphere-compute-user-infra[Adding compute machines to vSphere]. - -include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#configuring-registry-storage-vsphere[Set up your registry and configure registry storage]. -* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues. diff --git a/installing/installing_vsphere/_attributes b/installing/installing_vsphere/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_vsphere/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_vsphere/images b/installing/installing_vsphere/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_vsphere/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_vsphere/installation-config-parameters-vsphere.adoc b/installing/installing_vsphere/installation-config-parameters-vsphere.adoc deleted file mode 100644 index 349eaddb50bd..000000000000 --- a/installing/installing_vsphere/installation-config-parameters-vsphere.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_content-type: ASSEMBLY -[id="installation-config-parameters-vsphere"] -= Installation configuration parameters for vSphere -include::_attributes/common-attributes.adoc[] -:context: installation-config-parameters-vsphere -:platform: vSphere - -toc::[] - -Before you deploy an {product-title} cluster on vSphere, you provide parameters to customize your cluster and the platform that hosts it. When you create the `install-config.yaml` file, you provide values for the required parameters through the command line. You can then modify the `install-config.yaml` file to customize your cluster further. 
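-
-As a skeletal illustration only, an `install-config.yaml` file has the following overall shape. The placeholder values are assumptions, and the vSphere-specific parameters that belong under `platform.vsphere` are described in the parameter tables that follow.
-
-[source,yaml]
-----
-apiVersion: v1
-baseDomain: example.com    # replace with your base DNS domain
-metadata:
-  name: my-cluster         # assumed cluster name
-compute:
-- name: worker
-  replicas: 3
-controlPlane:
-  name: master
-  replicas: 3
-platform:
-  vsphere:
-    # vSphere-specific parameters; see the tables in this document
-pullSecret: '{"auths": ...}'
-sshKey: 'ssh-ed25519 AAAA...'
-----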
- -include::modules/installation-configuration-parameters.adoc[leveloffset=+1] diff --git a/installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc b/installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc deleted file mode 100644 index a4c7f5a74326..000000000000 --- a/installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc +++ /dev/null @@ -1,116 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-installer-provisioned-vsphere"] -= Installing a cluster on vSphere in a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-installer-provisioned-vsphere - -toc::[] - -In {product-title} {product-version}, you can install a cluster on VMware vSphere infrastructure in a restricted network by creating an internal mirror of the installation release content. - -include::snippets/vcenter-support.adoc[] - -[id="prerequisites_installing-restricted-networks-installer-provisioned-vsphere"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a registry on your mirror host] and obtained the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps. -==== -* You provisioned xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[persistent storage] for your cluster. To deploy a private image registry, your storage must provide the ReadWriteMany access mode. -* The {product-title} installer requires access to port 443 on the vCenter and ESXi hosts. You verified that port 443 is accessible. -* If you use a firewall, you confirmed with the administrator that port 443 is accessible. Control plane nodes must be able to reach vCenter and ESXi hosts on port 443 for the installation to succeed. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -If you are configuring a proxy, be sure to also review this site list. -==== - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/installation-vsphere-installer-network-requirements.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party vSphere CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. 
-* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. - -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-adding-vcenter-root-certificates.adoc[leveloffset=+1] - -include::modules/installation-creating-image-restricted.adoc[leveloffset=+1] - -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-configuration-parameters-additional-vsphere_installation-config-parameters-vsphere[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#deprecated-parameters-vsphere_installation-config-parameters-vsphere[Deprecated VMware vSphere configuration parameters] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-migration.adoc#persistent-storage-csi-migration-sc-vsphere_persistent-storage-csi-migration[vSphere automatic migration] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-top-aware_persistent-storage-csi-vsphere[VMware vSphere CSI Driver Operator] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-config-parameters-vsphere[Installation configuration parameters] - -include::modules/installation-installer-provisioned-vsphere-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+1] - -[id="installing-vsphere-restricted-networks-installer-provisioned-customizations-registry"] -== Creating registry storage - -After you install the cluster, you must create storage for the Registry Operator. - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-vsphere.adoc[leveloffset=+3] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -include::modules/nw-osp-configuring-external-load-balancer.adoc[leveloffset=+1] - -[id="next-steps_installing-restricted-networks-installer-provisioned-vsphere"] -== Next steps - -* xref:../../installing/install_config/installing-customizing.adoc#installing-customizing[Customize your cluster]. 
-* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#configuring-registry-storage-vsphere[Set up your registry and configure registry storage]. diff --git a/installing/installing_vsphere/installing-restricted-networks-vsphere.adoc b/installing/installing_vsphere/installing-restricted-networks-vsphere.adoc deleted file mode 100644 index e429bc84024b..000000000000 --- a/installing/installing_vsphere/installing-restricted-networks-vsphere.adoc +++ /dev/null @@ -1,181 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-vsphere"] -= Installing a cluster on vSphere in a restricted network with user-provisioned infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-vsphere - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -VMware vSphere infrastructure that you provision in a restricted network. - -include::snippets/vcenter-support.adoc[] - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the vSphere platform and the installation process of {product-title}. Use the user-provisioned infrastructure installation instructions as a guide; you are free to create the required resources through other methods. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a registry on your mirror host] and obtained the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps. -==== -* You provisioned xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[persistent storage] for your cluster. To deploy a private image registry, your storage must provide -`ReadWriteMany` access modes. -* Completing the installation requires that you upload the {op-system-first} OVA on vSphere hosts. The machine from which you complete this process requires access to port 443 on the vCenter and ESXi hosts. You verified that port 443 is accessible. -* If you use a firewall, you confirmed with the administrator that port 443 is accessible. Control plane nodes must be able to reach vCenter and ESXi hosts on port 443 for the installation to succeed. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. 
-==== - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party vSphere CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. -* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere_creating-machineset-vsphere[Creating a compute machine set on vSphere] - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-vsphere-encrypted-vms.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#vsphere-encryption[Creating an encrypted storage class] - -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -//You extract the installation program from the mirrored content. - -//You can install the CLI on the mirror host. 
- -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-configuration-parameters-additional-vsphere_installation-config-parameters-vsphere[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#deprecated-parameters-vsphere_installation-config-parameters-vsphere[Deprecated VMware vSphere configuration parameters] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-migration.adoc#persistent-storage-csi-migration-sc-vsphere_persistent-storage-csi-migration[vSphere automatic migration] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-top-aware_persistent-storage-csi-vsphere[VMware vSphere CSI Driver Operator] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-config-parameters-vsphere[Installation configuration parameters] - -include::modules/installation-vsphere-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -//include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -include::modules/installation-special-config-chrony.adoc[leveloffset=+1] - -include::modules/installation-extracting-infraid.adoc[leveloffset=+1] - -include::modules/installation-vsphere-machines.adoc[leveloffset=+1] - -include::modules/machine-vsphere-machines.adoc[leveloffset=+1] - -include::modules/installation-disk-partitioning.adoc[leveloffset=+1] - -include::modules/architecture-rhcos-updating-bootloader.adoc[leveloffset=+1] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-vsphere.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3] - -For instructions about configuring registry storage so that it references the correct PVC, see xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring the registry for vSphere]. - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -You can add extra compute machines after the cluster installation is completed by following xref:../../machine_management/user_infra/adding-vsphere-compute-user-infra.adoc#adding-vsphere-compute-user-infra[Adding compute machines to vSphere]. 
- -include::modules/vsphere-anti-affinity.adoc[leveloffset=+1] - -include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues. -* Optional: if you created encrypted virtual machines, xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#vsphere-pv-encryption[create an encrypted storage class]. diff --git a/installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc b/installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc deleted file mode 100644 index 4c21bb102f56..000000000000 --- a/installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc +++ /dev/null @@ -1,113 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-vsphere-installer-provisioned-customizations"] -= Installing a cluster on vSphere with customizations -include::_attributes/common-attributes.adoc[] -:context: installing-vsphere-installer-provisioned-customizations -:platform: vSphere - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on your -VMware vSphere instance by using installer-provisioned infrastructure. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -include::snippets/vcenter-support.adoc[] - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You provisioned xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[persistent storage] for your cluster. To deploy a private image registry, your storage must provide `ReadWriteMany` access modes. -* The {product-title} installer requires access to port 443 on the vCenter and ESXi hosts. You verified that port 443 is accessible. -* If you use a firewall, you confirmed with the administrator that port 443 is accessible. Control plane nodes must be able to reach vCenter and ESXi hosts on port 443 for the installation to succeed. 
-* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/installation-vsphere-installer-network-requirements.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party vSphere CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. -* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. - -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-adding-vcenter-root-certificates.adoc[leveloffset=+1] - -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-configuration-parameters-additional-vsphere_installation-config-parameters-vsphere[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#deprecated-parameters-vsphere_installation-config-parameters-vsphere[Deprecated VMware vSphere configuration parameters] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-migration.adoc#persistent-storage-csi-migration-sc-vsphere_persistent-storage-csi-migration[vSphere automatic migration] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-top-aware_persistent-storage-csi-vsphere[VMware vSphere CSI Driver Operator] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-config-parameters-vsphere[Installation configuration parameters] - -include::modules/installation-installer-provisioned-vsphere-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[id="installing-vsphere-installer-provisioned-customizations-registry"] -== Creating registry storage -After you install the cluster, you must create storage for the registry Operator. 
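-
-The modules that follow describe this procedure in detail. As a rough orientation only, moving the Image Registry Operator out of the `Removed` state and requesting a default persistent volume claim can look similar to the following command; treat the field values as assumptions and follow the documented steps for your environment.
-
-[source,terminal]
-----
-$ oc patch configs.imageregistry.operator.openshift.io cluster --type merge \
-    --patch '{"spec":{"managementState":"Managed","storage":{"pvc":{"claim":""}}}}'
-----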
- -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-vsphere.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3] - -For instructions about configuring registry storage so that it references the correct PVC, see xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring the registry for vSphere]. - -include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -include::modules/nw-osp-configuring-external-load-balancer.adoc[leveloffset=+1] - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#configuring-registry-storage-vsphere[Set up your registry and configure registry storage]. -* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues. diff --git a/installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc b/installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc deleted file mode 100644 index 1307e3e3c4d0..000000000000 --- a/installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc +++ /dev/null @@ -1,124 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-vsphere-installer-provisioned-network-customizations"] -= Installing a cluster on vSphere with network customizations -include::_attributes/common-attributes.adoc[] -:context: installing-vsphere-installer-provisioned-network-customizations - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on your -VMware vSphere instance by using installer-provisioned infrastructure with customized network configuration options. By customizing your network configuration, your cluster can coexist with existing IP address allocations in your environment and integrate with existing MTU and VXLAN configurations. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -You must set most of the network configuration parameters during installation, and you can modify only `kubeProxy` configuration parameters in a running cluster. - -include::snippets/vcenter-support.adoc[] - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. 
-* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You provisioned xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[persistent storage] for your cluster. To deploy a private image registry, your storage must provide -`ReadWriteMany` access modes. -* The {product-title} installer requires access to port 443 on the vCenter and ESXi hosts. You verified that port 443 is accessible. -* If you use a firewall, confirm with the administrator that port 443 is accessible. Control plane nodes must be able to reach vCenter and ESXi hosts on port 443 for the installation to succeed. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/installation-vsphere-installer-network-requirements.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party vSphere CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. -* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. 
- -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-adding-vcenter-root-certificates.adoc[leveloffset=+1] - -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-configuration-parameters-additional-vsphere_installation-config-parameters-vsphere[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#deprecated-parameters-vsphere_installation-config-parameters-vsphere[Deprecated VMware vSphere configuration parameters] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-migration.adoc#persistent-storage-csi-migration-sc-vsphere_persistent-storage-csi-migration[vSphere automatic migration] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-top-aware_persistent-storage-csi-vsphere[VMware vSphere CSI Driver Operator] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-config-parameters-vsphere[Installation configuration parameters] - -include::modules/installation-installer-provisioned-vsphere-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/ipi-install-modifying-install-config-for-dual-stack-network.adoc[leveloffset=+2] - -include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2] - -// begin network customization -include::modules/nw-network-config.adoc[leveloffset=+1] -include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] -include::modules/nw-operator-cr.adoc[leveloffset=+1] -// end network customization - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[id="installing-vsphere-installer-provisioned-network-customizations-registry"] -== Creating registry storage -After you install the cluster, you must create storage for the registry Operator. - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-vsphere.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3] - -For instructions about configuring registry storage so that it references the correct PVC, see xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring the registry for vSphere]. 
- -include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -include::modules/nw-osp-configuring-external-load-balancer.adoc[leveloffset=+1] -include::modules/ipi-install-configure-network-components-to-run-on-the-control-plane.adoc[leveloffset=+1] - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#configuring-registry-storage-vsphere[Set up your registry and configure registry storage]. -* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues. diff --git a/installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc b/installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc deleted file mode 100644 index f8f677432d59..000000000000 --- a/installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc +++ /dev/null @@ -1,88 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-vsphere-installer-provisioned"] -= Installing a cluster on vSphere -include::_attributes/common-attributes.adoc[] -:context: installing-vsphere-installer-provisioned - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on your -VMware vSphere instance by using installer-provisioned infrastructure. - -include::snippets/vcenter-support.adoc[] - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You provisioned xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[persistent storage] for your cluster. To deploy a private image registry, your storage must provide -`ReadWriteMany` access modes. -* The {product-title} installer requires access to port 443 on the vCenter and ESXi hosts. You verified that port 443 is accessible. -* If you use a firewall, you confirmed with the administrator that port 443 is accessible. Control plane nodes must be able to reach vCenter and ESXi hosts on port 443 for the installation to succeed. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. 
-==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/installation-vsphere-installer-network-requirements.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party vSphere CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. -* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. - -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-adding-vcenter-root-certificates.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[id="installing-vsphere-installer-provisioned-registry"] -== Creating registry storage -After you install the cluster, you must create storage for the registry Operator. - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-vsphere.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3] - -For instructions about configuring registry storage so that it references the correct PVC, see xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring the registry for vSphere]. - -include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -include::modules/nw-osp-configuring-external-load-balancer.adoc[leveloffset=+1] - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#configuring-registry-storage-vsphere[Set up your registry and configure registry storage]. -* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues. 
diff --git a/installing/installing_vsphere/installing-vsphere-network-customizations.adoc b/installing/installing_vsphere/installing-vsphere-network-customizations.adoc deleted file mode 100644 index ac7d9ba54ae7..000000000000 --- a/installing/installing_vsphere/installing-vsphere-network-customizations.adoc +++ /dev/null @@ -1,168 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-vsphere-network-customizations"] -= Installing a cluster on vSphere with network customizations -include::_attributes/common-attributes.adoc[] -:context: installing-vsphere-network-customizations - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -VMware vSphere infrastructure that you provision with customized network -configuration options. By customizing your network configuration, your cluster -can coexist with existing IP address allocations in your environment and -integrate with existing MTU and VXLAN configurations. - -You must set most of the network configuration parameters during installation, -and you can modify only `kubeProxy` configuration parameters in a running -cluster. - -include::snippets/vcenter-support.adoc[] - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the vSphere platform and the installation process of {product-title}. Use the user-provisioned infrastructure installation instructions as a guide; you are free to create the required resources through other methods. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* Completing the installation requires that you upload the {op-system-first} OVA on vSphere hosts. The machine from which you complete this process requires access to port 443 on the vCenter and ESXi hosts. Verify that port 443 is accessible. -* If you use a firewall, you confirmed with the administrator that port 443 is accessible. Control plane nodes must be able to reach vCenter and ESXi hosts on port 443 for the installation to succeed. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party vSphere CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. -* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. 
- -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere_creating-machineset-vsphere[Creating a compute machine set on vSphere] - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-vsphere-encrypted-vms.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#vsphere-pv-encryption[Creating an encrypted storage class] - -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-configuration-parameters-additional-vsphere_installation-config-parameters-vsphere[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#deprecated-parameters-vsphere_installation-config-parameters-vsphere[Deprecated VMware vSphere configuration parameters] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-migration.adoc#persistent-storage-csi-migration-sc-vsphere_persistent-storage-csi-migration[vSphere automatic migration] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-top-aware_persistent-storage-csi-vsphere[VMware vSphere CSI Driver Operator] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-config-parameters-vsphere[Installation configuration parameters] - -include::modules/installation-vsphere-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2] - -// Network Operator specific configuration -include::modules/nw-network-config.adoc[leveloffset=+1] 
-include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-generate-ignition-configs.adoc[leveloffset=+1] - -include::modules/installation-extracting-infraid.adoc[leveloffset=+1] - -include::modules/installation-vsphere-machines.adoc[leveloffset=+1] - -include::modules/machine-vsphere-machines.adoc[leveloffset=+1] - -include::modules/installation-disk-partitioning.adoc[leveloffset=+1] - -include::modules/architecture-rhcos-updating-bootloader.adoc[leveloffset=+1] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+2] - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3] - -For instructions about configuring registry storage so that it references the correct PVC, see xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring the registry for vSphere]. - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -You can add extra compute machines after the cluster installation is completed by following xref:../../machine_management/user_infra/adding-vsphere-compute-user-infra.adoc#adding-vsphere-compute-user-infra[Adding compute machines to vSphere]. - -include::modules/vsphere-anti-affinity.adoc[leveloffset=+1] - -include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#configuring-registry-storage-vsphere[Set up your registry and configure registry storage]. -* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues. -* Optional: if you created encrypted virtual machines, xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#vsphere-pv-encryption[create an encrypted storage class]. 
diff --git a/installing/installing_vsphere/installing-vsphere-post-installation-configuration.adoc b/installing/installing_vsphere/installing-vsphere-post-installation-configuration.adoc deleted file mode 100644 index 4a2dc0822686..000000000000 --- a/installing/installing_vsphere/installing-vsphere-post-installation-configuration.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-vsphere-post-installation-configuration"] -= Configuring the vSphere connection settings after an installation -include::_attributes/common-attributes.adoc[] -:context: installing-vsphere-post-installation-configuration - - -After installing an {product-title} cluster on vSphere with the platform integration feature enabled, you might need to update the vSphere connection settings manually, depending on the installation method. - -For installations using the Assisted Installer, you must update the connection settings. This is because the Assisted Installer adds default connection settings to the *vSphere connection configuration* wizard as placeholders during the installation. - -For installer-provisioned or user-provisioned infrastructure installations, you should have entered valid connection settings during the installation. You can use the *vSphere connection configuration* wizard at any time to validate or modify the connection settings, but this is not mandatory for completing the installation. - -toc::[] - -include::modules/configuring-vsphere-connection-settings.adoc[leveloffset=+1] - -include::modules/configuring-vsphere-verifying-configuration.adoc[leveloffset=+1] - -For instructions on creating storage objects, see xref:../../storage/dynamic-provisioning.adoc#dynamic-provisioning[Dynamic provisioning]. diff --git a/installing/installing_vsphere/installing-vsphere-three-node.adoc b/installing/installing_vsphere/installing-vsphere-three-node.adoc deleted file mode 100644 index 67378f814fff..000000000000 --- a/installing/installing_vsphere/installing-vsphere-three-node.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-vsphere-three-node"] -= Installing a three-node cluster on vSphere -include::_attributes/common-attributes.adoc[] -:context: installing-vsphere-three-node - -toc::[] - -In {product-title} version {product-version}, you can install a three-node cluster on VMware vSphere. A three-node cluster consists of three control plane machines, which also act as compute machines. This type of cluster provides a smaller, more resource efficient cluster, for cluster administrators and developers to use for testing, development, and production. - -You can install a three-node cluster using either installer-provisioned or user-provisioned infrastructure. 
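In practice, the three-node topology is expressed in the `install-config.yaml` file by setting the compute replica count to zero so that the three control plane machines are also schedulable for workloads. The following excerpt is a minimal sketch only, with assumed values for `baseDomain` and `metadata.name`; the complete file for your platform is described in the linked installation procedures.

[source,yaml]
----
apiVersion: v1
baseDomain: example.com        # assumed domain, replace with your own
compute:
- name: worker
  replicas: 0                  # no dedicated compute machines
controlPlane:
  name: master
  replicas: 3                  # the control plane machines also run application workloads
metadata:
  name: three-node-cluster     # hypothetical cluster name
----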
- -include::modules/installation-three-node-cluster-cloud-provider.adoc[leveloffset=+1] - -== Next steps -* xref:../../installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc#installing-vsphere-installer-provisioned-customizations[Installing a cluster on vSphere with customizations] -* xref:../../installing/installing_vsphere/installing-vsphere.adoc#installing-vsphere[Installing a cluster on vSphere with user-provisioned infrastructure] diff --git a/installing/installing_vsphere/installing-vsphere.adoc b/installing/installing_vsphere/installing-vsphere.adoc deleted file mode 100644 index f90d5ec024d1..000000000000 --- a/installing/installing_vsphere/installing-vsphere.adoc +++ /dev/null @@ -1,171 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-vsphere"] -= Installing a cluster on vSphere with user-provisioned infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-vsphere -:platform: vSphere - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -VMware vSphere infrastructure that you provision. - -include::snippets/vcenter-support.adoc[] - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the vSphere platform and the installation process of {product-title}. Use the user-provisioned infrastructure installation instructions as a guide; you are free to create the required resources through other methods. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You provisioned xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[persistent storage] for your cluster. To deploy a private image registry, your storage must provide `ReadWriteMany` access modes. -* Completing the installation requires that you upload the {op-system-first} OVA on vSphere hosts. The machine from which you complete this process requires access to port 443 on the vCenter and ESXi hosts. You verified that port 443 is accessible. -* If you use a firewall, you confirmed with the administrator that port 443 is accessible. Control plane nodes must be able to reach vCenter and ESXi hosts on port 443 for the installation to succeed. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party vSphere CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. 
-* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere_creating-machineset-vsphere[Creating a compute machine set on vSphere] - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-vsphere-encrypted-vms.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#vsphere-pv-encryption[Creating an encrypted storage class] - -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-configuration-parameters-additional-vsphere_installation-config-parameters-vsphere[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#deprecated-parameters-vsphere_installation-config-parameters-vsphere[Deprecated VMware vSphere configuration parameters] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-migration.adoc#persistent-storage-csi-migration-sc-vsphere_persistent-storage-csi-migration[vSphere automatic migration] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-top-aware_persistent-storage-csi-vsphere[VMware vSphere CSI Driver Operator] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-config-parameters-vsphere[Installation configuration parameters] - -include::modules/installation-vsphere-config-yaml.adoc[leveloffset=+2] - 
-include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -//include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -include::modules/installation-extracting-infraid.adoc[leveloffset=+1] - -include::modules/installation-vsphere-machines.adoc[leveloffset=+1] - -include::modules/machine-vsphere-machines.adoc[leveloffset=+1] - -include::modules/installation-disk-partitioning.adoc[leveloffset=+1] - -include::modules/architecture-rhcos-updating-bootloader.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-vsphere.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3] - -For instructions about configuring registry storage so that it references the correct PVC, see xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring the registry for vSphere]. - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -You can add extra compute machines after the cluster installation is completed by following xref:../../machine_management/user_infra/adding-vsphere-compute-user-infra.adoc#adding-vsphere-compute-user-infra[Adding compute machines to vSphere]. - -include::modules/vsphere-anti-affinity.adoc[leveloffset=+1] - -include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#configuring-registry-storage-vsphere[Set up your registry and configure registry storage]. -* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues. -* Optional: if you created encrypted virtual machines, xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#vsphere-pv-encryption[create an encrypted storage class]. 
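After you complete the installation and work through the next steps above, a quick way to confirm overall cluster health from a terminal is to check the cluster version and the cluster Operators. This is a generic verification sketch rather than part of the documented procedure:

[source,terminal]
----
$ oc get clusterversion        # reports the installed version and whether it is Available
$ oc get clusteroperators      # each Operator should report Available=True and Degraded=False
----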
diff --git a/installing/installing_vsphere/modules b/installing/installing_vsphere/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_vsphere/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_vsphere/preparing-to-install-on-vsphere.adoc b/installing/installing_vsphere/preparing-to-install-on-vsphere.adoc deleted file mode 100644 index 0be971ab3397..000000000000 --- a/installing/installing_vsphere/preparing-to-install-on-vsphere.adoc +++ /dev/null @@ -1,75 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-vsphere"] -= Preparing to install on vSphere -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-vsphere - -toc::[] - - -[id="preparing-to-install-on-vsphere-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. - -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -* If you use a firewall and plan to use Telemetry, you -xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] required by your cluster. - -* You reviewed your VMware platform licenses. Red Hat does not place any restrictions on your VMware licenses, but some VMware infrastructure components require licensing. - -[id="choosing-a-method-to-install-ocp-on-vsphere"] -== Choosing a method to install {product-title} on vSphere - -You can install {product-title} with the link:https://access.redhat.com/documentation/en-us/assisted_installer_for_openshift_container_platform/2022/html-single/assisted_installer_for_openshift_container_platform/index[{ai-full}]. This method requires no setup for the installer, and is ideal for connected environments like vSphere. Installing with the {ai-full} also provides integration with vSphere, enabling autoscaling. See xref:../../installing/installing_on_prem_assisted/installing-on-prem-assisted.adoc#installing-on-prem-assisted[Installing an on-premise cluster using the {ai-full}] for additional details. - -You can also install {product-title} on vSphere by using installer-provisioned or user-provisioned infrastructure. Installer-provisioned infrastructure is ideal for installing in environments with air-gapped/restricted networks, where the installation program provisions the underlying infrastructure for the cluster. You can also install {product-title} on infrastructure that you provide. If you do not use infrastructure that the installation program provisions, you must manage and maintain the cluster resources yourself. - -See the xref:../../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process] for more information about installer-provisioned and user-provisioned installation processes. - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the vSphere platform and the installation process of {product-title}. Use the user-provisioned infrastructure installation instructions as a guide; you are free to create the required resources through other methods. 
-==== - - -=== Installer-provisioned infrastructure installation of {product-title} on vSphere - -Installer-provisioned infrastructure allows the installation program to pre-configure and automate the provisioning of resources required by {product-title}. - -* **xref:../../installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc#installing-vsphere-installer-provisioned[Installing a cluster on vSphere]**: You can install {product-title} on vSphere by using installer-provisioned infrastructure installation with no customization. - -* **xref:../../installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc#installing-vsphere-installer-provisioned-customizations[Installing a cluster on vSphere with customizations]**: You can install {product-title} on vSphere by using installer-provisioned infrastructure installation with the default customization options. - -* **xref:../../installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc#installing-vsphere-installer-provisioned-network-customizations[Installing a cluster on vSphere with network customizations]**: You can install {product-title} on installer-provisioned vSphere infrastructure, with network customizations. You can customize your {product-title} network configuration during installation, so that your cluster can coexist with your existing IP address allocations and adhere to your network requirements. - -* **xref:../../installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc#installing-restricted-networks-installer-provisioned-vsphere[Installing a cluster on vSphere in a restricted network]**: You can install a cluster on VMware vSphere infrastructure in a restricted network by creating an internal mirror of the installation release content. - You can use this method to deploy {product-title} on an internal network that is not visible to the internet. - -=== User-provisioned infrastructure installation of {product-title} on vSphere - -User-provisioned infrastructure requires the user to provision all resources required by {product-title}. - -* **xref:../../installing/installing_vsphere/installing-vsphere.adoc#[Installing a cluster on vSphere with user-provisioned infrastructure]**: You can install {product-title} on VMware vSphere infrastructure that you provision. - -* **xref:../../installing/installing_vsphere/installing-vsphere-network-customizations.adoc#installing-vsphere-network-customizations[Installing a cluster on vSphere with network customizations with user-provisioned infrastructure]**: You can install {product-title} on VMware vSphere infrastructure that you provision with customized network configuration options. - -* **xref:../../installing/installing_vsphere/installing-restricted-networks-vsphere.adoc#installing-restricted-networks-vsphere[Installing a cluster on vSphere in a restricted network with user-provisioned infrastructure]**: {product-title} can be installed on VMware vSphere infrastructure that you provision in a restricted network. - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party vSphere CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. 
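If you are not sure whether a third-party vSphere CSI driver is already installed, you can list the `CSIDriver` objects on an existing cluster before you plan the installation. This is only an illustrative check; follow the linked procedure for the supported removal steps.

[source,terminal]
----
$ oc get csidriver             # the Red Hat-provided driver appears as csi.vsphere.vmware.com
----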
- -== Configuring the vSphere connection settings - -* **xref:../../installing/installing_vsphere/installing-vsphere-post-installation-configuration.adoc#installing-vsphere-post-installation-configuration[Updating the vSphere connection settings following an installation]**: For installations on vSphere using the Assisted Installer, you must manually update the vSphere connection settings to complete the installation. For installer-provisioned or user-provisioned infrastructure installations on vSphere, you can optionally validate or modify the vSphere connection settings at any time. - -== Uninstalling an installer-provisioned infrastructure installation of {product-title} on vSphere - -* **xref:../../installing/installing_vsphere/uninstalling-cluster-vsphere-installer-provisioned.adoc#uninstalling-cluster-vsphere-installer-provisioned[Uninstalling a cluster on vSphere that uses installer-provisioned infrastructure]**: You can remove a cluster that you deployed on VMware vSphere infrastructure that used installer-provisioned infrastructure. diff --git a/installing/installing_vsphere/snippets b/installing/installing_vsphere/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_vsphere/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_vsphere/uninstalling-cluster-vsphere-installer-provisioned.adoc b/installing/installing_vsphere/uninstalling-cluster-vsphere-installer-provisioned.adoc deleted file mode 100644 index 92ef5ed520cf..000000000000 --- a/installing/installing_vsphere/uninstalling-cluster-vsphere-installer-provisioned.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-vsphere-installer-provisioned"] -= Uninstalling a cluster on vSphere that uses installer-provisioned infrastructure -include::_attributes/common-attributes.adoc[] -:context: uninstalling-cluster-vsphere-installer-provisioned - -toc::[] - -You can remove a cluster that you deployed in your VMware vSphere instance by using installer-provisioned infrastructure. - -[NOTE] -==== -When you run the `openshift-install destroy cluster` command to uninstall {product-title}, vSphere volumes are not automatically deleted. The cluster administrator must manually find the vSphere volumes and delete them. 
-==== - -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] diff --git a/installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc b/installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc deleted file mode 100644 index 2d89bc05a81f..000000000000 --- a/installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: ASSEMBLY -[id="using-vsphere-problem-detector-operator"] -= Using the vSphere Problem Detector Operator -include::_attributes/common-attributes.adoc[] -:context: vsphere-problem-detector - -toc::[] - -// About the operator -include::modules/vsphere-problem-detector-about.adoc[leveloffset=+1] - -// Run the checks -include::modules/vsphere-problem-detector-running.adoc[leveloffset=+1] - -// View the events -include::modules/vsphere-problem-detector-viewing-events.adoc[leveloffset=+1] - -// View the logs -include::modules/vsphere-problem-detector-viewing-logs.adoc[leveloffset=+1] - -// Reference: Problem detector checks -include::modules/vsphere-problem-detector-config-checks.adoc[leveloffset=+1] - -// Concept: Storage class config check -include::modules/vsphere-problem-detector-storage-class-config-check.adoc[leveloffset=+1] - -// Metrics -include::modules/vsphere-problem-detector-metrics.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* xref:../../monitoring/monitoring-overview.adoc#monitoring-overview[Monitoring overview] diff --git a/installing/installing_with_agent_based_installer/_attributes b/installing/installing_with_agent_based_installer/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_with_agent_based_installer/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_with_agent_based_installer/images b/installing/installing_with_agent_based_installer/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_with_agent_based_installer/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_with_agent_based_installer/installing-with-agent-based-installer.adoc b/installing/installing_with_agent_based_installer/installing-with-agent-based-installer.adoc deleted file mode 100644 index d7cbe43b7e40..000000000000 --- a/installing/installing_with_agent_based_installer/installing-with-agent-based-installer.adoc +++ /dev/null @@ -1,44 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-with-agent-based-installer"] -= Installing a {product-title} cluster with the Agent-based Installer -include::_attributes/common-attributes.adoc[] -:context: installing-with-agent-based-installer - -toc::[] - -Use the following procedures to install an {product-title} cluster using the Agent-based Installer. - -[id="prerequisites_installing-with-agent-based-installer"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -// This anchor ID is extracted/replicated from the former installing-ocp-agent.adoc module to preserve links. 
-[id="installing-ocp-agent_installing-with-agent-based-installer"] -== Installing {product-title} with the Agent-based Installer - -The following procedures deploy a single-node {product-title} in a disconnected environment. You can use these procedures as a basis and modify according to your requirements. - -include::modules/installing-ocp-agent-download.adoc[leveloffset=+2] - -include::modules/installing-ocp-agent-boot.adoc[leveloffset=+2] - -include::modules/installing-ocp-agent-tui.adoc[leveloffset=+2] - -include::modules/installing-ocp-agent-verify.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* See xref:../../installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#modifying-install-config-for-dual-stack-network_ipi-install-installation-workflow[Deploying with dual-stack networking]. -* See xref:../../installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#configuring-the-install-config-file_ipi-install-installation-workflow[Configuring the install-config yaml file]. -* See xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-three-node-cluster_installing-restricted-networks-bare-metal[Configuring a three-node cluster] to deploy three-node clusters in bare metal environments. -* See xref:../../installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc#root-device-hints_preparing-to-install-with-agent-based-installer[About root device hints]. -* See link:https://nmstate.io/examples.html[NMState state examples]. - -include::modules/sample-ztp-custom-resources.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../scalability_and_performance/ztp_far_edge/ztp-deploying-far-edge-clusters-at-scale.adoc#ztp-deploying-far-edge-clusters-at-scale[Challenges of the network far edge] to learn more about {ztp-first}. diff --git a/installing/installing_with_agent_based_installer/modules b/installing/installing_with_agent_based_installer/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_with_agent_based_installer/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_with_agent_based_installer/preparing-an-agent-based-installed-cluster-for-mce.adoc b/installing/installing_with_agent_based_installer/preparing-an-agent-based-installed-cluster-for-mce.adoc deleted file mode 100644 index b41a0cda5e35..000000000000 --- a/installing/installing_with_agent_based_installer/preparing-an-agent-based-installed-cluster-for-mce.adoc +++ /dev/null @@ -1,30 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-an-agent-based-installed-cluster-for-mce"] -= Preparing an Agent-based installed cluster for the multicluster engine for Kubernetes Operator -include::_attributes/common-attributes.adoc[] -:context: preparing-an-agent-based-installed-cluster-for-mce - -toc::[] - -You can install the multicluster engine for Kubernetes Operator and deploy a hub cluster with the Agent-based {product-title} Installer. -The following procedure is partially automated and requires manual steps after the initial cluster is deployed. - -== Prerequisites -* You have read the following documentation: -** link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview[Cluster lifecycle with multicluster engine operator overview]. 
-** xref:../../storage/persistent_storage/persistent_storage_local/persistent-storage-local.adoc#persistent-storage-using-local-volume[Persistent storage using local volumes]. -** xref:../../scalability_and_performance/ztp_far_edge/ztp-deploying-far-edge-clusters-at-scale.adoc#about-ztp_ztp-deploying-far-edge-clusters-at-scale[Using ZTP to provision clusters at the network far edge]. -** xref:../../installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc#preparing-to-install-with-agent-based-installer[Preparing to install with the Agent-based Installer]. -** xref:../../installing/disconnected_install/index.adoc#installing-mirroring-disconnected-about[About disconnected installation mirroring]. -* You have access to the internet to obtain the necessary container images. -* You have installed the OpenShift CLI (`oc`). -* If you are installing in a disconnected environment, you must have a configured local mirror registry for disconnected installation mirroring. - -include::modules/preparing-an-inital-cluster-deployment-for-mce-disconnected.adoc[leveloffset=+1] - -include::modules/preparing-an-inital-cluster-deployment-for-mce-connected.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../storage/persistent_storage/persistent_storage_local/persistent-storage-local.adoc#persistent-storage-using-local-volume[The Local Storage Operator] diff --git a/installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc b/installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc deleted file mode 100644 index 3b9160d331a6..000000000000 --- a/installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc +++ /dev/null @@ -1,55 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-with-agent-based-installer"] -= Preparing to install with the Agent-based installer -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-with-agent-based-installer - -toc::[] - -[id="about-the-agent-based-installer"] -== About the Agent-based Installer - -The Agent-based installation method provides the flexibility to boot your on-premises servers in any way that you choose. It combines the ease of use of the Assisted Installation service with the ability to run offline, including in air-gapped environments. -Agent-based installation is a subcommand of the {product-title} installer. -It generates a bootable ISO image containing all of the information required to deploy an {product-title} cluster, with an available release image. - -The configuration is in the same format as for the installer-provisioned infrastructure and user-provisioned infrastructure installation methods. -The Agent-based Installer can also optionally generate or accept Zero Touch Provisioning (ZTP) custom resources. ZTP allows you to provision new edge sites with declarative configurations of bare-metal equipment. 
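For orientation, generating the bootable ISO is a single installer subcommand that reads its input from an assets directory. The sketch below assumes the directory already contains the `install-config.yaml` and `agent-config.yaml` files described later in this assembly; the resulting ISO is written back into the same directory.

[source,terminal]
----
$ openshift-install agent create image --dir <assets_directory>
----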
- -include::modules/understanding-agent-install.adoc[leveloffset=+1] - -include::modules/agent-installer-fips-compliance.adoc[leveloffset=+1] - -include::modules/agent-installer-configuring-fips-compliance.adoc[leveloffset=+1] - -[discrete] -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/articles/5059881[OpenShift Security Guide Book] - -//// -* xref:../../installing/installing-fips.adoc#installing-fips[Support for FIPS cryptography] -//// - -include::modules/agent-install-networking.adoc[leveloffset=+1] - -include::modules/agent-install-sample-config-bonds-vlans.adoc[leveloffset=+1] - -include::modules/agent-install-sample-config-bond-sriov.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/configuring_and_managing_networking/configuring-network-bonding_configuring-and-managing-networking[Configuring network bonding] - -include::modules/installation-bare-metal-agent-installer-config-yaml.adoc[leveloffset=+1] - -include::modules/validations-before-agent-iso-creation.adoc[leveloffset=+1] - -include::modules/agent-install-ipi-install-root-device-hints.adoc[leveloffset=+1] - -[id="agent-based-installation-next-steps"] -== Next steps - -* xref:../../installing/installing_with_agent_based_installer/installing-with-agent-based-installer.adoc#installing-with-agent-based-installer[Installing a cluster with the Agent-based Installer] diff --git a/installing/installing_with_agent_based_installer/snippets b/installing/installing_with_agent_based_installer/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_with_agent_based_installer/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_with_agent_based_installer/understanding-disconnected-installation-mirroring.adoc b/installing/installing_with_agent_based_installer/understanding-disconnected-installation-mirroring.adoc deleted file mode 100644 index 83ab77c427f8..000000000000 --- a/installing/installing_with_agent_based_installer/understanding-disconnected-installation-mirroring.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-disconnected-installation-mirroring"] -= Understanding disconnected installation mirroring -include::_attributes/common-attributes.adoc[] -:context: understanding-disconnected-installation-mirroring - -toc::[] -// Reusing applicable content from Disconnected installation mirroring assembly - -You can use a mirror registry for disconnected installations and to ensure that your clusters only use container images that satisfy your organization's controls on external content. Before you install a cluster on infrastructure that you provision in a disconnected environment, you must mirror the required container images into that environment. To mirror container images, you must have a registry for mirroring. 
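As a rough sketch of what the mirroring step looks like with the oc-mirror plugin covered in the next section, an image set defined in an `ImageSetConfiguration` file is pushed to the mirror registry with a single command. The registry host name and port shown here are assumptions for illustration only.

[source,terminal]
----
$ oc mirror --config=./imageset-config.yaml docker://mirror.example.com:8443
----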
- -[id="agent-install-mirroring-images-disconnected"] -== Mirroring images for a disconnected installation through the Agent-based Installer - -You can use one of the following procedures to mirror your {product-title} image repository to your mirror registry: - -* xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[Mirroring images for a disconnected installation] -* xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#installing-mirroring-disconnected[Mirroring images for a disconnected installation using the oc-mirror plugin] - -include::modules/agent-install-about-mirroring-for-disconnected-registry.adoc[leveloffset=+1] - -include::modules/agent-install-configuring-for-disconnected-registry.adoc[leveloffset=+2] \ No newline at end of file diff --git a/installing/modules b/installing/modules deleted file mode 120000 index 43aab75b53c9..000000000000 --- a/installing/modules +++ /dev/null @@ -1 +0,0 @@ -../modules/ \ No newline at end of file diff --git a/installing/snippets b/installing/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/installing/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/installing/validating-an-installation.adoc b/installing/validating-an-installation.adoc deleted file mode 100644 index 8dba622240f9..000000000000 --- a/installing/validating-an-installation.adoc +++ /dev/null @@ -1,71 +0,0 @@ -:_content-type: ASSEMBLY -[id="validating-an-installation"] -= Validating an installation -include::_attributes/common-attributes.adoc[] -:context: validating-an-installation - -toc::[] - -You can check the status of an {product-title} cluster after an installation by following the procedures in this document. - -//Reviewing the installation log -include::modules/reviewing-the-installation-log.adoc[leveloffset=+1] - -//Viewing the image pull source -include::modules/viewing-the-image-pull-source.adoc[leveloffset=+1] - -//Getting cluster version, status, and update details -include::modules/getting-cluster-version-status-and-update-details.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../support/troubleshooting/troubleshooting-installations.adoc#querying-operator-status-after-installation_troubleshooting-installations[Querying Operator status after installation] for more information about querying Operator status if your installation is still progressing. - -* See xref:../support/troubleshooting/troubleshooting-operator-issues.adoc#troubleshooting-operator-issues[Troubleshooting Operator issues] for information about investigating issues with Operators. - -* See xref:../updating/updating_a_cluster/updating-cluster-web-console.adoc#updating-cluster-web-console[Updating a cluster using the web console] for more information on updating your cluster. - -* See xref:../updating/understanding_updates/understanding-update-channels-release.adoc#understanding-update-channels-releases[Understanding update channels and releases] for an overview about update release channels. - -//Querying the status of the cluster nodes by using the CLI -include::modules/querying-the-status-of-cluster-nodes-using-the-cli.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../support/troubleshooting/verifying-node-health.adoc#verifying-node-health[Verifying node health] for more details about reviewing node health and investigating node issues. 
- -//Reviewing the cluster status from the OpenShift Container Platform web console -include::modules/reviewing-cluster-status-from-the-openshift-web-console.adoc[leveloffset=+1] - -//Reviewing the cluster status from {cluster-manager} -include::modules/reviewing-cluster-status-from-the-openshift-cluster-manager.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc#using-insights-to-identify-issues-with-your-cluster[Using Insights to identify issues with your cluster] for more information about reviewing potential issues with your cluster. - -//Checking cluster resource availability and utilization -include::modules/checking-cluster-resource-availability-and-utilization.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../monitoring/monitoring-overview.adoc#monitoring-overview[Monitoring overview] for more information about the {product-title} monitoring stack. - -//Listing alerts that are firing -include::modules/listing-alerts-that-are-firing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../monitoring/managing-alerts.adoc#managing-alerts[Managing alerts] for further details about alerting in {product-title}. - -[id="validating-an-installation-next-steps"] -== Next steps - -* See xref:../support/troubleshooting/troubleshooting-installations.adoc#troubleshooting-installations[Troubleshooting installations] if you experience issues when installing your cluster. - -* After installing {product-title}, you can xref:../post_installation_configuration/cluster-tasks.adoc#post-install-cluster-tasks[further expand and customize your cluster]. diff --git a/logging/.meta-logging-quickref.adoc b/logging/.meta-logging-quickref.adoc deleted file mode 100644 index 1f902314ffd8..000000000000 --- a/logging/.meta-logging-quickref.adoc +++ /dev/null @@ -1,46 +0,0 @@ -= Logging Meta Reference for Writers - -This hidden file contains meta content for writers and is not for inclusion in published docs. For Logging 5.5+ documentation has shifted to a per version of Logging approach. Files created for logging after this change follow the naming convention 'logging-description', while files created prior to this change use 'cluster-logging-description'. - -== Logging Files -Files referenced only apply to versions 5.5+ of logging. 
- -* Assemblies -** logging-5-5-administration.adoc -** logging-5-5-architecture.adoc -** logging-5-5-configuration.adoc -** logging-5-5-reference.adoc -** logging-5-5-release-notes.adoc -** logging-5-6-administration.adoc -** logging-5-6-architecture.adoc -** logging-5-6-configuration.adoc -** logging-5-6-reference.adoc -** logging-5-6-release-notes.adoc - -.Include syntax: ----- -\include::target[leveloffset=offset,lines=ranges] -\include::modules/logging-module-name.adoc[leveloffset=+1,lines=5..10] -\include::snippets/ ----- - -* Modules -** logging-rn-5.5.5.adoc -** logging-rn-5.5.4.adoc -** logging-rn-5.5.3.adoc -** logging-rn-5.5.2.adoc -** logging-rn-5.5.1.adoc -** logging-rn-5.5.0.adoc -** logging-loki-retention.adoc - - -* Snippets -** logging-stable-updates-snip.adoc[] -** logging-log-types-snip.adoc[] -** logging-compatibility-snip.adoc[] -** logging-loki-vs-lokistack-snip.adoc[] -** logging-create-secret-snip.adoc[] -** logging-supported-config-snip.adoc[] -** logging-approval-strategy-snip.adoc[] -** logging-subscription-object-snip.adoc[ -** logging-create-apply-cr-snip.adoc[] diff --git a/logging/_attributes b/logging/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/logging/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/logging/cluster-logging-dashboards.adoc b/logging/cluster-logging-dashboards.adoc deleted file mode 100644 index 741170805983..000000000000 --- a/logging/cluster-logging-dashboards.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="cluster-logging-dashboards"] -= Viewing cluster dashboards -:context: cluster-logging-dashboards - -toc::[] - -The *Logging/Elasticsearch Nodes* and *Openshift Logging* dashboards in the -ifndef::openshift-rosa,openshift-dedicated[] -{product-title} web console -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -{cluster-manager-url} -endif::[] -contain in-depth details about your Elasticsearch instance and the individual Elasticsearch nodes that you can use to prevent and diagnose problems. - -The *OpenShift Logging* dashboard contains charts that show details about your Elasticsearch instance at a cluster level, including cluster resources, garbage collection, shards in the cluster, and Fluentd statistics. - -The *Logging/Elasticsearch Nodes* dashboard contains charts that show details about your Elasticsearch instance, many at node level, including details on indexing, shards, resources, and so forth. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-dashboards-access.adoc[leveloffset=+1] - -For information on the dashboard charts, see xref:../logging/cluster-logging-dashboards.adoc#cluster-logging-dashboards-logging_cluster-logging-dashboards[About the OpenShift Logging dashboard] and xref:../logging/cluster-logging-dashboards.adoc#cluster-logging-dashboards-es_cluster-logging-dashboards[About the Logging/Elastisearch Nodes dashboard]. 
- -include::modules/cluster-logging-dashboards-logging.adoc[leveloffset=+1] -include::modules/cluster-logging-dashboards-es.adoc[leveloffset=+1] diff --git a/logging/cluster-logging-deploying.adoc b/logging/cluster-logging-deploying.adoc deleted file mode 100644 index d0826a689af3..000000000000 --- a/logging/cluster-logging-deploying.adoc +++ /dev/null @@ -1,78 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-deploying -[id="cluster-logging-deploying"] -= Installing the {logging-title} -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] - -toc::[] - -You can install the {logging-title} by deploying the OpenShift Elasticsearch and Red Hat OpenShift Logging Operators. The OpenShift Elasticsearch Operator creates and manages the Elasticsearch cluster used by OpenShift Logging. The {logging} Operator creates and manages the components of the logging stack. - -The process for deploying the {logging} to {product-title} -ifdef::openshift-rosa[] -(ROSA) -endif::[] - involves: - -* Reviewing the xref:../logging/config/cluster-logging-storage-considerations#cluster-logging-storage[{logging-uc} storage considerations]. - -* Installing the logging subsystem for {product-title} using xref:../logging/cluster-logging-deploying.adoc#cluster-logging-deploy-console_cluster-logging-deploying[the web console] or xref:../logging/cluster-logging-deploying.adoc#cluster-logging-deploy-cli_cluster-logging-deploying[the CLI]. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-deploy-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -ifdef::openshift-enterprise,openshift-origin[] -* xref:../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-operators-from-operatorhub_olm-adding-operators-to-a-cluster[Installing Operators from the OperatorHub] -* xref:../logging/config/cluster-logging-collector.adoc#cluster-logging-removing-unused-components-if-no-elasticsearch_cluster-logging-collector[Removing unused components if you do not use the default Elasticsearch log store] -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -* link:https://docs.openshift.com/container-platform/latest/operators/admin/olm-adding-operators-to-cluster.html[Installing Operators from OperatorHub] -* link:https://docs.openshift.com/container-platform/latest/logging/config/cluster-logging-collector.html#cluster-logging-removing-unused-components-if-no-elasticsearch_cluster-logging-collector[Removing unused components if you do not use the default Elasticsearch log store] -endif::[] - -== Post-installation tasks - -If you plan to use Kibana, you must xref:#cluster-logging-visualizer-indices_cluster-logging-deploying[manually create your Kibana index patterns and visualizations] to explore and visualize data in Kibana. - -If your network plugin enforces network isolation, xref:#cluster-logging-deploy-multitenant_cluster-logging-deploying[allow network traffic between the projects that contain the {logging} Operators]. 
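Before moving on to the post-installation tasks, you can optionally confirm from the CLI that both Operators report a `Succeeded` phase. This check is illustrative only and assumes the default namespaces used by this procedure.

[source,terminal]
----
$ oc get csv -n openshift-logging             # Red Hat OpenShift Logging Operator
$ oc get csv -n openshift-operators-redhat    # OpenShift Elasticsearch Operator
----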
- - -include::modules/cluster-logging-deploy-cli.adoc[leveloffset=+1] - -== Post-installation tasks - -If you plan to use Kibana, you must xref:#cluster-logging-visualizer-indices_cluster-logging-deploying[manually create your Kibana index patterns and visualizations] to explore and visualize data in Kibana. - -If your network plugin enforces network isolation, xref:#cluster-logging-deploy-multitenant_cluster-logging-deploying[allow network traffic between the projects that contain the {logging} Operators]. - -include::modules/cluster-logging-visualizer-indices.adoc[leveloffset=+2] - -include::modules/cluster-logging-deploy-multitenant.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -ifdef::openshift-enterprise,openshift-origin[] -* xref:../networking/network_policy/about-network-policy.adoc[About network policy] -* xref:../networking/openshift_sdn/about-openshift-sdn.adoc[About the OpenShift SDN default CNI network provider] -* xref:../networking/ovn_kubernetes_network_provider/about-ovn-kubernetes.adoc[About the OVN-Kubernetes default Container Network Interface (CNI) network provider] -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -* link:https://docs.openshift.com/container-platform/latest/networking/network_policy/about-network-policy.html[About network policy] -* link:https://docs.openshift.com/container-platform/latest/networking/openshift_sdn/about-openshift-sdn.html[About the OpenShift SDN default CNI network provider] -* link:https://docs.openshift.com/container-platform/latest/networking/ovn_kubernetes_network_provider/about-ovn-kubernetes.html[About the OVN-Kubernetes default Container Network Interface (CNI) network provider] -endif::[] - -// include::modules/cluster-logging-deploy-memory.adoc[leveloffset=+1] - -// include::modules/cluster-logging-deploy-certificates.adoc[leveloffset=+1] - -// include::modules/cluster-logging-deploy-label.adoc[leveloffset=+1] diff --git a/logging/cluster-logging-enabling-json-logging.adoc b/logging/cluster-logging-enabling-json-logging.adoc deleted file mode 100644 index daca7c1d0c28..000000000000 --- a/logging/cluster-logging-enabling-json-logging.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-enabling-json-logging -[id="cluster-logging-enabling-json-logging"] -= Enabling JSON logging -include::_attributes/common-attributes.adoc[] - -toc::[] - -You can configure the Log Forwarding API to parse JSON strings into a structured object. - -include::modules/cluster-logging-json-log-forwarding.adoc[leveloffset=+1] -include::modules/cluster-logging-configuration-of-json-log-data-for-default-elasticsearch.adoc[leveloffset=+1] -include::modules/cluster-logging-forwarding-json-logs-to-the-default-elasticsearch.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../logging/cluster-logging-external.adoc#cluster-logging-external[Forwarding logs to third-party systems] diff --git a/logging/cluster-logging-eventrouter.adoc b/logging/cluster-logging-eventrouter.adoc deleted file mode 100644 index c0397eee4000..000000000000 --- a/logging/cluster-logging-eventrouter.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-eventrouter -[id="cluster-logging-eventrouter"] -= Collecting and storing Kubernetes events -include::_attributes/common-attributes.adoc[] - -toc::[] - -The {product-title} Event Router is a pod that watches Kubernetes events and logs them for collection by the {logging}. 
You must manually deploy the Event Router. - -The Event Router collects events from all projects and writes them to `STDOUT`. The collector then forwards those events to the store defined in the `ClusterLogForwarder` custom resource (CR). - -[IMPORTANT] -==== -The Event Router adds additional load to Fluentd and can impact the number of other log messages that can be processed. -==== - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-eventrouter-deploy.adoc[leveloffset=+1] diff --git a/logging/cluster-logging-exported-fields.adoc b/logging/cluster-logging-exported-fields.adoc deleted file mode 100644 index 1a92d4622819..000000000000 --- a/logging/cluster-logging-exported-fields.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="cluster-logging-exported-fields"] -= Log Record Fields -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: cluster-logging-exported-fields - -toc::[] - -The following fields can be present in log records exported by the {logging}. Although log records are typically formatted as JSON objects, the same data model can be applied to other encodings. - -To search these fields from Elasticsearch and Kibana, use the full dotted field name when searching. For example, with an Elasticsearch */_search URL*, to look for a Kubernetes pod name, use `/_search/q=kubernetes.pod_name:name-of-my-pod`. - -// The logging system can parse JSON-formatted log entries to external systems. These log entries are formatted as a fluentd message with extra fields such as `kubernetes`. The fields exported by the logging system and available for searching from Elasticsearch and Kibana are documented at the end of this document. - -include::modules/cluster-logging-exported-fields-top-level-fields.adoc[leveloffset=0] - -include::modules/cluster-logging-exported-fields-kubernetes.adoc[leveloffset=0] - -// add modules/cluster-logging-exported-fields-openshift when available diff --git a/logging/cluster-logging-external.adoc b/logging/cluster-logging-external.adoc deleted file mode 100644 index 465c610fa00d..000000000000 --- a/logging/cluster-logging-external.adoc +++ /dev/null @@ -1,211 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-external -[id="cluster-logging-external"] -= Forwarding logs to external third-party logging systems -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] - -toc::[] - -By default, the {logging} sends container and infrastructure logs to the default internal log store defined in the `ClusterLogging` custom resource. However, it does not send audit logs to the internal store because it does not provide secure storage. If this default configuration meets your needs, you do not need to configure the Cluster Log Forwarder. - -To send logs to other log aggregators, you use the {product-title} Cluster Log Forwarder. This API enables you to send container, infrastructure, and audit logs to specific endpoints within or outside your cluster. In addition, you can send different types of logs to various systems so that various individuals can access each type. You can also enable Transport Layer Security (TLS) support to send logs securely, as required by your organization. 
- -[NOTE] -==== -To send audit logs to the default internal Elasticsearch log store, use the Cluster Log Forwarder as described in xref:../logging/config/cluster-logging-log-store.adoc#cluster-logging-elasticsearch-audit_cluster-logging-store[Forward audit logs to the log store]. -==== - -When you forward logs externally, the {logging} creates or modifies a Fluentd config map to send logs using your desired protocols. You are responsible for configuring the protocol on the external log aggregator. - -[IMPORTANT] -==== -You cannot use the config map methods and the Cluster Log Forwarder in the same cluster. -==== - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-collector-log-forwarding-about.adoc[leveloffset=+1] - -include::modules/cluster-logging-forwarding-separate-indices.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forwarding-supported-plugins-5-1.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forwarding-supported-plugins-5-2.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forwarding-supported-plugins-5-3.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forwarding-supported-plugins-5-4.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forwarding-supported-plugins-5-5.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forwarding-supported-plugins-5-6.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-es.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-fluentd.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-syslog.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-cloudwatch.adoc[leveloffset=+1] - -[id="cluster-logging-collector-log-forward-sts-cloudwatch_{context}"] -=== Forwarding logs to Amazon CloudWatch from STS enabled clusters - -For clusters with AWS Security Token Service (STS) enabled, you can create an AWS service account manually or create a credentials request by using the -ifdef::openshift-enterprise,openshift-origin[] -xref:../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc[Cloud Credential Operator(CCO)] -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -link:https://docs.openshift.com/container-platform/latest/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.html[Cloud Credential Operator(CCO)] -endif::[] - utility `ccoctl`. - -[NOTE] -==== -This feature is not supported by the vector collector. -==== - -.Prerequisites - -* {logging-title-uc}: 5.5 and later - - -.Procedure -. 
Create a `CredentialsRequest` custom resource YAML by using the template below:
-+
-.CloudWatch credentials request template
-[source,yaml]
-----
-apiVersion: cloudcredential.openshift.io/v1
-kind: CredentialsRequest
-metadata:
-  name: -credrequest
-  namespace: openshift-cloud-credential-operator
-spec:
-  providerSpec:
-    apiVersion: cloudcredential.openshift.io/v1
-    kind: AWSProviderSpec
-    statementEntries:
-      - action:
-          - logs:PutLogEvents
-          - logs:CreateLogGroup
-          - logs:PutRetentionPolicy
-          - logs:CreateLogStream
-          - logs:DescribeLogGroups
-          - logs:DescribeLogStreams
-        effect: Allow
-        resource: arn:aws:logs:*:*:*
-  secretRef:
-    name:
-    namespace: openshift-logging
-  serviceAccountNames:
-    - logcollector
-----
-+
-. Use the `ccoctl` command to create a role for AWS using your `CredentialsRequest` CR. With the `CredentialsRequest` object, this `ccoctl` command creates an IAM role with a trust policy that is tied to the specified OIDC identity provider, and a permissions policy that grants permissions to perform operations on CloudWatch resources. This command also creates a YAML configuration file in `//manifests/openshift-logging--credentials.yaml`. This secret file contains the `role_arn` key/value used during authentication with the AWS IAM identity provider.
-+
-[source,terminal]
-----
-$ ccoctl aws create-iam-roles \
---name= \
---region= \
---credentials-requests-dir=/credrequests \
---identity-provider-arn=arn:aws:iam:::oidc-provider/-oidc.s3..amazonaws.com <1>
-----
-<1> is the name used to tag your cloud resources and should match the name used during your STS cluster install.
-+
-. Apply the secret that you created:
-+
-[source,terminal]
-----
-$ oc apply -f output/manifests/openshift-logging--credentials.yaml
-----
-+
-. Create or edit a `ClusterLogForwarder` custom resource:
-+
-[source,yaml]
-----
-apiVersion: "logging.openshift.io/v1"
-kind: ClusterLogForwarder
-metadata:
-  name: instance <1>
-  namespace: openshift-logging <2>
-spec:
-  outputs:
-    - name: cw <3>
-      type: cloudwatch <4>
-      cloudwatch:
-        groupBy: logType <5>
-        groupPrefix: <6>
-        region: us-east-2 <7>
-      secret:
-        name: <8>
-  pipelines:
-    - name: to-cloudwatch <9>
-      inputRefs: <10>
-        - infrastructure
-        - audit
-        - application
-      outputRefs:
-        - cw <11>
-----
-<1> The name of the `ClusterLogForwarder` CR must be `instance`.
-<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`.
-<3> Specify a name for the output.
-<4> Specify the `cloudwatch` type.
-<5> Optional: Specify how to group the logs:
-+
-* `logType` creates log groups for each log type.
-* `namespaceName` creates a log group for each application namespace. Infrastructure and audit logs are unaffected, remaining grouped by `logType`.
-* `namespaceUUID` creates a new log group for each application namespace UUID. It also creates separate log groups for infrastructure and audit logs.
-<6> Optional: Specify a string to replace the default `infrastructureName` prefix in the names of the log groups.
-<7> Specify the AWS region.
-<8> Specify the name of the secret that contains your AWS credentials.
-<9> Optional: Specify a name for the pipeline.
-<10> Specify which log types to forward by using the pipeline: `application`, `infrastructure`, or `audit`.
-<11> Specify the name of the output to use when forwarding logs with this pipeline.
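-
-After you apply the `ClusterLogForwarder` CR, one way to confirm that log groups are being created is to query CloudWatch directly. The following sketch is for illustration only and is not part of the documented procedure; it assumes that the AWS CLI and `jq` are installed and configured with credentials that can read CloudWatch Logs:
-
-[source,terminal]
-----
-$ aws logs describe-log-groups --output json | jq -r '.logGroups[].logGroupName'
-----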
- -[role="_additional-resources"] -.Additional resources -* link:https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html[AWS STS API Reference] - -include::modules/cluster-logging-collector-log-forward-secret-cloudwatch.adoc[leveloffset=+2] - -include::modules/cluster-logging-collector-log-forward-loki.adoc[leveloffset=+1] - -include::modules/cluster-logging-troubleshooting-loki-entry-out-of-order-errors.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../logging/cluster-logging-exported-fields.adoc#cluster-logging-exported-fields-kubernetes_cluster-logging-exported-fields[Log Record Fields]. - -* link:https://grafana.com/docs/loki/latest/configuration/[Configuring Loki server] - -ifndef::openshift-rosa[] -include::modules/cluster-logging-collector-log-forward-gcp.adoc[leveloffset=+1] -endif::openshift-rosa[] - -include::modules/logging-forward-splunk.adoc[leveloffset=+1] - -include::modules/logging-http-forward.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-project.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-logs-from-application-pods.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -ifdef::openshift-enterprise,openshift-origin[] -* xref:../networking/ovn_kubernetes_network_provider/logging-network-policy.adoc#logging-network-policy[Logging for egress firewall and network policy rules] -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -* link:https://docs.openshift.com/container-platform/latest/networking/ovn_kubernetes_network_provider/logging-network-policy.html#logging-network-policy[Logging for egress firewall and network policy rules] -endif::[] - -include::modules/cluster-logging-troubleshooting-log-forwarding.adoc[leveloffset=+1] diff --git a/logging/cluster-logging-loki.adoc b/logging/cluster-logging-loki.adoc deleted file mode 100644 index 7fc2561b7074..000000000000 --- a/logging/cluster-logging-loki.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-loki -[id="cluster-logging-loki"] -= Loki -include::_attributes/common-attributes.adoc[] - -toc::[] - -include::modules/cluster-logging-loki-about.adoc[leveloffset=+1] - -include::modules/cluster-logging-loki-deploy.adoc[leveloffset=+1] - -include::modules/logging-loki-retention.adoc[leveloffset=+1] - -include::modules/cluster-logging-forwarding-lokistack.adoc[leveloffset=+1] - -include::modules/cluster-logging-troubleshooting-loki-entry-out-of-order-errors.adoc[leveloffset=+2] - -== Additional Resources -* link:https://grafana.com/docs/loki/latest/logql/[Loki Query Language (LogQL) Documentation] -* link:https://loki-operator.dev/docs/howto_connect_grafana.md/[Grafana Dashboard Documentation] -* link:https://loki-operator.dev/docs/object_storage.md/[Loki Object Storage Documentation] -* link:https://grafana.com/docs/loki/latest/operations/storage/schema/#changing-the-schema[Loki Storage Schema Documentation] diff --git a/logging/cluster-logging-release-notes.adoc b/logging/cluster-logging-release-notes.adoc deleted file mode 100644 index e09220f8694b..000000000000 --- a/logging/cluster-logging-release-notes.adoc +++ /dev/null @@ -1,1086 +0,0 @@ -:_content-type: ASSEMBLY -[id="cluster-logging-release-notes"] -include::_attributes/common-attributes.adoc[] -= Release notes for Logging - -:context: cluster-logging-release-notes-v5x - -toc::[] - -include::snippets/logging-compatibility-snip.adoc[] - 
-include::snippets/logging-stable-updates-snip.adoc[] - -include::modules/logging-rn-5.7.3.adoc[leveloffset=+1] - -include::modules/logging-rn-5.7.2.adoc[leveloffset=+1] - -include::modules/logging-rn-5.7.1.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.7.0.adoc[leveloffset=+1] - -include::modules/logging-rn-5.6.8.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.6.5.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.6.4.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.6.3.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.6.2.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.6.1.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.6.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.5.10.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.5.9.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.5.8.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.5.7.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.5.6.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.5.5.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.5.4.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.5.3.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.5.2.adoc[leveloffset=+1] - -[id="cluster-logging-release-notes-5-5-1"] -== Logging 5.5.1 -This release includes link:https://access.redhat.com/errata/RHSA-2022:6344[OpenShift Logging Bug Fix Release 5.5.1]. - -[id="openshift-logging-5-5-1-enhancements_{context}"] -=== Enhancements -* This enhancement adds an *Aggregated Logs* tab to the *Pod Details* page of the {product-title} web console when the Logging Console Plugin is in use. This enhancement is only available on {product-title} 4.10 and later. (link:https://issues.redhat.com/browse/LOG-2647[LOG-2647]) - -* This enhancement adds Google Cloud Logging as an output option for log forwarding. (link:https://issues.redhat.com/browse/LOG-1482[LOG-1482]) -//xref:cluster-logging-collector-log-forward-gcp.adoc - -[id="openshift-logging-5-5-1-bug-fixes_{context}"] -=== Bug fixes -* Before this update, the Operator did not ensure that the pod was ready, which caused the cluster to reach an inoperable state during a cluster restart. With this update, the Operator marks new pods as ready before continuing to a new pod during a restart, which resolves the issue. (link:https://issues.redhat.com/browse/LOG-2745[LOG-2745]) - -* Before this update, Fluentd would sometimes not recognize that the Kubernetes platform rotated the log file and would no longer read log messages. This update corrects that by setting the configuration parameter suggested by the upstream development team. (link:https://issues.redhat.com/browse/LOG-2995[LOG-2995]) - -* Before this update, the addition of multi-line error detection caused internal routing to change and forward records to the wrong destination. With this update, the internal routing is correct. (link:https://issues.redhat.com/browse/LOG-2801[LOG-2801]) - -* Before this update, changing the {product-title} web console's refresh interval created an error when the *Query* field was empty. With this update, changing the interval is not an available option when the *Query* field is empty. 
(link:https://issues.redhat.com/browse/LOG-2917[LOG-2917]) - -[id="openshift-logging-5-5-1-cves_{context}"] -=== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-1705[CVE-2022-1705] -* link:https://access.redhat.com/security/cve/CVE-2022-2526[CVE-2022-2526] -* link:https://access.redhat.com/security/cve/CVE-2022-29154[CVE-2022-29154] -* link:https://access.redhat.com/security/cve/CVE-2022-30631[CVE-2022-30631] -* link:https://access.redhat.com/security/cve/CVE-2022-32148[CVE-2022-32148] -* link:https://access.redhat.com/security/cve/CVE-2022-32206[CVE-2022-32206] -* link:https://access.redhat.com/security/cve/CVE-2022-32208[CVE-2022-32208] - -include::modules/cluster-logging-rn-5.5.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.14.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.13.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.12.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.11.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.10.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.9.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.8.adoc[leveloffset=+1] - -//include::modules/cluster-logging-rn-5.4.7.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.6.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.5.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.4.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.3.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.2.adoc[leveloffset=+1] - -[id="cluster-logging-release-notes-5-4-1_{context}"] -== Logging 5.4.1 -This release includes link:https://access.redhat.com/errata/RHSA-2022:2216[RHSA-2022:2216-OpenShift Logging Bug Fix Release 5.4.1]. - -[id="openshift-logging-5-4-1-bug-fixes_{context}"] -=== Bug fixes -* Before this update, the log file metric exporter only reported logs created while the exporter was running, which resulted in inaccurate log growth data. This update resolves this issue by monitoring `/var/log/pods`. (https://issues.redhat.com/browse/LOG-2442[LOG-2442]) - -* Before this update, the collector would be blocked because it continually tried to use a stale connection when forwarding logs to fluentd forward receivers. With this release, the `keepalive_timeout` value has been set to 30 seconds (`30s`) so that the collector recycles the connection and re-attempts to send failed messages within a reasonable amount of time. (https://issues.redhat.com/browse/LOG-2534[LOG-2534]) - -* Before this update, an error in the gateway component enforcing tenancy for reading logs limited access to logs with a Kubernetes namespace causing "audit" and some "infrastructure" logs to be unreadable. With this update, the proxy correctly detects users with admin access and allows access to logs without a namespace. (https://issues.redhat.com/browse/LOG-2448[LOG-2448]) - -* Before this update, the `system:serviceaccount:openshift-monitoring:prometheus-k8s` service account had cluster level privileges as a `clusterrole` and `clusterrolebinding`. This update restricts the service account` to the `openshift-logging` namespace with a role and rolebinding. (https://issues.redhat.com/browse/LOG-2437[LOG-2437]) - -* Before this update, Linux audit log time parsing relied on an ordinal position of a key/value pair. This update changes the parsing to use a regular expression to find the time entry. 
(https://issues.redhat.com/browse/LOG-2321[LOG-2321]) - - -[id="openshift-logging-5-4-1-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* https://access.redhat.com/security/cve/CVE-2018-25032[CVE-2018-25032] -* https://access.redhat.com/security/cve/CVE-2021-4028[CVE-2021-4028] -* https://access.redhat.com/security/cve/CVE-2021-37136[CVE-2021-37136] -* https://access.redhat.com/security/cve/CVE-2021-37137[CVE-2021-37137] -* https://access.redhat.com/security/cve/CVE-2021-43797[CVE-2021-43797] -* https://access.redhat.com/security/cve/CVE-2022-0778[CVE-2022-0778] -* https://access.redhat.com/security/cve/CVE-2022-1154[CVE-2022-1154] -* https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* https://access.redhat.com/security/cve/CVE-2022-21426[CVE-2022-21426] -* https://access.redhat.com/security/cve/CVE-2022-21434[CVE-2022-21434] -* https://access.redhat.com/security/cve/CVE-2022-21443[CVE-2022-21443] -* https://access.redhat.com/security/cve/CVE-2022-21476[CVE-2022-21476] -* https://access.redhat.com/security/cve/CVE-2022-21496[CVE-2022-21496] -* https://access.redhat.com/security/cve/CVE-2022-21698[CVE-2022-21698] -* https://access.redhat.com/security/cve/CVE-2022-25636[CVE-2022-25636] -==== - - -[id="cluster-logging-release-notes-5-4-0_{context}"] -== Logging 5.4 -The following advisories are available for logging 5.4: -link:https://access.redhat.com/errata/RHSA-2022:1461[{logging-title-uc} Release 5.4] - -[id="openshift-logging-5-4-0-tech-prev_{context}"] -=== Technology Previews - -include::modules/cluster-logging-vector-tech-preview.adoc[leveloffset=+2] -include::modules/cluster-logging-loki-tech-preview.adoc[leveloffset=+2] - -[id="openshift-logging-5-4-0-bug-fixes_{context}"] -=== Bug fixes - -* Before this update, the `cluster-logging-operator` used cluster scoped roles and bindings to establish permissions for the Prometheus service account to scrape metrics. These permissions were created when deploying the Operator using the console interface but were missing when deploying from the command line. This update fixes the issue by making the roles and bindings namespace-scoped. (link:https://issues.redhat.com/browse/LOG-2286[LOG-2286]) - -* Before this update, a prior change to fix dashboard reconciliation introduced a `ownerReferences` field to the resource across namespaces. As a result, both the config map and dashboard were not created in the namespace. With this update, the removal of the `ownerReferences` field resolves the issue, and the OpenShift Logging dashboard is available in the console. (link:https://issues.redhat.com/browse/LOG-2163[LOG-2163]) - -* Before this update, changes to the metrics dashboards did not deploy because the `cluster-logging-operator` did not correctly compare existing and modified config maps that contain the dashboard. With this update, the addition of a unique hash value to object labels resolves the issue. (link:https://issues.redhat.com/browse/LOG-2071[LOG-2071]) - -* Before this update, the OpenShift Logging dashboard did not correctly display the pods and namespaces in the table, which displays the top producing containers collected over the last 24 hours. With this update, the pods and namespaces are displayed correctly. 
(link:https://issues.redhat.com/browse/LOG-2069[LOG-2069])
-
-* Before this update, when the `ClusterLogForwarder` was set up with `Elasticsearch OutputDefault` and Elasticsearch outputs did not have structured keys, the generated configuration contained the incorrect values for authentication. This update corrects the secret and certificates used. (link:https://issues.redhat.com/browse/LOG-2056[LOG-2056])
-
-* Before this update, the OpenShift Logging dashboard displayed an empty CPU graph because of a reference to an invalid metric. With this update, the correct data point has been selected, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2026[LOG-2026])
-
-* Before this update, the Fluentd container image included builder tools that were unnecessary at run time. This update removes those tools from the image. (link:https://issues.redhat.com/browse/LOG-1927[LOG-1927])
-
-* Before this update, a name change of the deployed collector in the 5.3 release caused the logging collector to generate the `FluentdNodeDown` alert. This update resolves the issue by fixing the job name for the Prometheus alert. (link:https://issues.redhat.com/browse/LOG-1918[LOG-1918])
-
-* Before this update, the log collector was collecting its own logs due to a component name change introduced by a refactoring. This led to a potential feedback loop of the collector processing its own logs, which might result in memory and log message size issues. This update resolves the issue by excluding the collector logs from the collection. (link:https://issues.redhat.com/browse/LOG-1774[LOG-1774])
-
-* Before this update, Elasticsearch generated the error `Unable to create PersistentVolumeClaim due to forbidden: exceeded quota: infra-storage-quota.` if the PVC already existed. With this update, Elasticsearch checks for existing PVCs, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2131[LOG-2131])
-
-* Before this update, Elasticsearch was unable to return to the ready state when the `elasticsearch-signing` secret was removed. With this update, Elasticsearch is able to go back to the ready state after that secret is removed. (link:https://issues.redhat.com/browse/LOG-2171[LOG-2171])
-
-* Before this update, the change of the path from which the collector reads container logs caused the collector to forward some records to the wrong indices. With this update, the collector now uses the correct configuration to resolve the issue. (link:https://issues.redhat.com/browse/LOG-2160[LOG-2160])
-
-* Before this update, clusters with a large number of namespaces caused Elasticsearch to stop serving requests because the list of namespaces reached the maximum header size limit. With this update, headers only include a list of namespace names, resolving the issue. (link:https://issues.redhat.com/browse/LOG-1899[LOG-1899])
-
-* Before this update, the *{product-title} Logging* dashboard showed the number of shards 'x' times larger than the actual value when Elasticsearch had 'x' nodes. This issue occurred because the dashboard printed all primary shards for each Elasticsearch pod and summed them, even though the output was always for the whole Elasticsearch cluster. With this update, the number of shards is now correctly calculated. (link:https://issues.redhat.com/browse/LOG-2156[LOG-2156])
-
-* Before this update, the secrets `kibana` and `kibana-proxy` were not recreated if they were deleted manually. With this update, the `elasticsearch-operator` watches these resources and automatically recreates them if they are deleted.
(link:https://issues.redhat.com/browse/LOG-2250[LOG-2250]) - -* Before this update, tuning the buffer chunk size could cause the collector to generate a warning about the chunk size exceeding the byte limit for the event stream. With this update, you can also tune the read line limit, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2379[LOG-2379]) - -* Before this update, the logging console link in OpenShift web console was not removed with the ClusterLogging CR. With this update, deleting the CR or uninstalling the Cluster Logging Operator removes the link. (link:https://issues.redhat.com/browse/LOG-2373[LOG-2373]) - -* Before this update, a change to the container logs path caused the collection metric to always be zero with older releases configured with the original path. With this update, the plugin which exposes metrics about collected logs supports reading from either path to resolve the issue. (link:https://issues.redhat.com/browse/LOG-2462[LOG-2462]) - -=== CVEs -[id="openshift-logging-5-4-0-CVEs_{context}"] -* link:https://access.redhat.com/security/cve/CVE-2022-0759[CVE-2022-0759] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2058404[BZ-2058404] -* link:https://access.redhat.com/security/cve/CVE-2022-21698[CVE-2022-21698] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2045880[BZ-2045880] - -include::modules/cluster-logging-rn-5.3.14.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.3.13.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.3.12.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.3.11.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.3.10.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.3.9.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.3.8.adoc[leveloffset=+1] - -[id="cluster-logging-release-notes-5-3-7_{context}"] -== OpenShift Logging 5.3.7 -This release includes link:https://access.redhat.com/errata/RHSA-2022:2217[RHSA-2022:2217 OpenShift Logging Bug Fix Release 5.3.7] - -[id="openshift-logging-5-3-7-bug-fixes_{context}"] -=== Bug fixes -* Before this update, Linux audit log time parsing relied on an ordinal position of key/value pair. This update changes the parsing to utilize a regex to find the time entry. (https://issues.redhat.com/browse/LOG-2322[LOG-2322]) - -* Before this update, some log forwarder outputs could re-order logs with the same time-stamp. With this update, a sequence number has been added to the log record to order entries that have matching timestamps. (https://issues.redhat.com/browse/LOG-2334[LOG-2334]) - -* Before this update, clusters with a large number of namespaces caused Elasticsearch to stop serving requests because the list of namespaces reached the maximum header size limit. With this update, headers only include a list of namespace names, resolving the issue. (https://issues.redhat.com/browse/LOG-2450[LOG-2450]) - -* Before this update, `system:serviceaccount:openshift-monitoring:prometheus-k8s` had cluster level privileges as a `clusterrole` and `clusterrolebinding`. This update restricts the `serviceaccount` to the `openshift-logging` namespace with a role and rolebinding. 
(https://issues.redhat.com/browse/LOG-2481[LOG-2481)]) - -=== CVEs -[id="openshift-logging-5-3-7-CVEs_{context}"] -.Click to expand CVEs -[%collapsible] -==== -* https://access.redhat.com/security/cve/CVE-2018-25032[CVE-2018-25032] -* https://access.redhat.com/security/cve/CVE-2021-4028[CVE-2021-4028] -* https://access.redhat.com/security/cve/CVE-2021-37136[CVE-2021-37136] -* https://access.redhat.com/security/cve/CVE-2021-37137[CVE-2021-37137] -* https://access.redhat.com/security/cve/CVE-2021-43797[CVE-2021-43797] -* https://access.redhat.com/security/cve/CVE-2022-0759[CVE-2022-0759] -* https://access.redhat.com/security/cve/CVE-2022-0778[CVE-2022-0778] -* https://access.redhat.com/security/cve/CVE-2022-1154[CVE-2022-1154] -* https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* https://access.redhat.com/security/cve/CVE-2022-21426[CVE-2022-21426] -* https://access.redhat.com/security/cve/CVE-2022-21434[CVE-2022-21434] -* https://access.redhat.com/security/cve/CVE-2022-21443[CVE-2022-21443] -* https://access.redhat.com/security/cve/CVE-2022-21476[CVE-2022-21476] -* https://access.redhat.com/security/cve/CVE-2022-21496[CVE-2022-21496] -* https://access.redhat.com/security/cve/CVE-2022-21698[CVE-2022-21698] -* https://access.redhat.com/security/cve/CVE-2022-25636[CVE-2022-25636] -==== - -[id="cluster-logging-release-notes-5-3-6_{context}"] -== OpenShift Logging 5.3.6 -This release includes link:https://access.redhat.com/errata/RHBA-2022:1377[RHBA-2022:1377 OpenShift Logging Bug Fix Release 5.3.6] - -[id="openshift-logging-5-3-6-bug-fixes_{context}"] -=== Bug fixes -* Before this update, defining a toleration with no key and the existing Operator caused the Operator to be unable to complete an upgrade. With this update, this toleration no longer blocks the upgrade from completing. (link:https://issues.redhat.com/browse/LOG-2126[LOG-2126]) - -* Before this change, it was possible for the collector to generate a warning where the chunk byte limit was exceeding an emitted event. With this change, you can tune the readline limit to resolve the issue as advised by the upstream documentation. (link:https://issues.redhat.com/browse/LOG-2380[LOG-2380]) - -[id="cluster-logging-release-notes-5-3-5_{context}"] -== OpenShift Logging 5.3.5 -[role="_abstract"] -This release includes link:https://access.redhat.com/errata/RHSA-2022:0721[RHSA-2022:0721 OpenShift Logging Bug Fix Release 5.3.5] - -[id="openshift-logging-5-3-5-bug-fixes_{context}"] -=== Bug fixes -* Before this update, if you removed OpenShift Logging from {product-title}, the web console continued displaying a link to the *Logging* page. With this update, removing or uninstalling OpenShift Logging also removes that link. 
(link:https://issues.redhat.com/browse/LOG-2182[LOG-2182]) - -=== CVEs -[id="openshift-logging-5-3-5-CVEs_{context}"] -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2020-28491[CVE-2020-28491] -* link:https://access.redhat.com/security/cve/CVE-2021-3521[CVE-2021-3521] -* link:https://access.redhat.com/security/cve/CVE-2021-3872[CVE-2021-3872] -* link:https://access.redhat.com/security/cve/CVE-2021-3984[CVE-2021-3984] -* link:https://access.redhat.com/security/cve/CVE-2021-4019[CVE-2021-4019] -* link:https://access.redhat.com/security/cve/CVE-2021-4122[CVE-2021-4122] -* link:https://access.redhat.com/security/cve/CVE-2021-4192[CVE-2021-4192] -* link:https://access.redhat.com/security/cve/CVE-2021-4193[CVE-2021-4193] -* link:https://access.redhat.com/security/cve/CVE-2022-0552[CVE-2022-0552] -==== - -[id="cluster-logging-release-notes-5-3-4_{context}"] -== OpenShift Logging 5.3.4 -[role="_abstract"] -This release includes link:https://access.redhat.com/errata/RHBA-2022:0411[RHBA-2022:0411 OpenShift Logging Bug Fix Release 5.3.4] - -[id="openshift-logging-5-3-4-bug-fixes_{context}"] -=== Bug fixes -* Before this update, changes to the metrics dashboards had not yet been deployed because the `cluster-logging-operator` did not correctly compare existing and desired config maps that contained the dashboard. This update fixes the logic by adding a unique hash value to the object labels. (link:https://issues.redhat.com/browse/LOG-2066[LOG-2066]) - -* Before this update, Elasticsearch pods failed to start after updating with FIPS enabled. With this update, Elasticsearch pods start successfully. (link:https://issues.redhat.com/browse/LOG-1974[LOG-1974]) - -* Before this update, elasticsearch generated the error "Unable to create PersistentVolumeClaim due to forbidden: exceeded quota: infra-storage-quota." if the PVC already existed. With this update, elasticsearch checks for existing PVCs, resolving the issue. 
(link:https://issues.redhat.com/browse/LOG-2127[LOG-2127]) - -=== CVEs -[id="openshift-logging-5-3-4-CVEs_{context}"] -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-3521[CVE-2021-3521] -* link:https://access.redhat.com/security/cve/CVE-2021-3872[CVE-2021-3872] -* link:https://access.redhat.com/security/cve/CVE-2021-3984[CVE-2021-3984] -* link:https://access.redhat.com/security/cve/CVE-2021-4019[CVE-2021-4019] -* link:https://access.redhat.com/security/cve/CVE-2021-4122[CVE-2021-4122] -* link:https://access.redhat.com/security/cve/CVE-2021-4155[CVE-2021-4155] -* link:https://access.redhat.com/security/cve/CVE-2021-4192[CVE-2021-4192] -* link:https://access.redhat.com/security/cve/CVE-2021-4193[CVE-2021-4193] -* link:https://access.redhat.com/security/cve/CVE-2022-0185[CVE-2022-0185] -* link:https://access.redhat.com/security/cve/CVE-2022-21248[CVE-2022-21248] -* link:https://access.redhat.com/security/cve/CVE-2022-21277[CVE-2022-21277] -* link:https://access.redhat.com/security/cve/CVE-2022-21282[CVE-2022-21282] -* link:https://access.redhat.com/security/cve/CVE-2022-21283[CVE-2022-21283] -* link:https://access.redhat.com/security/cve/CVE-2022-21291[CVE-2022-21291] -* link:https://access.redhat.com/security/cve/CVE-2022-21293[CVE-2022-21293] -* link:https://access.redhat.com/security/cve/CVE-2022-21294[CVE-2022-21294] -* link:https://access.redhat.com/security/cve/CVE-2022-21296[CVE-2022-21296] -* link:https://access.redhat.com/security/cve/CVE-2022-21299[CVE-2022-21299] -* link:https://access.redhat.com/security/cve/CVE-2022-21305[CVE-2022-21305] -* link:https://access.redhat.com/security/cve/CVE-2022-21340[CVE-2022-21340] -* link:https://access.redhat.com/security/cve/CVE-2022-21341[CVE-2022-21341] -* link:https://access.redhat.com/security/cve/CVE-2022-21360[CVE-2022-21360] -* link:https://access.redhat.com/security/cve/CVE-2022-21365[CVE-2022-21365] -* link:https://access.redhat.com/security/cve/CVE-2022-21366[CVE-2022-21366] -==== - -[id="cluster-logging-release-notes-5-3-3_{context}"] -== OpenShift Logging 5.3.3 -This release includes link:https://access.redhat.com/errata/RHSA-2022:0227[RHSA-2022:0227 OpenShift Logging Bug Fix Release 5.3.3] - -[id="openshift-logging-5-3-3-bug-fixes"] -=== Bug fixes -* Before this update, changes to the metrics dashboards had not yet been deployed because the cluster-logging-operator did not correctly compare existing and desired configmaps containing the dashboard. 
This update fixes the logic by adding a dashboard unique hash value to the object labels.(link:https://issues.redhat.com/browse/LOG-2066[LOG-2066]) - -* This update changes the log4j dependency to 2.17.1 to resolve link:https://access.redhat.com/security/cve/CVE-2021-44832[CVE-2021-44832].(link:https://issues.redhat.com/browse/LOG-2102[LOG-2102]) - -=== CVEs -[id="openshift-logging-5-3-3-CVEs_{context}"] -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-27292[CVE-2021-27292] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1940613[BZ-1940613] -* link:https://access.redhat.com/security/cve/CVE-2021-44832[CVE-2021-44832] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2035951[BZ-2035951] -==== - -[id="cluster-logging-release-notes-5-3-2_{context}"] -== OpenShift Logging 5.3.2 -This release includes link:https://access.redhat.com/errata/RHSA-2022:0044[RHSA-2022:0044 OpenShift Logging Bug Fix Release 5.3.2] - -[id="openshift-logging-5-3-2-bug-fixes_{context}"] -=== Bug fixes -* Before this update, Elasticsearch rejected logs from the Event Router due to a parsing error. This update changes the data model to resolve the parsing error. However, as a result, previous indices might cause warnings or errors within Kibana. The `kubernetes.event.metadata.resourceVersion` field causes errors until existing indices are removed or reindexed. If this field is not used in Kibana, you can ignore the error messages. If you have a retention policy that deletes old indices, the policy eventually removes the old indices and stops the error messages. Otherwise, manually reindex to stop the error messages. (link:https://issues.redhat.com/browse/LOG-2087[LOG-2087]) - -* Before this update, the OpenShift Logging Dashboard displayed the wrong pod namespace in the table that displays top producing and collected containers over the last 24 hours. With this update, the OpenShift Logging Dashboard displays the correct pod namespace. (link:https://issues.redhat.com/browse/LOG-2051[LOG-2051]) - -* Before this update, if `outputDefaults.elasticsearch.structuredTypeKey` in the `ClusterLogForwarder` custom resource (CR) instance did not have a structured key, the CR replaced the output secret with the default secret used to communicate to the default log store. With this update, the defined output secret is correctly used. (link:https://issues.redhat.com/browse/LOG-2046[LOG-2046]) - -[id="openshift-logging-5-3-2-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* https://access.redhat.com/security/cve/CVE-2020-36327[CVE-2020-36327] -** https://bugzilla.redhat.com/show_bug.cgi?id=1958999[BZ-1958999] -* https://access.redhat.com/security/cve/CVE-2021-45105[CVE-2021-45105] -** https://bugzilla.redhat.com/show_bug.cgi?id=2034067[BZ-2034067] -* https://access.redhat.com/security/cve/CVE-2021-3712[CVE-2021-3712] -* https://access.redhat.com/security/cve/CVE-2021-20321[CVE-2021-20321] -* https://access.redhat.com/security/cve/CVE-2021-42574[CVE-2021-42574] -==== - -[id="cluster-logging-release-notes-5-3-1_{context}"] -== OpenShift Logging 5.3.1 -This release includes link:https://access.redhat.com/errata/RHSA-2021:5129[RHSA-2021:5129 OpenShift Logging Bug Fix Release 5.3.1] - -[id="openshift-logging-5-3-1-bug-fixes_{context}"] -=== Bug fixes -* Before this update, the Fluentd container image included builder tools that were unnecessary at run time. This update removes those tools from the image. 
(link:https://issues.redhat.com/browse/LOG-1998[LOG-1998]) - -* Before this update, the Logging dashboard displayed an empty CPU graph because of a reference to an invalid metric. With this update, the Logging dashboard displays CPU graphs correctly. (link:https://issues.redhat.com/browse/LOG-1925[LOG-1925]) - -* Before this update, the Elasticsearch Prometheus exporter plugin compiled index-level metrics using a high-cost query that impacted the Elasticsearch node performance. This update implements a lower-cost query that improves performance. (link:https://issues.redhat.com/browse/LOG-1897[LOG-1897]) - - -[id="openshift-logging-5-3-1-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://www.redhat.com/security/data/cve/CVE-2021-21409.html[CVE-2021-21409] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1944888[BZ-1944888] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37136.html[CVE-2021-37136] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2004133[BZ-2004133] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37137.html[CVE-2021-37137] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2004135[BZ-2004135] -* link:https://www.redhat.com/security/data/cve/CVE-2021-44228.html[CVE-2021-44228] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2030932[BZ-2030932] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25009.html[CVE-2018-25009] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25010.html[CVE-2018-25010] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25012.html[CVE-2018-25012] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25013.html[CVE-2018-25013] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25014.html[CVE-2018-25014] -* link:https://www.redhat.com/security/data/cve/CVE-2019-5827.html[CVE-2019-5827] -* link:https://www.redhat.com/security/data/cve/CVE-2019-13750.html[CVE-2019-13750] -* link:https://www.redhat.com/security/data/cve/CVE-2019-13751.html[CVE-2019-13751] -* link:https://www.redhat.com/security/data/cve/CVE-2019-17594.html[CVE-2019-17594] -* link:https://www.redhat.com/security/data/cve/CVE-2019-17595.html[CVE-2019-17595] -* link:https://www.redhat.com/security/data/cve/CVE-2019-18218.html[CVE-2019-18218] -* link:https://www.redhat.com/security/data/cve/CVE-2019-19603.html[CVE-2019-19603] -* link:https://www.redhat.com/security/data/cve/CVE-2019-20838.html[CVE-2019-20838] -* link:https://www.redhat.com/security/data/cve/CVE-2020-12762.html[CVE-2020-12762] -* link:https://www.redhat.com/security/data/cve/CVE-2020-13435.html[CVE-2020-13435] -* link:https://www.redhat.com/security/data/cve/CVE-2020-14145.html[CVE-2020-14145] -* link:https://www.redhat.com/security/data/cve/CVE-2020-14155.html[CVE-2020-14155] -* link:https://www.redhat.com/security/data/cve/CVE-2020-16135.html[CVE-2020-16135] -* link:https://www.redhat.com/security/data/cve/CVE-2020-17541.html[CVE-2020-17541] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24370.html[CVE-2020-24370] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35521.html[CVE-2020-35521] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35522.html[CVE-2020-35522] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35523.html[CVE-2020-35523] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35524.html[CVE-2020-35524] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36330.html[CVE-2020-36330] -* 
link:https://www.redhat.com/security/data/cve/CVE-2020-36331.html[CVE-2020-36331] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36332.html[CVE-2020-36332] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3200.html[CVE-2021-3200] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3426.html[CVE-2021-3426] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3445.html[CVE-2021-3445] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3481.html[CVE-2021-3481] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3572.html[CVE-2021-3572] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3580.html[CVE-2021-3580] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3712.html[CVE-2021-3712] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3800.html[CVE-2021-3800] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20231.html[CVE-2021-20231] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20232.html[CVE-2021-20232] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20266.html[CVE-2021-20266] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20317.html[CVE-2021-20317] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22876.html[CVE-2021-22876] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22898.html[CVE-2021-22898] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22925.html[CVE-2021-22925] -* link:https://www.redhat.com/security/data/cve/CVE-2021-27645.html[CVE-2021-27645] -* link:https://www.redhat.com/security/data/cve/CVE-2021-28153.html[CVE-2021-28153] -* link:https://www.redhat.com/security/data/cve/CVE-2021-31535.html[CVE-2021-31535] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33560.html[CVE-2021-33560] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33574.html[CVE-2021-33574] -* link:https://www.redhat.com/security/data/cve/CVE-2021-35942.html[CVE-2021-35942] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36084.html[CVE-2021-36084] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36085.html[CVE-2021-36085] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36086.html[CVE-2021-36086] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36087.html[CVE-2021-36087] -* link:https://www.redhat.com/security/data/cve/CVE-2021-42574.html[CVE-2021-42574] -* link:https://www.redhat.com/security/data/cve/CVE-2021-43267.html[CVE-2021-43267] -* link:https://www.redhat.com/security/data/cve/CVE-2021-43527.html[CVE-2021-43527] -* link:https://www.redhat.com/security/data/cve/CVE-2021-45046.html[CVE-2021-45046] -==== - - -[id="cluster-logging-release-notes-5-3-0_{context}"] -== OpenShift Logging 5.3.0 -This release includes link:https://access.redhat.com/errata/RHSA-2021:4627[RHSA-2021:4627 OpenShift Logging Bug Fix Release 5.3.0] - -[id="openshift-logging-5-3-0-new-features-and-enhancements_{context}"] -=== New features and enhancements -* With this update, authorization options for Log Forwarding have been expanded. Outputs may now be configured with SASL, username/password, or TLS. - -[id="openshift-logging-5-3-0-bug-fixes_{context}"] -=== Bug fixes -* Before this update, if you forwarded logs using the syslog protocol, serializing a ruby hash encoded key/value pairs to contain a '=>' character and replaced tabs with "#11". This update fixes the issue so that log messages are correctly serialized as valid JSON. 
(link:https://issues.redhat.com/browse/LOG-1494[LOG-1494])
-
-* Before this update, application logs were not correctly configured to forward to the proper CloudWatch stream with multi-line error detection enabled. (link:https://issues.redhat.com/browse/LOG-1939[LOG-1939])
-
-* Before this update, a name change of the deployed collector in the 5.3 release caused the `FluentdNodeDown` alert to be generated. (link:https://issues.redhat.com/browse/LOG-1918[LOG-1918])
-
-* Before this update, a configuration regression introduced in a prior release caused the collector to flush its buffered messages before shutdown, delaying the termination and restart of collector pods. With this update, Fluentd no longer flushes buffers at shutdown, resolving the issue. (link:https://issues.redhat.com/browse/LOG-1735[LOG-1735])
-
-* Before this update, a regression introduced in a prior release intentionally disabled JSON message parsing. This update re-enables JSON parsing. It also sets the log entry "level" based on the "level" field in the parsed JSON message or by using a regex to extract a match from a message field. (link:https://issues.redhat.com/browse/LOG-1199[LOG-1199])
-
-* Before this update, the `ClusterLogging` custom resource (CR) applied the value of the `totalLimitSize` field to the Fluentd `total_limit_size` field, even if the required buffer space was not available. With this update, the CR applies the lesser of the `totalLimitSize` value or the default value to the Fluentd `total_limit_size` field, resolving the issue. (link:https://issues.redhat.com/browse/LOG-1776[LOG-1776])
-
-[id="openshift-logging-5-3-0-known-issues_{context}"]
-=== Known issues
-* If you forward logs to an external Elasticsearch server and then change a configured value in the pipeline secret, such as the username and password, the Fluentd forwarder loads the new secret but uses the old value to connect to the external Elasticsearch server. This issue happens because the Red Hat OpenShift Logging Operator does not currently monitor secrets for content changes. (link:https://issues.redhat.com/browse/LOG-1652[LOG-1652])
-+
-As a workaround, if you change the secret, you can force the Fluentd pods to redeploy by entering:
-+
-[source,terminal]
-----
-$ oc delete pod -l component=collector
-----
-
-[id="openshift-logging-5-3-0-deprecated-removed-features_{context}"]
-=== Deprecated and removed features
-Some features available in previous releases have been deprecated or removed.
-
-Deprecated functionality is still included in OpenShift Logging and continues to be supported; however, it will be removed in a future release of this product and is not recommended for new deployments.
-
-[id="openshift-logging-5-3-0-legacy-forwarding_{context}"]
-==== Forwarding logs using the legacy Fluentd and legacy syslog methods have been removed
-
-In OpenShift Logging 5.3, the legacy methods of forwarding logs to Syslog and Fluentd are removed. Bug fixes and support are provided through the end of the OpenShift Logging 5.2 life cycle, after which no new feature enhancements are made.
- -Instead, use the following non-legacy methods: - -* xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-fluentd_cluster-logging-external[Forwarding logs using the Fluentd forward protocol] -* xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-syslog_cluster-logging-external[Forwarding logs using the syslog protocol] - -[id="openshift-logging-5-3-0-legacy-forwarding-config_{context}"] -==== Configuration mechanisms for legacy forwarding methods have been removed - -In OpenShift Logging 5.3, the legacy configuration mechanism for log forwarding is removed: You cannot forward logs using the legacy Fluentd method and legacy Syslog method. Use the standard log forwarding methods instead. - -[id="openshift-logging-5-3-0-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://www.redhat.com/security/data/cve/CVE-2018-20673.html[CVE-2018-20673] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25009.html[CVE-2018-25009] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25010.html[CVE-2018-25010] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25012.html[CVE-2018-25012] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25013.html[CVE-2018-25013] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25014.html[CVE-2018-25014] -* link:https://www.redhat.com/security/data/cve/CVE-2019-5827.html[CVE-2019-5827] -* link:https://www.redhat.com/security/data/cve/CVE-2019-13750.html[CVE-2019-13750] -* link:https://www.redhat.com/security/data/cve/CVE-2019-13751.html[CVE-2019-13751] -* link:https://www.redhat.com/security/data/cve/CVE-2019-14615.html[CVE-2019-14615] -* link:https://www.redhat.com/security/data/cve/CVE-2019-17594.html[CVE-2019-17594] -* link:https://www.redhat.com/security/data/cve/CVE-2019-17595.html[CVE-2019-17595] -* link:https://www.redhat.com/security/data/cve/CVE-2019-18218.html[CVE-2019-18218] -* link:https://www.redhat.com/security/data/cve/CVE-2019-19603.html[CVE-2019-19603] -* link:https://www.redhat.com/security/data/cve/CVE-2019-20838.html[CVE-2019-20838] -* link:https://www.redhat.com/security/data/cve/CVE-2020-0427.html[CVE-2020-0427] -* link:https://www.redhat.com/security/data/cve/CVE-2020-10001.html[CVE-2020-10001] -* link:https://www.redhat.com/security/data/cve/CVE-2020-12762.html[CVE-2020-12762] -* link:https://www.redhat.com/security/data/cve/CVE-2020-13435.html[CVE-2020-13435] -* link:https://www.redhat.com/security/data/cve/CVE-2020-14145.html[CVE-2020-14145] -* link:https://www.redhat.com/security/data/cve/CVE-2020-14155.html[CVE-2020-14155] -* link:https://www.redhat.com/security/data/cve/CVE-2020-16135.html[CVE-2020-16135] -* link:https://www.redhat.com/security/data/cve/CVE-2020-17541.html[CVE-2020-17541] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24370.html[CVE-2020-24370] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24502.html[CVE-2020-24502] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24503.html[CVE-2020-24503] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24504.html[CVE-2020-24504] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24586.html[CVE-2020-24586] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24587.html[CVE-2020-24587] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24588.html[CVE-2020-24588] -* link:https://www.redhat.com/security/data/cve/CVE-2020-26139.html[CVE-2020-26139] -* 
link:https://www.redhat.com/security/data/cve/CVE-2020-26140.html[CVE-2020-26140] -* link:https://www.redhat.com/security/data/cve/CVE-2020-26141.html[CVE-2020-26141] -* link:https://www.redhat.com/security/data/cve/CVE-2020-26143.html[CVE-2020-26143] -* link:https://www.redhat.com/security/data/cve/CVE-2020-26144.html[CVE-2020-26144] -* link:https://www.redhat.com/security/data/cve/CVE-2020-26145.html[CVE-2020-26145] -* link:https://www.redhat.com/security/data/cve/CVE-2020-26146.html[CVE-2020-26146] -* link:https://www.redhat.com/security/data/cve/CVE-2020-26147.html[CVE-2020-26147] -* link:https://www.redhat.com/security/data/cve/CVE-2020-27777.html[CVE-2020-27777] -* link:https://www.redhat.com/security/data/cve/CVE-2020-29368.html[CVE-2020-29368] -* link:https://www.redhat.com/security/data/cve/CVE-2020-29660.html[CVE-2020-29660] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35448.html[CVE-2020-35448] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35521.html[CVE-2020-35521] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35522.html[CVE-2020-35522] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35523.html[CVE-2020-35523] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35524.html[CVE-2020-35524] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36158.html[CVE-2020-36158] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36312.html[CVE-2020-36312] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36330.html[CVE-2020-36330] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36331.html[CVE-2020-36331] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36332.html[CVE-2020-36332] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36386.html[CVE-2020-36386] -* link:https://www.redhat.com/security/data/cve/CVE-2021-0129.html[CVE-2021-0129] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3200.html[CVE-2021-3200] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3348.html[CVE-2021-3348] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3426.html[CVE-2021-3426] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3445.html[CVE-2021-3445] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3481.html[CVE-2021-3481] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3487.html[CVE-2021-3487] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3489.html[CVE-2021-3489] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3564.html[CVE-2021-3564] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3572.html[CVE-2021-3572] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3573.html[CVE-2021-3573] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3580.html[CVE-2021-3580] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3600.html[CVE-2021-3600] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3635.html[CVE-2021-3635] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3659.html[CVE-2021-3659] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3679.html[CVE-2021-3679] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3732.html[CVE-2021-3732] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3778.html[CVE-2021-3778] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3796.html[CVE-2021-3796] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3800.html[CVE-2021-3800] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20194.html[CVE-2021-20194] -* 
link:https://www.redhat.com/security/data/cve/CVE-2021-20197.html[CVE-2021-20197]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-20231.html[CVE-2021-20231]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-20232.html[CVE-2021-20232]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-20239.html[CVE-2021-20239]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-20266.html[CVE-2021-20266]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-20284.html[CVE-2021-20284]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-22876.html[CVE-2021-22876]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-22898.html[CVE-2021-22898]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-22925.html[CVE-2021-22925]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-23133.html[CVE-2021-23133]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-23840.html[CVE-2021-23840]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-23841.html[CVE-2021-23841]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-27645.html[CVE-2021-27645]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-28153.html[CVE-2021-28153]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-28950.html[CVE-2021-28950]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-28971.html[CVE-2021-28971]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-29155.html[CVE-2021-29155]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-29646.html[CVE-2021-29646]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-29650.html[CVE-2021-29650]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-31440.html[CVE-2021-31440]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-31535.html[CVE-2021-31535]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-31829.html[CVE-2021-31829]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-31916.html[CVE-2021-31916]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-33033.html[CVE-2021-33033]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-33194.html[CVE-2021-33194]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-33200.html[CVE-2021-33200]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-33560.html[CVE-2021-33560]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-33574.html[CVE-2021-33574]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-35942.html[CVE-2021-35942]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-36084.html[CVE-2021-36084]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-36085.html[CVE-2021-36085]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-36086.html[CVE-2021-36086]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-36087.html[CVE-2021-36087]
-* link:https://www.redhat.com/security/data/cve/CVE-2021-42574.html[CVE-2021-42574]
-====
-
-include::modules/cluster-logging-rn-5.2.13.adoc[leveloffset=+1]
-
-include::modules/cluster-logging-rn-5.2.12.adoc[leveloffset=+1]
-
-include::modules/cluster-logging-rn-5.2.11.adoc[leveloffset=+1]
-
-[id="cluster-logging-release-notes-5-2-10_{context}"]
-== OpenShift Logging 5.2.10
-[role="_abstract"]
-This release includes link:https://access.redhat.com/errata/[OpenShift Logging Bug Fix Release 5.2.10]
-
-[id="openshift-logging-5-2-10-bug-fixes_{context}"]
-=== Bug fixes
-* Before this update, some log forwarder outputs could re-order logs with the same timestamp.
With this update, a sequence number has been added to the log record to order entries that have matching timestamps.(https://issues.redhat.com/browse/LOG-2335[LOG-2335]) - -* Before this update, clusters with a large number of namespaces caused Elasticsearch to stop serving requests because the list of namespaces reached the maximum header size limit. With this update, headers only include a list of namespace names, resolving the issue. (https://issues.redhat.com/browse/LOG-2475[LOG-2475]) - -* Before this update, `system:serviceaccount:openshift-monitoring:prometheus-k8s` had cluster level privileges as a `clusterrole` and `clusterrolebinding`. This update restricts the `serviceaccount` to the `openshift-logging` namespace with a role and rolebinding. (https://issues.redhat.com/browse/LOG-2480[LOG-2480]) - -* Before this update, the `cluster-logging-operator` utilized cluster scoped roles and bindings to establish permissions for the Prometheus service account to scrape metrics. These permissions were only created when deploying the Operator using the console interface and were missing when the Operator was deployed from the command line. This fixes the issue by making this role and binding namespace scoped. (https://issues.redhat.com/browse/LOG-1972[LOG-1972]) - -[id="openshift-logging-5-2-10-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2018-25032[CVE-2018-25032] -* link:https://access.redhat.com/security/cve/CVE-2021-4028[CVE-2021-4028] -* link:https://access.redhat.com/security/cve/CVE-2021-37136[CVE-2021-37136] -* link:https://access.redhat.com/security/cve/CVE-2021-37137[CVE-2021-37137] -* link:https://access.redhat.com/security/cve/CVE-2021-43797[CVE-2021-43797] -* link:https://access.redhat.com/security/cve/CVE-2022-0778[CVE-2022-0778] -* link:https://access.redhat.com/security/cve/CVE-2022-1154[CVE-2022-1154] -* link:https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* link:https://access.redhat.com/security/cve/CVE-2022-21426[CVE-2022-21426] -* link:https://access.redhat.com/security/cve/CVE-2022-21434[CVE-2022-21434] -* link:https://access.redhat.com/security/cve/CVE-2022-21443[CVE-2022-21443] -* link:https://access.redhat.com/security/cve/CVE-2022-21476[CVE-2022-21476] -* link:https://access.redhat.com/security/cve/CVE-2022-21496[CVE-2022-21496] -* link:https://access.redhat.com/security/cve/CVE-2022-21698[CVE-2022-21698] -* link:https://access.redhat.com/security/cve/CVE-2022-25636[CVE-2022-25636] -==== - -[id="cluster-logging-release-notes-5-2-9_{context}"] -== OpenShift Logging 5.2.9 -[role="_abstract"] -This release includes link:https://access.redhat.com/errata/RHBA-2022:1375[RHBA-2022:1375 OpenShift Logging Bug Fix Release 5.2.9]] - -[id="openshift-logging-5-2-9-bug-fixes_{context}"] -=== Bug fixes -* Before this update, defining a toleration with no key and the existing Operator caused the Operator to be unable to complete an upgrade. With this update, this toleration no longer blocks the upgrade from completing. 
(link:https://issues.redhat.com/browse/LOG-2304[LOG-2304]) - -[id="cluster-logging-release-notes-5-2-8_{context}"] -== OpenShift Logging 5.2.8 - -This release includes link:https://access.redhat.com/errata/RHSA-2022:0728[RHSA-2022:0728 OpenShift Logging Bug Fix Release 5.2.8] - -[id="openshift-logging-5-2-8-bug-fixes_{context}"] -=== Bug fixes -* Before this update, if you removed OpenShift Logging from {product-title}, the web console continued displaying a link to the *Logging* page. With this update, removing or uninstalling OpenShift Logging also removes that link. (link:https://issues.redhat.com/browse/LOG-2180[LOG-2180]) - -[id="openshift-logging-5-2-8-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2020-28491[CVE-2020-28491] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1930423[BZ-1930423] -* link:https://access.redhat.com/security/cve/CVE-2022-0552[CVE-2022-0552] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2052539[BZ-2052539] -==== - -[id="cluster-logging-release-notes-5-2-7_{context}"] -== OpenShift Logging 5.2.7 - -This release includes link:https://access.redhat.com/errata/RHBA-2022:0478[RHBA-2022:0478 OpenShift Logging Bug Fix Release 5.2.7] - -[id="openshift-logging-5-2-7-bug-fixes_{context}"] -=== Bug fixes -* Before this update, Elasticsearch pods with FIPS enabled failed to start after updating. With this update, Elasticsearch pods start successfully. (link:https://issues.redhat.com/browse/LOG-2000[LOG-2000]) - -* Before this update, if a persistent volume claim (PVC) already existed, Elasticsearch generated an error, "Unable to create PersistentVolumeClaim due to forbidden: exceeded quota: infra-storage-quota." With this update, Elasticsearch checks for existing PVCs, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2118[LOG-2118]) - -[id="openshift-logging-5-2-7-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-3521[CVE-2021-3521] -* link:https://access.redhat.com/security/cve/CVE-2021-3872[CVE-2021-3872] -* link:https://access.redhat.com/security/cve/CVE-2021-3984[CVE-2021-3984] -* link:https://access.redhat.com/security/cve/CVE-2021-4019[CVE-2021-4019] -* link:https://access.redhat.com/security/cve/CVE-2021-4122[CVE-2021-4122] -* link:https://access.redhat.com/security/cve/CVE-2021-4155[CVE-2021-4155] -* link:https://access.redhat.com/security/cve/CVE-2021-4192[CVE-2021-4192] -* link:https://access.redhat.com/security/cve/CVE-2021-4193[CVE-2021-4193] -* link:https://access.redhat.com/security/cve/CVE-2022-0185[CVE-2022-0185] -==== - -[id="cluster-logging-release-notes-5-2-6_{context}"] -== OpenShift Logging 5.2.6 - -This release includes link:https://access.redhat.com/errata/RHSA-2022:0230[RHSA-2022:0230 OpenShift Logging Bug Fix Release 5.2.6] - -[id="openshift-logging-5-2-6-bug-fixes_{context}"] -=== Bug fixes -* Before this update, the release did not include a filter change, the absence of which caused Fluentd to crash. With this update, the missing filter has been corrected. 
(link:https://issues.redhat.com/browse/LOG-2104[LOG-2104]) - -* This update changes the log4j dependency to 2.17.1 to resolve link:https://access.redhat.com/security/cve/CVE-2021-44832[CVE-2021-44832]. (link:https://issues.redhat.com/browse/LOG-2101[LOG-2101]) - -[id="openshift-logging-5-2-6-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-27292[CVE-2021-27292] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1940613[BZ-1940613] -* link:https://access.redhat.com/security/cve/CVE-2021-44832[CVE-2021-44832] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2035951[BZ-2035951] -==== - -[id="cluster-logging-release-notes-5-2-5_{context}"] -== OpenShift Logging 5.2.5 - -This release includes link:https://access.redhat.com/errata/RHSA-2022:0043[RHSA-2022:0043 OpenShift Logging Bug Fix Release 5.2.5] - -[id="openshift-logging-5-2-5-bug-fixes_{context}"] -=== Bug fixes -* Before this update, Elasticsearch rejected logs from the Event Router due to a parsing error. This update changes the data model to resolve the parsing error. However, as a result, previous indices might cause warnings or errors within Kibana. The `kubernetes.event.metadata.resourceVersion` field causes errors until existing indices are removed or reindexed. If this field is not used in Kibana, you can ignore the error messages. If you have a retention policy that deletes old indices, the policy eventually removes the old indices and stops the error messages. Otherwise, manually reindex to stop the error messages. (link:https://issues.redhat.com/browse/LOG-2087[LOG-2087]) - - -[id="openshift-logging-5-2-5-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-3712[CVE-2021-3712] -* link:https://access.redhat.com/security/cve/CVE-2021-20321[CVE-2021-20321] -* link:https://access.redhat.com/security/cve/CVE-2021-42574[CVE-2021-42574] -* link:https://access.redhat.com/security/cve/CVE-2021-45105[CVE-2021-45105] -==== - -[id="cluster-logging-release-notes-5-2-4_{context}"] -== OpenShift Logging 5.2.4 - -This release includes link:https://access.redhat.com/errata/RHSA-2021:5127[RHSA-2021:5127 OpenShift Logging Bug Fix Release 5.2.4] - -[id="openshift-logging-5-2-4-bug-fixes_{context}"] -=== Bug fixes - -* Before this update, records shipped by using syslog serialized a Ruby hash that encoded key/value pairs with a '=>' character and replaced tabs with "#11". This update serializes the message correctly as proper JSON. (link:https://issues.redhat.com/browse/LOG-1775[LOG-1775]) - -* Before this update, the Elasticsearch Prometheus exporter plugin compiled index-level metrics using a high-cost query that impacted the Elasticsearch node performance. This update implements a lower-cost query that improves performance. (link:https://issues.redhat.com/browse/LOG-1970[LOG-1970]) - -* Before this update, Elasticsearch sometimes rejected messages when Log Forwarding was configured with multiple outputs. This happened because configuring one of the outputs modified message content to be a single message. With this update, Log Forwarding duplicates the messages for each output so that output-specific processing does not affect the other outputs. 
(link:https://issues.redhat.com/browse/LOG-1824[LOG-1824]) - - -[id="openshift-logging-5-2-4-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://www.redhat.com/security/data/cve/CVE-2018-25009.html[CVE-2018-25009] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25010.html[CVE-2018-25010] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25012.html[CVE-2018-25012] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25013.html[CVE-2018-25013] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25014.html[CVE-2018-25014] -* link:https://www.redhat.com/security/data/cve/CVE-2019-5827.html[CVE-2019-5827] -* link:https://www.redhat.com/security/data/cve/CVE-2019-13750.html[CVE-2019-13750] -* link:https://www.redhat.com/security/data/cve/CVE-2019-13751.html[CVE-2019-13751] -* link:https://www.redhat.com/security/data/cve/CVE-2019-17594.html[CVE-2019-17594] -* link:https://www.redhat.com/security/data/cve/CVE-2019-17595.html[CVE-2019-17595] -* link:https://www.redhat.com/security/data/cve/CVE-2019-18218.html[CVE-2019-18218] -* link:https://www.redhat.com/security/data/cve/CVE-2019-19603.html[CVE-2019-19603] -* link:https://www.redhat.com/security/data/cve/CVE-2019-20838.html[CVE-2019-20838] -* link:https://www.redhat.com/security/data/cve/CVE-2020-12762.html[CVE-2020-12762] -* link:https://www.redhat.com/security/data/cve/CVE-2020-13435.html[CVE-2020-13435] -* link:https://www.redhat.com/security/data/cve/CVE-2020-14145.html[CVE-2020-14145] -* link:https://www.redhat.com/security/data/cve/CVE-2020-14155.html[CVE-2020-14155] -* link:https://www.redhat.com/security/data/cve/CVE-2020-16135.html[CVE-2020-16135] -* link:https://www.redhat.com/security/data/cve/CVE-2020-17541.html[CVE-2020-17541] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24370.html[CVE-2020-24370] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35521.html[CVE-2020-35521] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35522.html[CVE-2020-35522] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35523.html[CVE-2020-35523] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35524.html[CVE-2020-35524] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36330.html[CVE-2020-36330] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36331.html[CVE-2020-36331] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36332.html[CVE-2020-36332] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3200.html[CVE-2021-3200] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3426.html[CVE-2021-3426] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3445.html[CVE-2021-3445] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3481.html[CVE-2021-3481] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3572.html[CVE-2021-3572] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3580.html[CVE-2021-3580] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3712.html[CVE-2021-3712] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3800.html[CVE-2021-3800] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20231.html[CVE-2021-20231] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20232.html[CVE-2021-20232] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20266.html[CVE-2021-20266] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20317.html[CVE-2021-20317] -* link:https://www.redhat.com/security/data/cve/CVE-2021-21409.html[CVE-2021-21409] -* 
link:https://www.redhat.com/security/data/cve/CVE-2021-22876.html[CVE-2021-22876] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22898.html[CVE-2021-22898] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22925.html[CVE-2021-22925] -* link:https://www.redhat.com/security/data/cve/CVE-2021-27645.html[CVE-2021-27645] -* link:https://www.redhat.com/security/data/cve/CVE-2021-28153.html[CVE-2021-28153] -* link:https://www.redhat.com/security/data/cve/CVE-2021-31535.html[CVE-2021-31535] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33560.html[CVE-2021-33560] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33574.html[CVE-2021-33574] -* link:https://www.redhat.com/security/data/cve/CVE-2021-35942.html[CVE-2021-35942] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36084.html[CVE-2021-36084] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36085.html[CVE-2021-36085] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36086.html[CVE-2021-36086] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36087.html[CVE-2021-36087] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37136.html[CVE-2021-37136] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37137.html[CVE-2021-37137] -* link:https://www.redhat.com/security/data/cve/CVE-2021-42574.html[CVE-2021-42574] -* link:https://www.redhat.com/security/data/cve/CVE-2021-43267.html[CVE-2021-43267] -* link:https://www.redhat.com/security/data/cve/CVE-2021-43527.html[CVE-2021-43527] -* link:https://www.redhat.com/security/data/cve/CVE-2021-44228.html[CVE-2021-44228] -* link:https://www.redhat.com/security/data/cve/CVE-2021-45046.html[CVE-2021-45046] -==== - -[id="cluster-logging-release-notes-5-2-3_{context}"] -== OpenShift Logging 5.2.3 - -This release includes link:https://access.redhat.com/errata/RHSA-2021:4032[RHSA-2021:4032 OpenShift Logging Bug Fix Release 5.2.3] - -[id="openshift-logging-5-2-3-bug-fixes_{context}"] -=== Bug fixes - -* Before this update, some alerts did not include a namespace label. This omission does not comply with the OpenShift Monitoring Team's guidelines for writing alerting rules in {product-title}. With this update, all the alerts in Elasticsearch Operator include a namespace label and follow all the guidelines for writing alerting rules in {product-title}. (link:https://issues.redhat.com/browse/LOG-1857[LOG-1857]) - -* Before this update, a regression introduced in a prior release intentionally disabled JSON message parsing. This update re-enables JSON parsing. It also sets the log entry `level` based on the `level` field in parsed JSON message or by using regex to extract a match from a message field. 
(link:https://issues.redhat.com/browse/LOG-1759[LOG-1759]) - -[id="openshift-logging-5-2-3-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-23369[CVE-2021-23369] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1948761[BZ-1948761] -* link:https://access.redhat.com/security/cve/CVE-2021-23383[CVE-2021-23383] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1956688[BZ-1956688] -* link:https://access.redhat.com/security/cve/CVE-2018-20673[CVE-2018-20673] -* link:https://access.redhat.com/security/cve/CVE-2019-5827[CVE-2019-5827] -* link:https://access.redhat.com/security/cve/CVE-2019-13750[CVE-2019-13750] -* link:https://access.redhat.com/security/cve/CVE-2019-13751[CVE-2019-13751] -* link:https://access.redhat.com/security/cve/CVE-2019-17594[CVE-2019-17594] -* link:https://access.redhat.com/security/cve/CVE-2019-17595[CVE-2019-17595] -* link:https://access.redhat.com/security/cve/CVE-2019-18218[CVE-2019-18218] -* link:https://access.redhat.com/security/cve/CVE-2019-19603[CVE-2019-19603] -* link:https://access.redhat.com/security/cve/CVE-2019-20838[CVE-2019-20838] -* link:https://access.redhat.com/security/cve/CVE-2020-12762[CVE-2020-12762] -* link:https://access.redhat.com/security/cve/CVE-2020-13435[CVE-2020-13435] -* link:https://access.redhat.com/security/cve/CVE-2020-14155[CVE-2020-14155] -* link:https://access.redhat.com/security/cve/CVE-2020-16135[CVE-2020-16135] -* link:https://access.redhat.com/security/cve/CVE-2020-24370[CVE-2020-24370] -* link:https://access.redhat.com/security/cve/CVE-2021-3200[CVE-2021-3200] -* link:https://access.redhat.com/security/cve/CVE-2021-3426[CVE-2021-3426] -* link:https://access.redhat.com/security/cve/CVE-2021-3445[CVE-2021-3445] -* link:https://access.redhat.com/security/cve/CVE-2021-3572[CVE-2021-3572] -* link:https://access.redhat.com/security/cve/CVE-2021-3580[CVE-2021-3580] -* link:https://access.redhat.com/security/cve/CVE-2021-3778[CVE-2021-3778] -* link:https://access.redhat.com/security/cve/CVE-2021-3796[CVE-2021-3796] -* link:https://access.redhat.com/security/cve/CVE-2021-3800[CVE-2021-3800] -* link:https://access.redhat.com/security/cve/CVE-2021-20231[CVE-2021-20231] -* link:https://access.redhat.com/security/cve/CVE-2021-20232[CVE-2021-20232] -* link:https://access.redhat.com/security/cve/CVE-2021-20266[CVE-2021-20266] -* link:https://access.redhat.com/security/cve/CVE-2021-22876[CVE-2021-22876] -* link:https://access.redhat.com/security/cve/CVE-2021-22898[CVE-2021-22898] -* link:https://access.redhat.com/security/cve/CVE-2021-22925[CVE-2021-22925] -* link:https://access.redhat.com/security/cve/CVE-2021-23840[CVE-2021-23840] -* link:https://access.redhat.com/security/cve/CVE-2021-23841[CVE-2021-23841] -* link:https://access.redhat.com/security/cve/CVE-2021-27645[CVE-2021-27645] -* link:https://access.redhat.com/security/cve/CVE-2021-28153[CVE-2021-28153] -* link:https://access.redhat.com/security/cve/CVE-2021-33560[CVE-2021-33560] -* link:https://access.redhat.com/security/cve/CVE-2021-33574[CVE-2021-33574] -* link:https://access.redhat.com/security/cve/CVE-2021-35942[CVE-2021-35942] -* link:https://access.redhat.com/security/cve/CVE-2021-36084[CVE-2021-36084] -* link:https://access.redhat.com/security/cve/CVE-2021-36085[CVE-2021-36085] -* link:https://access.redhat.com/security/cve/CVE-2021-36086[CVE-2021-36086] -* link:https://access.redhat.com/security/cve/CVE-2021-36087[CVE-2021-36087] -==== - -[id="cluster-logging-release-notes-5-2-2_{context}"] -== 
OpenShift Logging 5.2.2 - -This release includes link:https://access.redhat.com/errata/RHBA-2021:3747[RHBA-2021:3747 OpenShift Logging Bug Fix Release 5.2.2] - -[id="openshift-logging-5-2-2-bug-fixes_{context}"] -=== Bug fixes - -* Before this update, the `ClusterLogging` custom resource (CR) applied the value of the `totalLimitSize` field to the Fluentd `total_limit_size` field, even if the required buffer space was not available. With this update, the CR applies the lesser of the `totalLimitSize` value or the default value to the Fluentd `total_limit_size` field, resolving the issue. (link:https://issues.redhat.com/browse/LOG-1738[LOG-1738]) - -* Before this update, a regression introduced in a prior release caused the collector to flush its buffered messages before shutdown, which delayed the termination and restart of collector pods. With this update, Fluentd no longer flushes buffers at shutdown, resolving the issue. (link:https://issues.redhat.com/browse/LOG-1739[LOG-1739]) - -* Before this update, an issue in the bundle manifests prevented installation of the Elasticsearch Operator through OLM on {product-title} 4.9. With this update, a correction to the bundle manifests re-enables installation and upgrade in 4.9. (link:https://issues.redhat.com/browse/LOG-1780[LOG-1780]) - -[id="openshift-logging-5-2-2-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://www.redhat.com/security/data/cve/CVE-2020-25648.html[CVE-2020-25648] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22922.html[CVE-2021-22922] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22923.html[CVE-2021-22923] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22924.html[CVE-2021-22924] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36222.html[CVE-2021-36222] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37576.html[CVE-2021-37576] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37750.html[CVE-2021-37750] -* link:https://www.redhat.com/security/data/cve/CVE-2021-38201.html[CVE-2021-38201] -==== - -[id="cluster-logging-release-notes-5-2-1_{context}"] -== OpenShift Logging 5.2.1 - -This release includes link:https://access.redhat.com/errata/RHBA-2021:3550[RHBA-2021:3550 OpenShift Logging Bug Fix Release 5.2.1] - -[id="openshift-logging-5-2-1-bug-fixes_{context}"] -=== Bug fixes - -* Before this update, due to an issue in the release pipeline scripts, the value of the `olm.skipRange` field remained unchanged at `5.2.0` instead of reflecting the current release number. This update fixes the pipeline scripts to update the value of this field when the release numbers change. (link:https://issues.redhat.com/browse/LOG-1743[LOG-1743]) - -[id="openshift-logging-5-2-1-CVEs_{context}"] -=== CVEs - -(None) - - -[id="cluster-logging-release-notes-5-2-0_{context}"] -== OpenShift Logging 5.2.0 - -This release includes link:https://access.redhat.com/errata/RHBA-2021:3393[RHBA-2021:3393 OpenShift Logging Bug Fix Release 5.2.0] - -[id="openshift-logging-5-2-0-new-features-and-enhancements_{context}"] -=== New features and enhancements - -* With this update, you can forward log data to Amazon CloudWatch, which provides application and infrastructure monitoring. For more information, see xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-cloudwatch_cluster-logging-external[Forwarding logs to Amazon CloudWatch]. 
(link:https://issues.redhat.com/browse/LOG-1173[LOG-1173]) - -* With this update, you can forward log data to Loki, a horizontally scalable, highly available, multi-tenant log aggregation system. For more information, see xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-loki_cluster-logging-external[Forwarding logs to Loki]. (link:https://issues.redhat.com/browse/LOG-684[LOG-684]) - -* With this update, if you use the Fluentd forward protocol to forward log data over a TLS-encrypted connection, now you can use a password-encrypted private key file and specify the passphrase in the Cluster Log Forwarder configuration. For more information, see xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-fluentd_cluster-logging-external[Forwarding logs using the Fluentd forward protocol]. (link:https://issues.redhat.com/browse/LOG-1525[LOG-1525]) - -* This enhancement enables you to use a username and password to authenticate a log forwarding connection to an external Elasticsearch instance. For example, if you cannot use mutual TLS (mTLS) because a third-party operates the Elasticsearch instance, you can use HTTP or HTTPS and set a secret that contains the username and password. For more information, see xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-es_cluster-logging-external[Forwarding logs to an external Elasticsearch instance]. (link:https://issues.redhat.com/browse/LOG-1022[LOG-1022]) - -* With this update, you can collect OVN network policy audit logs for forwarding to a logging server. (link:https://issues.redhat.com/browse/LOG-1526[LOG-1526]) - -* By default, the data model introduced in {product-title} 4.5 gave logs from different namespaces a single index in common. This change made it harder to see which namespaces produced the most logs. -+ -The current release adds namespace metrics to the *Logging* dashboard in the {product-title} console. With these metrics, you can see which namespaces produce logs and how many logs each namespace produces for a given timestamp. -+ -To see these metrics, open the *Administrator* perspective in the {product-title} web console, and navigate to *Observe* -> *Dashboards* -> *Logging/Elasticsearch*. (link:https://issues.redhat.com/browse/LOG-1680[LOG-1680]) - -* The current release, OpenShift Logging 5.2, enables two new metrics: For a given timestamp or duration, you can see the total logs produced or logged by individual containers, and the total logs collected by the collector. These metrics are labeled by namespace, pod, and container name so that you can see how many logs each namespace and pod collects and produces. (link:https://issues.redhat.com/browse/LOG-1213[LOG-1213]) - -[id="openshift-logging-5-2-0-bug-fixes_{context}"] -=== Bug fixes - -* Before this update, when the OpenShift Elasticsearch Operator created index management cronjobs, it added the `POLICY_MAPPING` environment variable twice, which caused the apiserver to report the duplication. This update fixes the issue so that the `POLICY_MAPPING` environment variable is set only once per cronjob, and there is no duplication for the apiserver to report. (link:https://issues.redhat.com/browse/LOG-1130[LOG-1130]) - -* Before this update, suspending an Elasticsearch cluster to zero nodes did not suspend the index-management cronjobs, which put these cronjobs into maximum backoff. 
Then, after unsuspending the Elasticsearch cluster, these cronjobs stayed halted due to maximum backoff reached. This update resolves the issue by suspending the cronjobs and the cluster. (link:https://issues.redhat.com/browse/LOG-1268[LOG-1268]) - -* Before this update, in the *Logging* dashboard in the {product-title} console, the list of top 10 log-producing containers was missing the "chart namespace" label and provided the incorrect metric name, `fluentd_input_status_total_bytes_logged`. With this update, the chart shows the namespace label and the correct metric name, `log_logged_bytes_total`. (link:https://issues.redhat.com/browse/LOG-1271[LOG-1271]) - -* Before this update, if an index management cronjob terminated with an error, it did not report the error exit code: instead, its job status was "complete." This update resolves the issue by reporting the error exit codes of index management cronjobs that terminate with errors. (link:https://issues.redhat.com/browse/LOG-1273[LOG-1273]) - -* The `priorityclasses.v1beta1.scheduling.k8s.io` was removed in 1.22 and replaced by `priorityclasses.v1.scheduling.k8s.io` (`v1beta1` was replaced by `v1`). Before this update, `APIRemovedInNextReleaseInUse` alerts were generated for `priorityclasses` because `v1beta1` was still present . This update resolves the issue by replacing `v1beta1` with `v1`. The alert is no longer generated. (link:https://issues.redhat.com/browse/LOG-1385[LOG-1385]) - -* Previously, the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator did not have the annotation that was required for them to appear in the {product-title} web console list of Operators that can run in a disconnected environment. This update adds the `operators.openshift.io/infrastructure-features: '["Disconnected"]'` annotation to these two Operators so that they appear in the list of Operators that run in disconnected environments. (link:https://issues.redhat.com/browse/LOG-1420[LOG-1420]) - -* Before this update, Red Hat OpenShift Logging Operator pods were scheduled on CPU cores that were reserved for customer workloads on performance-optimized single-node clusters. With this update, cluster logging Operator pods are scheduled on the correct CPU cores. (link:https://issues.redhat.com/browse/LOG-1440[LOG-1440]) - -* Before this update, some log entries had unrecognized UTF-8 bytes, which caused Elasticsearch to reject the messages and block the entire buffered payload. With this update, rejected payloads drop the invalid log entries and resubmit the remaining entries to resolve the issue. (link:https://issues.redhat.com/browse/LOG-1499[LOG-1499]) - -* Before this update, the `kibana-proxy` pod sometimes entered the `CrashLoopBackoff` state and logged the following message `Invalid configuration: cookie_secret must be 16, 24, or 32 bytes to create an AES cipher when pass_access_token == true or cookie_refresh != 0, but is 29 bytes.` The exact actual number of bytes could vary. With this update, the generation of the Kibana session secret has been corrected, and the kibana-proxy pod no longer enters a `CrashLoopBackoff` state due to this error. (link:https://issues.redhat.com/browse/LOG-1446[LOG-1446]) - -* Before this update, the AWS CloudWatch Fluentd plugin logged its AWS API calls to the Fluentd log at all log levels, consuming additional {product-title} node resources. With this update, the AWS CloudWatch Fluentd plugin logs AWS API calls only at the "debug" and "trace" log levels. 
This way, at the default "warn" log level, Fluentd does not consume extra node resources. (link:https://issues.redhat.com/browse/LOG-1071[LOG-1071]) - -* Before this update, the Elasticsearch OpenDistro security plugin caused user index migrations to fail. This update resolves the issue by providing a newer version of the plugin. Now, index migrations proceed without errors. (link:https://issues.redhat.com/browse/LOG-1276[LOG-1276]) - -* Before this update, in the *Logging* dashboard in the {product-title} console, the list of top 10 log-producing containers lacked data points. This update resolves the issue, and the dashboard displays all data points. (link:https://issues.redhat.com/browse/LOG-1353[LOG-1353]) - -* Before this update, if you were tuning the performance of the Fluentd log forwarder by adjusting the `chunkLimitSize` and `totalLimitSize` values, the `Setting queued_chunks_limit_size for each buffer to` message reported values that were too low. The current update fixes this issue so that this message reports the correct values. (link:https://issues.redhat.com/browse/LOG-1411[LOG-1411]) - -* Before this update, the Kibana OpenDistro security plugin caused user index migrations to fail. This update resolves the issue by providing a newer version of the plugin. Now, index migrations proceed without errors. (link:https://issues.redhat.com/browse/LOG-1558[LOG-1558]) - -* Before this update, using a namespace input filter prevented logs in that namespace from appearing in other inputs. With this update, logs are sent to all inputs that can accept them. (link:https://issues.redhat.com/browse/LOG-1570[LOG-1570]) - -* Before this update, a missing license file for the `viaq/logerr` dependency caused license scanners to abort without success. With this update, the `viaq/logerr` dependency is licensed under Apache 2.0 and the license scanners run successfully. (link:https://issues.redhat.com/browse/LOG-1590[LOG-1590]) - -* Before this update, an incorrect brew tag for `curator5` within the `elasticsearch-operator-bundle` build pipeline caused the pull of an image pinned to a dummy SHA1. With this update, the build pipeline uses the `logging-curator5-rhel8` reference for `curator5`, enabling index management cronjobs to pull the correct image from `registry.redhat.io`. (link:https://issues.redhat.com/browse/LOG-1624[LOG-1624]) - -* Before this update, an issue with the `ServiceAccount` permissions caused errors such as `no permissions for [indices:admin/aliases/get]`. With this update, a permission fix resolves the issue. (link:https://issues.redhat.com/browse/LOG-1657[LOG-1657]) - -* Before this update, the Custom Resource Definition (CRD) for the Red Hat OpenShift Logging Operator was missing the Loki output type, which caused the admission controller to reject the `ClusterLogForwarder` custom resource object. With this update, the CRD includes Loki as an output type so that administrators can configure `ClusterLogForwarder` to send logs to a Loki server. (link:https://issues.redhat.com/browse/LOG-1683[LOG-1683]) - -* Before this update, OpenShift Elasticsearch Operator reconciliation of the `ServiceAccounts` overwrote third-party-owned fields that contained secrets. This issue caused memory and CPU spikes due to frequent recreation of secrets. This update resolves the issue. Now, the OpenShift Elasticsearch Operator does not overwrite third-party-owned fields. 
(link:https://issues.redhat.com/browse/LOG-1714[LOG-1714]) - -* Before this update, in the `ClusterLogging` custom resource (CR) definition, if you specified a `flush_interval` value but did not set `flush_mode` to `interval`, the Red Hat OpenShift Logging Operator generated a Fluentd configuration. However, the Fluentd collector generated an error at runtime. With this update, the Red Hat OpenShift Logging Operator validates the `ClusterLogging` CR definition and only generates the Fluentd configuration if both fields are specified. (link:https://issues.redhat.com/browse/LOG-1723[LOG-1723]) - -[id="openshift-logging-5-2-0-known-issues_{context}"] -=== Known issues - -* If you forward logs to an external Elasticsearch server and then change a configured value in the pipeline secret, such as the username and password, the Fluentd forwarder loads the new secret but uses the old value to connect to the external Elasticsearch server. This issue happens because the Red Hat OpenShift Logging Operator does not currently monitor secrets for content changes. (link:https://issues.redhat.com/browse/LOG-1652[LOG-1652]) -+ -As a workaround, if you change the secret, you can force the Fluentd pods to redeploy by entering: -+ -[source,terminal] ----- -$ oc delete pod -l component=collector ----- - -[id="openshift-logging-5-2-0-deprecated-removed-features_{context}"] -=== Deprecated and removed features - -Some features available in previous releases have been deprecated or removed. - -Deprecated functionality is still included in OpenShift Logging and continues to be supported; however, it will be removed in a future release of this product and is not recommended for new deployments. - -[id="openshift-logging-5-2-0-legacy-forwarding_{context}"] -=== Forwarding logs using the legacy Fluentd and legacy syslog methods has been deprecated - -From {product-title} 4.6 to the present, forwarding logs by using the following legacy methods has been deprecated and will be removed in a future release: - -* Forwarding logs using the legacy Fluentd method -* Forwarding logs using the legacy syslog method - -Instead, use the following non-legacy methods: - -* xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-fluentd_cluster-logging-external[Forwarding logs using the Fluentd forward protocol] - -* xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-syslog_cluster-logging-external[Forwarding logs using the syslog protocol] - -[id="openshift-logging-5-2-0-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://www.redhat.com/security/data/cve/CVE-2021-22922.html[CVE-2021-22922] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22923.html[CVE-2021-22923] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22924.html[CVE-2021-22924] -* link:https://www.redhat.com/security/data/cve/CVE-2021-32740.html[CVE-2021-32740] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36222.html[CVE-2021-36222] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37750.html[CVE-2021-37750] -==== diff --git a/logging/cluster-logging-uninstall.adoc b/logging/cluster-logging-uninstall.adoc deleted file mode 100644 index 531a1ccff50f..000000000000 --- a/logging/cluster-logging-uninstall.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-uninstall -[id="cluster-logging-uninstall"] -= Uninstalling OpenShift Logging -include::_attributes/common-attributes.adoc[] - -toc::[] - -You can 
remove the {logging} from your {product-title} cluster. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-uninstall.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -ifdef::openshift-enterprise,openshift-origin[] -* xref:../storage/understanding-persistent-storage.adoc#reclaim-manual_understanding-persistent-storage[Reclaiming a persistent volume manually] -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -* link:https://docs.openshift.com/container-platform/latest/storage/understanding-persistent-storage.html#reclaim-manual_understanding-persistent-storage[Reclaiming a persistent volume manually] -endif::[] \ No newline at end of file diff --git a/logging/cluster-logging-upgrading.adoc b/logging/cluster-logging-upgrading.adoc deleted file mode 100644 index 9b6bee0ea1cc..000000000000 --- a/logging/cluster-logging-upgrading.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-upgrading -[id="cluster-logging-upgrading"] -= Updating OpenShift Logging -include::_attributes/common-attributes.adoc[] - -toc::[] - -[id="cluster-logging-supported-versions"] -== Supported versions -For version compatibility and support information, see link:https://access.redhat.com/support/policy/updates/openshift#logging[Red Hat OpenShift Container Platform Life Cycle Policy]. - -To upgrade from cluster logging in {product-title} version 4.6 and earlier to OpenShift Logging 5.x, you update the {product-title} cluster to version 4.7 or 4.8. Then, you update the following operators: - -* From Elasticsearch Operator 4.x to OpenShift Elasticsearch Operator 5.x -* From Cluster Logging Operator 4.x to Red Hat OpenShift Logging Operator 5.x - -To upgrade from a previous version of OpenShift Logging to the current version, you update the OpenShift Elasticsearch Operator and the Red Hat OpenShift Logging Operator to their current versions. - -include::modules/cluster-logging-updating-logging-to-current.adoc[leveloffset=+1] diff --git a/logging/cluster-logging-visualizer.adoc b/logging/cluster-logging-visualizer.adoc deleted file mode 100644 index 6ded5d71ff00..000000000000 --- a/logging/cluster-logging-visualizer.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-visualizer -[id="cluster-logging-visualizer-using"] -= Viewing cluster logs by using Kibana -include::_attributes/common-attributes.adoc[] - -toc::[] - -The {logging} includes a web console for visualizing collected log data. Currently, {product-title} deploys the Kibana console for visualization. - -Using the log visualizer, you can do the following with your data: - -* search and browse the data using the *Discover* tab. -* chart and map the data using the *Visualize* tab. -* create and view custom dashboards using the *Dashboard* tab. - -Use and configuration of the Kibana interface is beyond the scope of this documentation. For more information on using the interface, see the link:https://www.elastic.co/guide/en/kibana/6.8/connect-to-elasticsearch.html[Kibana documentation]. - -[NOTE] -==== -The audit logs are not stored in the internal {product-title} Elasticsearch instance by default. 
To view the audit logs in Kibana, you must use the xref:../logging/config/cluster-logging-log-store.adoc#cluster-logging-elasticsearch-audit_cluster-logging-store[Log Forwarding API] to configure a pipeline that uses the `default` output for audit logs. -==== - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-visualizer-indices.adoc[leveloffset=+1] -include::modules/cluster-logging-visualizer-kibana.adoc[leveloffset=+1] diff --git a/logging/cluster-logging.adoc b/logging/cluster-logging.adoc deleted file mode 100644 index 0843a5f73667..000000000000 --- a/logging/cluster-logging.adoc +++ /dev/null @@ -1,89 +0,0 @@ -:_content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="cluster-logging"] -= Understanding the {logging-title} -:context: cluster-logging - -toc::[] - -ifdef::openshift-enterprise,openshift-rosa,openshift-dedicated,openshift-webscale,openshift-origin[] -As a cluster administrator, you can deploy the {logging} to aggregate all the logs from your {product-title} cluster, such as node system audit logs, application container logs, and infrastructure logs. The {logging} aggregates these logs from throughout your cluster and stores them in a default log store. You can xref:../logging/cluster-logging-visualizer.adoc#cluster-logging-visualizer[use the Kibana web console to visualize log data]. - -The {logging} aggregates the following types of logs: - -* `application` - Container logs generated by user applications running in the cluster, except infrastructure container applications. -* `infrastructure` - Logs generated by infrastructure components running in the cluster and {product-title} nodes, such as journal logs. Infrastructure components are pods that run in the `openshift*`, `kube*`, or `default` projects. -* `audit` - Logs generated by auditd, the node audit system, which are stored in the */var/log/audit/audit.log* file, and the audit logs from the Kubernetes apiserver and the OpenShift apiserver. - -[NOTE] -==== -Because the internal {product-title} Elasticsearch log store does not provide secure storage for audit logs, audit logs are not stored in the internal Elasticsearch instance by default. If you want to send the audit logs to the default internal Elasticsearch log store, for example to view the audit logs in Kibana, you must use the Log Forwarding API as described in xref:../logging/config/cluster-logging-log-store.adoc#cluster-logging-elasticsearch-audit_cluster-logging-store[Forward audit logs to the log store]. -==== -endif::[] - -include::modules/logging-support-considerations.adoc[leveloffset=+1] - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -ifdef::openshift-rosa,openshift-dedicated[] -include::modules/cluster-logging-cloudwatch.adoc[leveloffset=+1] -.Next steps -* See xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-cloudwatch_cluster-logging-external[Forwarding logs to Amazon CloudWatch] for instructions. 
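For orientation only, the following is a minimal sketch of a `ClusterLogForwarder` custom resource with a CloudWatch output. The output name, AWS region, and secret name shown here are placeholder values, and the exact fields supported by your version are described in the linked procedure.

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: instance
  namespace: openshift-logging
spec:
  outputs:
  - name: cw                 # placeholder output name
    type: cloudwatch
    cloudwatch:
      groupBy: logType       # group CloudWatch log streams by log type
      region: us-east-2      # example AWS region
    secret:
      name: cw-secret        # placeholder secret that holds the AWS credentials
  pipelines:
  - name: to-cloudwatch
    inputRefs:               # forward application and infrastructure logs
    - application
    - infrastructure
    outputRefs:
    - cw
----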
-endif::[] - -include::modules/logging-common-terms.adoc[leveloffset=+1] -include::modules/cluster-logging-about.adoc[leveloffset=+1] - -For information, see xref:../logging/cluster-logging-deploying.adoc#cluster-logging-deploying[Installing the {logging-title}]. - -include::modules/cluster-logging-json-logging-about.adoc[leveloffset=+2] - -include::modules/cluster-logging-collecting-storing-kubernetes-events.adoc[leveloffset=+2] - -For information, see xref:../logging/cluster-logging-eventrouter.adoc#cluster-logging-eventrouter[About collecting and storing Kubernetes events]. - -include::modules/cluster-logging-update-logging.adoc[leveloffset=+2] - -For information, see xref:../logging/cluster-logging-upgrading.adoc#cluster-logging-upgrading[Updating OpenShift Logging]. - -include::modules/cluster-logging-view-cluster-dashboards.adoc[leveloffset=+2] - -For information, see xref:../logging/cluster-logging-dashboards.adoc#cluster-logging-dashboards[About viewing the cluster dashboard]. - -include::modules/cluster-logging-troubleshoot-logging.adoc[leveloffset=+2] - -include::modules/cluster-logging-Uninstall-logging.adoc[leveloffset=+2] - -For information, see xref:../logging/cluster-logging-uninstall.adoc#cluster-logging-uninstall_cluster-logging-uninstall[Uninstalling OpenShift Logging]. - -include::modules/cluster-logging-export-fields.adoc[leveloffset=+2] - -For information, see xref:../logging/cluster-logging-exported-fields.adoc#cluster-logging-exported-fields[About exporting fields]. - -include::modules/cluster-logging-about-components.adoc[leveloffset=+2] - -include::modules/cluster-logging-about-collector.adoc[leveloffset=+2] - -For information, see xref:../logging/config/cluster-logging-collector.adoc#cluster-logging-collector[Configuring the logging collector]. - -include::modules/cluster-logging-about-logstore.adoc[leveloffset=+2] - -For information, see xref:../logging/config/cluster-logging-log-store.adoc#cluster-logging-store[Configuring the log store]. - -include::modules/cluster-logging-about-visualizer.adoc[leveloffset=+2] - -For information, see xref:../logging/config/cluster-logging-visualizer.adoc#cluster-logging-visualizer[Configuring the log visualizer]. - -include::modules/cluster-logging-eventrouter-about.adoc[leveloffset=+2] - -For information, see xref:../logging/cluster-logging-eventrouter.adoc#cluster-logging-eventrouter[Collecting and storing Kubernetes events]. - -include::modules/cluster-logging-forwarding-about.adoc[leveloffset=+2] - -For information, see xref:../logging/cluster-logging-external.adoc#cluster-logging-external[Forwarding logs to third-party systems]. 
- -include::modules/cluster-logging-feature-reference.adoc[leveloffset=+1] diff --git a/logging/config/_attributes b/logging/config/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/logging/config/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/logging/config/cluster-logging-collector.adoc b/logging/config/cluster-logging-collector.adoc deleted file mode 100644 index d19b0ce8a72a..000000000000 --- a/logging/config/cluster-logging-collector.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-collector -[id="cluster-logging-collector"] -= Configuring the logging collector -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] - -toc::[] - -{logging-title-uc} collects operations and application logs from your cluster and enriches the data with Kubernetes pod and project metadata. - -You can configure the CPU and memory limits for the log collector and xref:../../logging/config/cluster-logging-moving-nodes.adoc#cluster-logging-moving[move the log collector pods to specific nodes]. All supported modifications to the log collector can be performed through the `spec.collection.log.fluentd` stanza in the `ClusterLogging` custom resource (CR). - - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-maintenance-support-about.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-pod-location.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-limits.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-tuning.adoc[leveloffset=+1] - -include::modules/cluster-logging-removing-unused-components-if-no-elasticsearch.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../logging/cluster-logging-external.adoc#cluster-logging-external[Forwarding logs to third-party systems] diff --git a/logging/config/cluster-logging-configuring-cr.adoc b/logging/config/cluster-logging-configuring-cr.adoc deleted file mode 100644 index 4cb8c3f1d142..000000000000 --- a/logging/config/cluster-logging-configuring-cr.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-configuring-cr -[id="cluster-logging-configuring-cr"] -= About the Cluster Logging custom resource -include::_attributes/common-attributes.adoc[] - -toc::[] - -To configure {logging-title}, you customize the `ClusterLogging` custom resource (CR). - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. 
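As a quick orientation, the following minimal `ClusterLogging` skeleton shows the main stanzas that you customize. This is only a sketch; the fuller annotated samples elsewhere in this documentation show the complete set of supported fields.

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogging
metadata:
  name: instance               # the samples in this documentation use the name "instance"
  namespace: openshift-logging
spec:
  managementState: Managed
  logStore:                    # Elasticsearch log store
    type: elasticsearch
    elasticsearch:
      nodeCount: 3
      redundancyPolicy: SingleRedundancy
  visualization:               # Kibana log visualizer
    type: kibana
    kibana:
      replicas: 1
  collection:                  # Fluentd log collector
    logs:
      type: fluentd
      fluentd: {}
----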
- -include::modules/cluster-logging-about-crd.adoc[leveloffset=+1] diff --git a/logging/config/cluster-logging-configuring.adoc b/logging/config/cluster-logging-configuring.adoc deleted file mode 100644 index 251f7e03d361..000000000000 --- a/logging/config/cluster-logging-configuring.adoc +++ /dev/null @@ -1,81 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-configuring -[id="cluster-logging-configuring"] -= Configuring OpenShift Logging -include::_attributes/common-attributes.adoc[] - -toc::[] - -{logging-title-uc} is configurable using a `ClusterLogging` custom resource (CR) deployed -in the `openshift-logging` project. - -The {logging} operator watches for changes to `ClusterLogging` CR, -creates any missing logging components, and adjusts the logging environment accordingly. - -The `ClusterLogging` CR is based on the `ClusterLogging` custom resource definition (CRD), which defines a complete {logging} environment and includes all the components of the logging stack to collect, store and visualize logs. - -.Sample `ClusterLogging` custom resource (CR) -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: - creationTimestamp: '2019-03-20T18:07:02Z' - generation: 1 - name: instance - namespace: openshift-logging -spec: - collection: - logs: - fluentd: - resources: null - type: fluentd - logStore: - elasticsearch: - nodeCount: 3 - redundancyPolicy: SingleRedundancy - resources: - limits: - cpu: - memory: - requests: - cpu: - memory: - storage: {} - type: elasticsearch - managementState: Managed - visualization: - kibana: - proxy: - resources: null - replicas: 1 - resources: null - type: kibana ----- -You can configure the following for the {logging}: - -* You can overwrite the image for each {logging} component by modifying the appropriate -environment variable in the `cluster-logging-operator` Deployment. - -* You can specify specific nodes for the logging components using node selectors. - -//// -* You can specify the Log collectors to deploy to each node in a cluster, either Fluentd or Rsyslog. - -[IMPORTANT] -==== -The Rsyslog log collector is currently a Technology Preview feature. -==== -//// - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -// modules/cluster-logging-configuring-image-about.adoc[leveloffset=+1] - -[IMPORTANT] -==== -The logging routes are managed by the {logging-title} Operator and cannot be modified by the user. -==== diff --git a/logging/config/cluster-logging-log-store.adoc b/logging/config/cluster-logging-log-store.adoc deleted file mode 100644 index 0292b0ffe999..000000000000 --- a/logging/config/cluster-logging-log-store.adoc +++ /dev/null @@ -1,52 +0,0 @@ -:_content-type: ASSEMBLY -[id="cluster-logging-store"] -= Configuring the log store -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: cluster-logging-store - -toc::[] - -{logging-title-uc} uses Elasticsearch 6 (ES) to store and organize the log data. 
- -You can make modifications to your log store, including: - -* storage for your Elasticsearch cluster -* shard replication across data nodes in the cluster, from full replication to no replication -* external access to Elasticsearch data - -//Following paragraph also in modules/cluster-logging-deploy-storage-considerations.adoc - -Elasticsearch is a memory-intensive application. Each Elasticsearch node needs at least 16G of memory for both memory requests and limits, unless you specify otherwise in the `ClusterLogging` custom resource. The initial set of {product-title} nodes might not be large enough to support the Elasticsearch cluster. You must add additional nodes to the {product-title} cluster to run with the recommended -or higher memory, up to a maximum of 64G for each Elasticsearch node. - -Each Elasticsearch node can operate with a lower memory setting, though this is not recommended for production environments. - - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-elasticsearch-audit.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* For more information on the Log Forwarding API, see xref:../../logging/cluster-logging-external.adoc#cluster-logging-external[Forwarding logs using the Log Forwarding API]. - -include::modules/cluster-logging-elasticsearch-retention.adoc[leveloffset=+1] - -include::modules/cluster-logging-logstore-limits.adoc[leveloffset=+1] - -include::modules/cluster-logging-elasticsearch-ha.adoc[leveloffset=+1] - -include::modules/cluster-logging-elasticsearch-scaledown.adoc[leveloffset=+1] - -include::modules/cluster-logging-elasticsearch-storage.adoc[leveloffset=+1] - -include::modules/cluster-logging-elasticsearch-persistent-storage-empty.adoc[leveloffset=+1] - -include::modules/cluster-logging-manual-rollout-rolling.adoc[leveloffset=+1] - -include::modules/cluster-logging-elasticsearch-exposing.adoc[leveloffset=+1] diff --git a/logging/config/cluster-logging-maintenance-support.adoc b/logging/config/cluster-logging-maintenance-support.adoc deleted file mode 100644 index fba711d9d4be..000000000000 --- a/logging/config/cluster-logging-maintenance-support.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-unsupported -[id="cluster-logging-maintenance-and-support"] -= Maintenance and support -include::_attributes/common-attributes.adoc[] - -toc::[] - -include::modules/cluster-logging-maintenance-support-about.adoc[leveloffset=+1] - -include::modules/cluster-logging-maintenance-support-list.adoc[leveloffset=+1] - -include::modules/unmanaged-operators.adoc[leveloffset=+1] diff --git a/logging/config/cluster-logging-memory.adoc b/logging/config/cluster-logging-memory.adoc deleted file mode 100644 index 154fd68985c4..000000000000 --- a/logging/config/cluster-logging-memory.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-memory -[id="cluster-logging-memory"] -= Configuring CPU and memory limits for {logging} components -include::_attributes/common-attributes.adoc[] - -toc::[] - - -You can configure both the CPU and memory limits for each of the {logging} components as needed. - - -// The following include statements pull in the module files that comprise -// the assembly. 
Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - - -include::modules/cluster-logging-cpu-memory.adoc[leveloffset=+1] diff --git a/logging/config/cluster-logging-moving-nodes.adoc b/logging/config/cluster-logging-moving-nodes.adoc deleted file mode 100644 index 89ca4a5032be..000000000000 --- a/logging/config/cluster-logging-moving-nodes.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-moving -[id="cluster-logging-moving"] -= Moving {logging} resources with node selectors -include::_attributes/common-attributes.adoc[] - -toc::[] - - - - - -You can use node selectors to deploy the Elasticsearch and Kibana pods to different nodes. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/infrastructure-moving-logging.adoc[leveloffset=+1] diff --git a/logging/config/cluster-logging-storage-considerations.adoc b/logging/config/cluster-logging-storage-considerations.adoc deleted file mode 100644 index 83d432692e54..000000000000 --- a/logging/config/cluster-logging-storage-considerations.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-storage -[id="cluster-logging-storage"] -= Configuring {logging} storage -include::_attributes/common-attributes.adoc[] - -toc::[] - - -Elasticsearch is a memory-intensive application. The default {logging} installation deploys 16G of memory for both memory requests and memory limits. -The initial set of {product-title} nodes might not be large enough to support the Elasticsearch cluster. You must add additional nodes to the {product-title} cluster to run with the recommended or higher memory. Each Elasticsearch node can operate with a lower memory setting, though this is not recommended for production environments. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - - -include::modules/cluster-logging-deploy-storage-considerations.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="cluster-logging-storage-considerations-addtl-resources"] -== Additional resources - -* xref:../../logging/config/cluster-logging-log-store.adoc#cluster-logging-elasticsearch-storage_cluster-logging-store[Configuring persistent storage for the log store] diff --git a/logging/config/cluster-logging-systemd.adoc b/logging/config/cluster-logging-systemd.adoc deleted file mode 100644 index aa6fb1228fc8..000000000000 --- a/logging/config/cluster-logging-systemd.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-systemd -[id="cluster-logging-systemd"] -= Configuring systemd-journald and Fluentd -include::_attributes/common-attributes.adoc[] - -toc::[] - -Because Fluentd reads from the journal, and the journal default settings are very low, journal entries can be lost because the journal cannot keep up with the logging rate from system services. - -We recommend setting `RateLimitIntervalSec=30s` and `RateLimitBurst=10000` (or even higher if necessary) to prevent the journal from losing entries. 
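For reference, a minimal sketch of the corresponding `journald.conf` settings follows. The module included below describes the supported way to apply journald settings to {product-title} nodes.

[source,text]
----
# /etc/systemd/journald.conf (extract)
[Journal]
# Measure the message rate over 30-second intervals.
RateLimitIntervalSec=30s
# Allow up to 10000 messages per interval before journald starts dropping entries.
RateLimitBurst=10000
----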
- -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - - -include::modules/cluster-logging-systemd-scaling.adoc[leveloffset=+1] diff --git a/logging/config/cluster-logging-tolerations.adoc b/logging/config/cluster-logging-tolerations.adoc deleted file mode 100644 index aeb9ce64e70b..000000000000 --- a/logging/config/cluster-logging-tolerations.adoc +++ /dev/null @@ -1,104 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-tolerations -[id="cluster-logging-tolerations"] -= Using tolerations to control OpenShift Logging pod placement -include::_attributes/common-attributes.adoc[] - -toc::[] - -You can use taints and tolerations to ensure that {logging} pods run -on specific nodes and that no other workload can run on those nodes. - -Taints and tolerations are simple `key:value` pair. A taint on a node -instructs the node to repel all pods that do not tolerate the taint. - -The `key` is any string, up to 253 characters and the `value` is any string up to 63 characters. -The string must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores. - -.Sample {logging} CR with tolerations -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - namespace: openshift-logging - -... - -spec: - managementState: "Managed" - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - tolerations: <1> - - key: "logging" - operator: "Exists" - effect: "NoExecute" - tolerationSeconds: 6000 - resources: - limits: - memory: 16Gi - requests: - cpu: 200m - memory: 16Gi - storage: {} - redundancyPolicy: "ZeroRedundancy" - visualization: - type: "kibana" - kibana: - tolerations: <2> - - key: "logging" - operator: "Exists" - effect: "NoExecute" - tolerationSeconds: 6000 - resources: - limits: - memory: 2Gi - requests: - cpu: 100m - memory: 1Gi - replicas: 1 - collection: - logs: - type: "fluentd" - fluentd: - tolerations: <3> - - key: "logging" - operator: "Exists" - effect: "NoExecute" - tolerationSeconds: 6000 - resources: - limits: - memory: 2Gi - requests: - cpu: 100m - memory: 1Gi ----- - -<1> This toleration is added to the Elasticsearch pods. -<2> This toleration is added to the Kibana pod. -<3> This toleration is added to the logging collector pods. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-elasticsearch-tolerations.adoc[leveloffset=+1] - -include::modules/cluster-logging-kibana-tolerations.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-tolerations.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="cluster-logging-tolerations-addtl-resources"] -== Additional resources - -ifdef::openshift-enterprise,openshift-origin[] -* xref:../../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations[Controlling pod placement using node taints]. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -* link:https://docs.openshift.com/container-platform/latest/nodes/scheduling/nodes-scheduler-taints-tolerations.html#nodes-scheduler-taints-tolerations[Controlling pod placement using node taints]. 
-endif::[] \ No newline at end of file diff --git a/logging/config/cluster-logging-visualizer.adoc b/logging/config/cluster-logging-visualizer.adoc deleted file mode 100644 index f1d0670fe26f..000000000000 --- a/logging/config/cluster-logging-visualizer.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-visualizer -[id="cluster-logging-visualizer"] -= Configuring the log visualizer -include::_attributes/common-attributes.adoc[] - -toc::[] - -{product-title} uses Kibana to display the log data collected by the {logging}. - -You can scale Kibana for redundancy and configure the CPU and memory for your Kibana nodes. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-cpu-memory.adoc[leveloffset=+1] - -include::modules/cluster-logging-kibana-scaling.adoc[leveloffset=+1] diff --git a/logging/config/images b/logging/config/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/logging/config/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/logging/config/modules b/logging/config/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/logging/config/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/logging/config/snippets b/logging/config/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/logging/config/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/logging/dedicated-cluster-deploying.adoc b/logging/dedicated-cluster-deploying.adoc deleted file mode 100644 index 3c618d182d34..000000000000 --- a/logging/dedicated-cluster-deploying.adoc +++ /dev/null @@ -1,9 +0,0 @@ -:_content-type: ASSEMBLY -:context: dedicated-cluster-deploying -[id="dedicated-cluster-deploying"] -= Installing the Red Hat OpenShift Logging Operator and OpenShift Elasticsearch Operator -include::_attributes/common-attributes.adoc[] - -toc::[] - -include::modules/dedicated-cluster-install-deploy.adoc[leveloffset=+1] diff --git a/logging/dedicated-cluster-logging.adoc b/logging/dedicated-cluster-logging.adoc deleted file mode 100644 index 9da78a1e6091..000000000000 --- a/logging/dedicated-cluster-logging.adoc +++ /dev/null @@ -1,63 +0,0 @@ -:_content-type: ASSEMBLY -:context: dedicated-cluster-logging -[id="dedicated-cluster-logging"] -= Configuring the {logging-title} -include::_attributes/common-attributes.adoc[] - -toc::[] - -As a cluster administrator, you can deploy the {logging} to aggregate logs for a range of services. - -{product-title} clusters can perform logging tasks using the OpenShift Elasticsearch Operator. - -The {logging} is configurable using a `ClusterLogging` custom resource (CR) -deployed in the `openshift-logging` project namespace. - -The Red Hat OpenShift Logging Operator watches for changes to `ClusterLogging` CR, creates -any missing logging components, and adjusts the logging environment accordingly. - -The `ClusterLogging` CR is based on the `ClusterLogging` custom resource -definition (CRD), which defines a complete OpenShift Logging environment and -includes all the components of the logging stack to collect, store, and visualize -logs. 
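To inspect the `ClusterLogging` CR that the Operator reconciles, you can retrieve it directly. The following command is a sketch that assumes the CR uses the default name `instance`, as in the sample later in this assembly.

[source,terminal]
----
$ oc -n openshift-logging get clusterlogging instance -o yaml
----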
- -The `retentionPolicy` parameter in the `ClusterLogging` custom resource (CR) defines how long the internal Elasticsearch log store retains logs. - -.Sample `ClusterLogging` custom resource (CR) -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - namespace: "openshift-logging" -spec: - managementState: "Managed" - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - storage: - storageClassName: "gp2" - size: "200Gi" - redundancyPolicy: "SingleRedundancy" - nodeSelector: - node-role.kubernetes.io/worker: "" - resources: - limits: - memory: 16G - request: - memory: 16G - visualization: - type: "kibana" - kibana: - replicas: 1 - nodeSelector: - node-role.kubernetes.io/worker: "" - collection: - logs: - type: "fluentd" - fluentd: {} - nodeSelector: - node-role.kubernetes.io/worker: "" ----- diff --git a/logging/images b/logging/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/logging/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/logging/modules b/logging/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/logging/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/logging/rosa-viewing-logs.adoc b/logging/rosa-viewing-logs.adoc deleted file mode 100644 index 17f65bafe2ae..000000000000 --- a/logging/rosa-viewing-logs.adoc +++ /dev/null @@ -1,10 +0,0 @@ -:_content-type: ASSEMBLY -[id="rosa-viewing-logs"] -= Viewing cluster logs in the AWS Console -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: rosa-viewing-logs -toc::[] - -You can view forwarded cluster logs in the AWS console. - -include::modules/rosa-view-cloudwatch-logs.adoc[leveloffset=+1] diff --git a/logging/sd-accessing-the-service-logs.adoc b/logging/sd-accessing-the-service-logs.adoc deleted file mode 100644 index f1ac7ad2e886..000000000000 --- a/logging/sd-accessing-the-service-logs.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: ASSEMBLY -[id="sd-accessing-the-service-logs"] -= Accessing the service logs for {product-title} clusters -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: sd-accessing-the-service-logs - -toc::[] - -[role="_abstract"] -You can view the service logs for your {product-title} -ifdef::openshift-rosa[] - (ROSA) -endif::[] - clusters by using the {cluster-manager-first}. The service logs detail cluster events such as load balancer quota updates and scheduled maintenance upgrades. The logs also show cluster resource changes such as the addition or deletion of users, groups, and identity providers. - -// Commented out while the OpenShift Cluster Manager CLI is in Developer Preview: -//You can view the service logs for your {product-title} (ROSA) clusters by using {cluster-manager-first} or the {cluster-manager} CLI (`ocm`). The service logs detail cluster events such as load balancer quota updates and scheduled maintenance upgrades. The logs also show cluster resource changes such as the addition or deletion of users, groups, and identity providers. - -Additionally, you can add notification contacts for -ifdef::openshift-rosa[] - a ROSA -endif::[] -ifdef::openshift-dedicated[] - an {product-title} -endif::[] - cluster. Subscribed users receive emails about cluster events that require customer action, known cluster incidents, upgrade maintenance, and other topics. 
- -// Commented out while the OpenShift Cluster Manager CLI is in Developer Preview: -//include::modules/viewing-the-service-logs.adoc[leveloffset=+1] -//include::modules/viewing-the-service-logs-ocm.adoc[leveloffset=+2] -//include::modules/viewing-the-service-logs-cli.adoc[leveloffset=+2] -include::modules/viewing-the-service-logs-ocm.adoc[leveloffset=+1] -include::modules/adding-cluster-notification-contacts.adoc[leveloffset=+1] diff --git a/logging/snippets b/logging/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/logging/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/logging/troubleshooting/_attributes b/logging/troubleshooting/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/logging/troubleshooting/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/logging/troubleshooting/cluster-logging-alerts.adoc b/logging/troubleshooting/cluster-logging-alerts.adoc deleted file mode 100644 index 63079a60893e..000000000000 --- a/logging/troubleshooting/cluster-logging-alerts.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-alerts -[id="cluster-logging-alerts"] -= Understanding {logging} alerts -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] - -toc::[] - -All of the logging collector alerts are listed on the Alerting UI of the -ifndef::openshift-rosa,openshift-dedicated[] -{product-title} web console. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -{cluster-manager-url}. -endif::[] - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - - -include::modules/cluster-logging-collector-alerts-viewing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* For more information on the Alerting UI, see -ifdef::openshift-enterprise,openshift-origin[] -xref:../../monitoring/managing-alerts.adoc#managing-alerts[Managing alerts]. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -link:https://docs.openshift.com/container-platform/latest/monitoring/managing-alerts.html#managing-alerts[Managing alerts]. -endif::[] - -include::modules/cluster-logging-collector-alerts.adoc[leveloffset=+1] -include::modules/cluster-logging-elasticsearch-rules.adoc[leveloffset=+1] diff --git a/logging/troubleshooting/cluster-logging-cluster-status.adoc b/logging/troubleshooting/cluster-logging-cluster-status.adoc deleted file mode 100644 index 22b4c854a6a3..000000000000 --- a/logging/troubleshooting/cluster-logging-cluster-status.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-cluster-status -[id="cluster-logging-cluster-status"] -= Viewing OpenShift Logging status -include::_attributes/common-attributes.adoc[] - -toc::[] - -You can view the status of the Red Hat OpenShift Logging Operator and for a number of {logging} components. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. 
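Before working through the detailed procedures that follow, a quick informal check is often enough to spot an obvious problem. The following commands are a sketch that assumes the default `openshift-logging` namespace and a `ClusterLogging` CR named `instance`.

[source,terminal]
----
$ oc -n openshift-logging get pods
$ oc -n openshift-logging get clusterlogging instance -o jsonpath='{.status}'
----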
- - -include::modules/cluster-logging-clo-status.adoc[leveloffset=+1] - -include::modules/cluster-logging-clo-status-comp.adoc[leveloffset=+1] diff --git a/logging/troubleshooting/cluster-logging-log-store-status.adoc b/logging/troubleshooting/cluster-logging-log-store-status.adoc deleted file mode 100644 index 49644546fbd8..000000000000 --- a/logging/troubleshooting/cluster-logging-log-store-status.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-elasticsearch -[id="cluster-logging-log-store-status"] -= Viewing the status of the Elasticsearch log store -include::_attributes/common-attributes.adoc[] - -toc::[] - -You can view the status of the OpenShift Elasticsearch Operator and for a number of Elasticsearch components. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - - -include::modules/cluster-logging-log-store-status-viewing.adoc[leveloffset=+1] - -include::modules/cluster-logging-log-store-status-comp.adoc[leveloffset=+1] - -include::modules/ref_cluster-logging-elasticsearch-cluster-status.adoc[leveloffset=+1] diff --git a/logging/troubleshooting/cluster-logging-must-gather.adoc b/logging/troubleshooting/cluster-logging-must-gather.adoc deleted file mode 100644 index 57f44d87f1b9..000000000000 --- a/logging/troubleshooting/cluster-logging-must-gather.adoc +++ /dev/null @@ -1,35 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-must-gather -[id="cluster-logging-must-gather"] -= Collecting logging data for Red Hat Support -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] - -toc::[] - -When opening a support case, it is helpful to provide debugging information about your cluster to Red Hat Support. - -The -ifdef::openshift-enterprise,openshift-origin[] -xref:../../support/gathering-cluster-data.adoc#gathering-cluster-data[`must-gather` tool] -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -link:https://docs.openshift.com/container-platform/latest/support/gathering-cluster-data.html#gathering-cluster-data[`must-gather` tool] -endif::[] -enables you to collect diagnostic information for project-level resources, cluster-level resources, and each of the {logging} components. - -For prompt support, supply diagnostic information for both {product-title} and OpenShift Logging. - -[NOTE] -==== -Do not use the `hack/logging-dump.sh` script. The script is no longer supported and does not collect data. -==== - -include::modules/cluster-logging-must-gather-about.adoc[leveloffset=+1] - -[id="cluster-logging-must-gather-prereqs"] -== Prerequisites - -* The {logging} and Elasticsearch must be installed. 
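As a quick illustration of the collection step that follows, one commonly used form of the command points `must-gather` at the image of the Red Hat OpenShift Logging Operator. This is a sketch that assumes the Operator deployment and its container are both named `cluster-logging-operator` in the `openshift-logging` namespace.

[source,terminal]
----
$ oc adm must-gather --image=$(oc -n openshift-logging get deployment.apps/cluster-logging-operator \
    -o jsonpath='{.spec.template.spec.containers[?(@.name == "cluster-logging-operator")].image}')
----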
- -include::modules/cluster-logging-must-gather-collecting.adoc[leveloffset=+1] diff --git a/logging/troubleshooting/cluster-logging-troubleshooting-for-critical-alerts.adoc b/logging/troubleshooting/cluster-logging-troubleshooting-for-critical-alerts.adoc deleted file mode 100644 index f58cef1f008c..000000000000 --- a/logging/troubleshooting/cluster-logging-troubleshooting-for-critical-alerts.adoc +++ /dev/null @@ -1,554 +0,0 @@ -:_content-type: ASSEMBLY -[id="cluster-logging-troubleshooting-for-critical-alerts"] -= Troubleshooting for Critical Alerts -include::_attributes/common-attributes.adoc[] - -toc::[] -:toclevels: 2 - -// WARNING - DO NOT ALTER THE URL PATH OF THIS CONTENT, OR YOU WILL BREAK LINKS FROM ALERT MESSAGES THAT LINK TO THIS CONTENT. -// However, if you must make such changes, consult with the logging team beforehand. - - -[id="elasticsearch-cluster-health-is-red"] -== Elasticsearch Cluster Health is Red - -At least one primary shard and its replicas are not allocated to a node. - -.Troubleshooting - -. Check the Elasticsearch cluster health and verify that the cluster `status` is red. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- health ----- - -. List the nodes that have joined the cluster. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_cat/nodes?v ----- - -. List the Elasticsearch pods and compare them with the nodes in the command output from the previous step. -+ -[source,terminal] ----- -oc -n openshift-logging get pods -l component=elasticsearch ----- - -. If some of the Elasticsearch nodes have not joined the cluster, perform the following steps. - -.. Confirm that Elasticsearch has an elected control plane node. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_cat/master?v ----- - -.. Review the pod logs of the elected control plane node for issues. -+ -[source,terminal] ----- -oc logs -c elasticsearch -n openshift-logging ----- - -.. Review the logs of nodes that have not joined the cluster for issues. -+ -[source,terminal] ----- -oc logs -c elasticsearch -n openshift-logging ----- - -. If all the nodes have joined the cluster, perform the following steps, check if the cluster is in the process of recovering. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_cat/recovery?active_only=true ----- -+ -If there is no command output, the recovery process might be delayed or stalled by pending tasks. - -. Check if there are pending tasks. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- health |grep number_of_pending_tasks ----- - -. If there are pending tasks, monitor their status. -+ -If their status changes and indicates that the cluster is recovering, continue waiting. The recovery time varies according to the size of the cluster and other factors. -+ -Otherwise, if the status of the pending tasks does not change, this indicates that the recovery has stalled. - -. If it seems like the recovery has stalled, check if `cluster.routing.allocation.enable` is set to `none`. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_cluster/settings?pretty ----- - -. If `cluster.routing.allocation.enable` is set to `none`, set it to `all`. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_cluster/settings?pretty -X PUT -d '{"persistent": {"cluster.routing.allocation.enable":"all"}}' ----- - -. 
Check which indices are still red. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_cat/indices?v ----- - -. If any indices are still red, try to clear them by performing the following steps. - -.. Clear the cache. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=/_cache/clear?pretty ----- - -.. Increase the max allocation retries. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=/_settings?pretty -X PUT -d '{"index.allocation.max_retries":10}' ----- - -.. Delete all the scroll items. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_search/scroll/_all -X DELETE ----- - -.. Increase the timeout. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=/_settings?pretty -X PUT -d '{"index.unassigned.node_left.delayed_timeout":"10m"}' ----- - -. If the preceding steps do not clear the red indices, delete the indices individually. - -.. Identify the red index name. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_cat/indices?v ----- - -.. Delete the red index. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query= -X DELETE ----- - -. If there are no red indices and the cluster status is red, check for a continuous heavy processing load on a data node. - -.. Check if the Elasticsearch JVM Heap usage is high. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_nodes/stats?pretty ----- -+ -In the command output, review the `node_name.jvm.mem.heap_used_percent` field to determine the JVM Heap usage. - -.. Check for high CPU utilization. - -[role="_additional-resources"] -.Additional resources - -* Search for "Free up or increase disk space" in the Elasticsearch topic, link:https://www.elastic.co/guide/en/elasticsearch/reference/7.13/fix-common-cluster-issues.html#fix-red-yellow-cluster-status[Fix a red or yellow cluster status]. - -[id="elasticsearch-cluster-health-is-yellow"] -== Elasticsearch Cluster Health is Yellow - -Replica shards for at least one primary shard are not allocated to nodes. - -.Troubleshooting - -. Increase the node count by adjusting `nodeCount` in the `ClusterLogging` CR. - -[role="_additional-resources"] -.Additional resources - -//* Search for "Elasticsearch Disk Usage" in xref:../../logging/cluster-logging-dashboards.adoc#cluster-logging-dashboards-logging_cluster-logging-dashboards[OpenShift Logging dashboards]. -* xref:../../logging/config/cluster-logging-configuring-cr.adoc#cluster-logging-configuring-crd_cluster-logging-configuring-cr[About the Cluster Logging custom resource] -* xref:../../logging/config/cluster-logging-log-store.adoc#cluster-logging-elasticsearch-storage_cluster-logging-store[Configuring persistent storage for the log store] - -* Search for "Free up or increase disk space" in the Elasticsearch topic, link:https://www.elastic.co/guide/en/elasticsearch/reference/7.13/fix-common-cluster-issues.html#fix-red-yellow-cluster-status[Fix a red or yellow cluster status]. - - -// [id="elasticsearch-write-requests-rejection-jumps"] -// == Elasticsearch Write Requests Rejection Jumps -// -// .Troubleshooting -// TBD -// Note for writer: This is a warning alert and we haven't documented troubleshooting steps for warning alerts yet. I guess you can skip this in current release. 
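For the yellow cluster status described above, the following command is a minimal sketch of one way to raise `nodeCount`. It assumes the `ClusterLogging` CR is named `instance` and that a value of `4` is appropriate for your cluster; adjust both to your environment.

[source,terminal]
----
$ oc -n openshift-logging patch clusterlogging instance --type merge \
    -p '{"spec":{"logStore":{"elasticsearch":{"nodeCount":4}}}}'
----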
- -[id="elasticsearch-node-disk-low-watermark-reached"] -== Elasticsearch Node Disk Low Watermark Reached - -Elasticsearch does not allocate shards to nodes that https://www.elastic.co/guide/en/elasticsearch/reference/6.8/disk-allocator.html[reach the low watermark]. - -.Troubleshooting - -. Identify the node on which Elasticsearch is deployed. -+ -[source,terminal] ----- -oc -n openshift-logging get po -o wide ----- - -. Check if there are `unassigned shards`. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_cluster/health?pretty | grep unassigned_shards ----- - -. If there are unassigned shards, check the disk space on each node. -+ -[source,terminal] ----- -for pod in `oc -n openshift-logging get po -l component=elasticsearch -o jsonpath='{.items[*].metadata.name}'`; do echo $pod; oc -n openshift-logging exec -c elasticsearch $pod -- df -h /elasticsearch/persistent; done ----- - -. Check the `nodes.node_name.fs` field to determine the free disk space on that node. -+ -If the used disk percentage is above 85%, the node has exceeded the low watermark, and shards can no longer be allocated to this node. - -. Try to increase the disk space on all nodes. - -. If increasing the disk space is not possible, try adding a new data node to the cluster. - -. If adding a new data node is problematic, decrease the total cluster redundancy policy. - -.. Check the current `redundancyPolicy`. -+ -[source,terminal] ----- -oc -n openshift-logging get es elasticsearch -o jsonpath='{.spec.redundancyPolicy}' ----- -+ -[NOTE] -==== -If you are using a `ClusterLogging` CR, enter: - -[source,terminal] ----- -oc -n openshift-logging get cl -o jsonpath='{.items[*].spec.logStore.elasticsearch.redundancyPolicy}' ----- -==== - -.. If the cluster `redundancyPolicy` is higher than `SingleRedundancy`, set it to `SingleRedundancy` and save this change. - -. If the preceding steps do not fix the issue, delete the old indices. - -.. Check the status of all indices on Elasticsearch. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- indices ----- - -.. Identify an old index that can be deleted. - -.. Delete the index. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query= -X DELETE ----- - -[role="_additional-resources"] -.Additional resources - -* Search for "redundancyPolicy" in the "Sample `ClusterLogging` custom resource (CR)" in xref:../../logging/config/cluster-logging-configuring-cr.adoc#cluster-logging-configuring-crd_cluster-logging-configuring-cr[About the Cluster Logging custom resource] - - -[id="elasticsearch-node-disk-high-watermark-reached"] -== Elasticsearch Node Disk High Watermark Reached - -Elasticsearch attempts to relocate shards away from a node link:https://www.elastic.co/guide/en/elasticsearch/reference/6.8/disk-allocator.html[that has reached the high watermark]. - -.Troubleshooting - -. Identify the node on which Elasticsearch is deployed. -+ -[source,terminal] ----- -oc -n openshift-logging get po -o wide ----- - -. Check the disk space on each node. -+ -[source,terminal] ----- -for pod in `oc -n openshift-logging get po -l component=elasticsearch -o jsonpath='{.items[*].metadata.name}'`; do echo $pod; oc -n openshift-logging exec -c elasticsearch $pod -- df -h /elasticsearch/persistent; done ----- - -. Check if the cluster is rebalancing. 
-+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_cluster/health?pretty | grep relocating_shards ----- -+ -If the command output shows relocating shards, the High Watermark has been exceeded. The default value of the High Watermark is 90%. -+ -The shards relocate to a node with low disk usage that has not crossed any watermark threshold limits. - -. To allocate shards to a particular node, free up some space. - -. Try to increase the disk space on all nodes. - -. If increasing the disk space is not possible, try adding a new data node to the cluster. - -. If adding a new data node is problematic, decrease the total cluster redundancy policy. - -.. Check the current `redundancyPolicy`. -+ -[source,terminal] ----- -oc -n openshift-logging get es elasticsearch -o jsonpath='{.spec.redundancyPolicy}' ----- -+ -[NOTE] -==== -If you are using a `ClusterLogging` CR, enter: - -[source,terminal] ----- -oc -n openshift-logging get cl -o jsonpath='{.items[*].spec.logStore.elasticsearch.redundancyPolicy}' ----- -==== - -.. If the cluster `redundancyPolicy` is higher than `SingleRedundancy`, set it to `SingleRedundancy` and save this change. - -. If the preceding steps do not fix the issue, delete the old indices. - -.. Check the status of all indices on Elasticsearch. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- indices ----- - -.. Identify an old index that can be deleted. - -.. Delete the index. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query= -X DELETE ----- - -[role="_additional-resources"] -.Additional resources - -* Search for "redundancyPolicy" in the "Sample `ClusterLogging` custom resource (CR)" in xref:../../logging/config/cluster-logging-configuring-cr.adoc#cluster-logging-configuring-crd_cluster-logging-configuring-cr[About the Cluster Logging custom resource] - - -[id="elasticsearch-node-disk-flood-watermark-reached"] -== Elasticsearch Node Disk Flood Watermark Reached - -Elasticsearch enforces a read-only index block on every index that has both of these conditions: - -* One or more shards are allocated to the node. -* One or more disks exceed the https://www.elastic.co/guide/en/elasticsearch/reference/6.8/disk-allocator.html[flood stage]. - -.Troubleshooting - -. Check the disk space of the Elasticsearch node. -+ -[source,terminal] ----- -for pod in `oc -n openshift-logging get po -l component=elasticsearch -o jsonpath='{.items[*].metadata.name}'`; do echo $pod; oc -n openshift-logging exec -c elasticsearch $pod -- df -h /elasticsearch/persistent; done ----- -+ -Check the `nodes.node_name.fs` field to determine the free disk space on that node. - -. If the used disk percentage is above 95%, it signifies that the node has crossed the flood watermark. Writing is blocked for shards allocated on this particular node. - -. Try to increase the disk space on all nodes. - -. If increasing the disk space is not possible, try adding a new data node to the cluster. - -. If adding a new data node is problematic, decrease the total cluster redundancy policy. - -.. Check the current `redundancyPolicy`. -+ -[source,terminal] ----- -oc -n openshift-logging get es elasticsearch -o jsonpath='{.spec.redundancyPolicy}' ----- -+ -[NOTE] -==== -If you are using a `ClusterLogging` CR, enter: - -[source,terminal] ----- -oc -n openshift-logging get cl -o jsonpath='{.items[*].spec.logStore.elasticsearch.redundancyPolicy}' ----- -==== - -.. 
If the cluster `redundancyPolicy` is higher than `SingleRedundancy`, set it to `SingleRedundancy` and save this change. - -. If the preceding steps do not fix the issue, delete the old indices. - -.. Check the status of all indices on Elasticsearch. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- indices ----- - -.. Identify an old index that can be deleted. - -.. Delete the index. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query= -X DELETE ----- - - . Continue freeing up and monitoring the disk space until the used disk space drops below 90%. Then, unblock write to this particular node. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_all/_settings?pretty -X PUT -d '{"index.blocks.read_only_allow_delete": null}' ----- - -[role="_additional-resources"] -.Additional resources - -* Search for "redundancyPolicy" in the "Sample `ClusterLogging` custom resource (CR)" in xref:../../logging/config/cluster-logging-configuring-cr.adoc#cluster-logging-configuring-crd_cluster-logging-configuring-cr[About the Cluster Logging custom resource] - - -[id="elasticsearch-jvm-heap-use-is-high"] -== Elasticsearch JVM Heap Use is High - -The Elasticsearch node JVM Heap memory used is above 75%. - -.Troubleshooting - -Consider https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#heap-size-settings[increasing the heap size]. - -[id="aggregated-logging-system-cpu-is-high"] -== Aggregated Logging System CPU is High - -System CPU usage on the node is high. - -.Troubleshooting - -Check the CPU of the cluster node. Consider allocating more CPU resources to the node. - -[id="elasticsearch-process-cpu-is-high"] -== Elasticsearch Process CPU is High - -Elasticsearch process CPU usage on the node is high. - -.Troubleshooting - -Check the CPU of the cluster node. Consider allocating more CPU resources to the node. - -[id="elasticsearch-disk-space-is-running-low"] -== Elasticsearch Disk Space is Running Low - -The Elasticsearch Cluster is predicted to be out of disk space within the next 6 hours based on current disk usage. - -.Troubleshooting - -. Get the disk space of the Elasticsearch node. -+ -[source,terminal] ----- -for pod in `oc -n openshift-logging get po -l component=elasticsearch -o jsonpath='{.items[*].metadata.name}'`; do echo $pod; oc -n openshift-logging exec -c elasticsearch $pod -- df -h /elasticsearch/persistent; done ----- - -. In the command output, check the `nodes.node_name.fs` field to determine the free disk space on that node. - -. Try to increase the disk space on all nodes. - -. If increasing the disk space is not possible, try adding a new data node to the cluster. - -. If adding a new data node is problematic, decrease the total cluster redundancy policy. - -.. Check the current `redundancyPolicy`. -+ -[source,terminal] ----- -oc -n openshift-logging get es elasticsearch -o jsonpath='{.spec.redundancyPolicy}' ----- -+ -[NOTE] -==== -If you are using a `ClusterLogging` CR, enter: - -[source,terminal] ----- -oc -n openshift-logging get cl -o jsonpath='{.items[*].spec.logStore.elasticsearch.redundancyPolicy}' ----- -==== - -.. If the cluster `redundancyPolicy` is higher than `SingleRedundancy`, set it to `SingleRedundancy` and save this change. - -. If the preceding steps do not fix the issue, delete the old indices. - -.. Check the status of all indices on Elasticsearch. 
-+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- indices ----- - -.. Identify an old index that can be deleted. - -.. Delete the index. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query= -X DELETE ----- - -[role="_additional-resources"] -.Additional resources - -* Search for "redundancyPolicy" in the "Sample `ClusterLogging` custom resource (CR)" in xref:../../logging/config/cluster-logging-configuring-cr.adoc#cluster-logging-configuring-crd_cluster-logging-configuring-cr[About the Cluster Logging custom resource] - -* Search for "ElasticsearchDiskSpaceRunningLow" in xref:../../logging/troubleshooting/cluster-logging-alerts.adoc#cluster-logging-elasticsearch-rules_cluster-logging-alerts[About Elasticsearch alerting rules]. - -* Search for "Free up or increase disk space" in the Elasticsearch topic, link:https://www.elastic.co/guide/en/elasticsearch/reference/7.13/fix-common-cluster-issues.html#fix-red-yellow-cluster-status[Fix a red or yellow cluster status]. - - - -[id="elasticsearch-filedescriptor-usage-is-high"] -== Elasticsearch FileDescriptor Usage is high - -Based on current usage trends, the predicted number of file descriptors on the node is insufficient. - -.Troubleshooting - -Check and, if needed, configure the value of `max_file_descriptors` for each node, as described in the Elasticsearch link:https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html[File descriptors] topic. - -[role="_additional-resources"] -.Additional resources - -* Search for "ElasticsearchHighFileDescriptorUsage" in xref:../../logging/troubleshooting/cluster-logging-alerts.adoc#cluster-logging-elasticsearch-rules_cluster-logging-alerts[About Elasticsearch alerting rules]. -* Search for "File Descriptors In Use" in xref:../../logging/cluster-logging-dashboards.adoc#cluster-logging-dashboards-logging_cluster-logging-dashboards[OpenShift Logging dashboards]. - - - -// Follow up items: - -// `oc edit es elasticsearch` is not documented anywhere outside this topic. 
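As a rough way to see the file descriptor counts that this alert is based on, you can query the Elasticsearch nodes stats API. The following command is a sketch that assumes you substitute a real pod name for `<elasticsearch_pod_name>`; in the output, compare the `open_file_descriptors` and `max_file_descriptors` fields for each node.

[source,terminal]
----
$ oc exec -n openshift-logging -c elasticsearch <elasticsearch_pod_name> -- \
    es_util --query=_nodes/stats/process?pretty
----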
diff --git a/logging/troubleshooting/images b/logging/troubleshooting/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/logging/troubleshooting/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/logging/troubleshooting/modules b/logging/troubleshooting/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/logging/troubleshooting/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/logging/troubleshooting/snippets b/logging/troubleshooting/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/logging/troubleshooting/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/logging/v5_5/_attributes b/logging/v5_5/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/logging/v5_5/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/logging/v5_5/images b/logging/v5_5/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/logging/v5_5/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/logging/v5_5/logging-5-5-administration.adoc b/logging/v5_5/logging-5-5-administration.adoc deleted file mode 100644 index 338243b3df90..000000000000 --- a/logging/v5_5/logging-5-5-administration.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-administration-5-5"] -= Administering your logging deployment -include::_attributes/common-attributes.adoc[] -:context: logging-5.5-administration - -toc::[] - -//Installing the Red Hat OpenShift Logging Operator via webconsole -include::modules/logging-deploy-RHOL-console.adoc[leveloffset=+1] - -//Installing the Loki Operator via webconsole -include::modules/logging-deploy-loki-console.adoc[leveloffset=+1] - -//Generic installing operators from operator hub using CLI -include::modules/olm-installing-from-operatorhub-using-cli.adoc[leveloffset=+1] - -//Generic deleting operators from cluster using web console -include::modules/olm-deleting-operators-from-a-cluster-using-web-console.adoc[leveloffset=+1] - -//Generic deleting operators from a cluster using CLI -include::modules/olm-deleting-operators-from-a-cluster-using-cli.adoc[leveloffset=+1] diff --git a/logging/v5_5/logging-5-5-architecture.adoc b/logging/v5_5/logging-5-5-architecture.adoc deleted file mode 100644 index d6ea69fcb56d..000000000000 --- a/logging/v5_5/logging-5-5-architecture.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-architecture-5-5"] -= Understanding logging architecture -include::_attributes/common-attributes.adoc[] -:context: logging-5.5-architecture - -toc::[] - -:context: logging-5-5-architecture -include::modules/logging-architecture-overview.adoc[leveloffset=+1,lines=9..31] - -include::modules/logging-support-considerations.adoc[leveloffset=+1] diff --git a/logging/v5_5/logging-5-5-configuration.adoc b/logging/v5_5/logging-5-5-configuration.adoc deleted file mode 100644 index e78d97a6c6ed..000000000000 --- a/logging/v5_5/logging-5-5-configuration.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-configuration-5-5"] -= Configuring your logging deployment -include::_attributes/common-attributes.adoc[] -:context: logging-5.5-configuration - -toc::[] - -include::snippets/logging-crs-by-operator-snip.adoc[] - -include::snippets/logging-supported-config-snip.adoc[] - 
-include::modules/logging-multiline-except.adoc[leveloffset=+1] diff --git a/logging/v5_5/logging-5-5-getting-started.adoc b/logging/v5_5/logging-5-5-getting-started.adoc deleted file mode 100644 index bc2831906ad7..000000000000 --- a/logging/v5_5/logging-5-5-getting-started.adoc +++ /dev/null @@ -1,7 +0,0 @@ -:_content-type: ASSEMBLY - -[id="logging-5-5-getting-started"] -= Getting started with logging 5.5 - -:context: logging-5-5-getting-started -include::modules/logging-getting-started.adoc[lines=5..38] diff --git a/logging/v5_5/logging-5-5-reference.adoc b/logging/v5_5/logging-5-5-reference.adoc deleted file mode 100644 index a67d4f9013c4..000000000000 --- a/logging/v5_5/logging-5-5-reference.adoc +++ /dev/null @@ -1,7 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-reference-5-5"] -= Logging References -include::_attributes/common-attributes.adoc[] -:context: logging-5.5-reference - -toc::[] diff --git a/logging/v5_5/logging-5-5-release-notes.adoc b/logging/v5_5/logging-5-5-release-notes.adoc deleted file mode 100644 index 284b8b5f8cc9..000000000000 --- a/logging/v5_5/logging-5-5-release-notes.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-release-notes-5-5"] -= Logging 5.5 Release Notes -include::_attributes/common-attributes.adoc[] -:context: logging-5.5-release-notes - -toc::[] - -include::snippets/logging-compatibility-snip.adoc[] - -include::modules/logging-rn-5.5.10.adoc[leveloffset=+1] - -include::modules/logging-rn-5.5.9.adoc[leveloffset=+1] - -include::modules/logging-rn-5.5.8.adoc[leveloffset=+1] - -include::modules/logging-rn-5.5.7.adoc[leveloffset=+1] - -include::modules/logging-rn-5.5.6.adoc[leveloffset=+1] - -include::modules/logging-rn-5.5.5.adoc[leveloffset=+1] - -include::modules/logging-rn-5.5.4.adoc[leveloffset=+1] - -include::modules/logging-rn-5.5.3.adoc[leveloffset=+1] - -include::modules/logging-rn-5.5.2.adoc[leveloffset=+1] - -include::modules/logging-rn-5.5.1.adoc[leveloffset=+1] - -include::modules/logging-rn-5.5.0.adoc[leveloffset=+1] diff --git a/logging/v5_5/modules b/logging/v5_5/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/logging/v5_5/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/logging/v5_5/snippets b/logging/v5_5/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/logging/v5_5/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/logging/v5_6/_attributes b/logging/v5_6/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/logging/v5_6/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/logging/v5_6/images b/logging/v5_6/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/logging/v5_6/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/logging/v5_6/logging-5-6-administration.adoc b/logging/v5_6/logging-5-6-administration.adoc deleted file mode 100644 index 14b7b31be3e3..000000000000 --- a/logging/v5_6/logging-5-6-administration.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-administration-5-6"] -= Administering your logging deployment -include::_attributes/common-attributes.adoc[] -:context: logging-5.6-administration - -toc::[] - -//Installing the Red Hat OpenShift Logging Operator via webconsole -include::modules/logging-deploy-RHOL-console.adoc[leveloffset=+1] - -//Installing the Loki Operator via webconsole 
-include::modules/logging-deploy-loki-console.adoc[leveloffset=+1] - -//Generic installing operators from operator hub using CLI -include::modules/olm-installing-from-operatorhub-using-cli.adoc[leveloffset=+1] - -//Generic deleting operators from cluster using web console -include::modules/olm-deleting-operators-from-a-cluster-using-web-console.adoc[leveloffset=+1] - -//Generic deleting operators from a cluster using CLI -include::modules/olm-deleting-operators-from-a-cluster-using-cli.adoc[leveloffset=+1] diff --git a/logging/v5_6/logging-5-6-architecture.adoc b/logging/v5_6/logging-5-6-architecture.adoc deleted file mode 100644 index 08c96effd8bc..000000000000 --- a/logging/v5_6/logging-5-6-architecture.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-architecture-5-6"] -= Understanding logging architecture -include::_attributes/common-attributes.adoc[] -:context: logging-5.6-architecture - -toc::[] - - -include::modules/logging-architecture-overview.adoc[leveloffset=+1,lines=9..31] - -include::modules/logging-support-considerations.adoc[leveloffset=+1] diff --git a/logging/v5_6/logging-5-6-configuration.adoc b/logging/v5_6/logging-5-6-configuration.adoc deleted file mode 100644 index 35b9787a8d47..000000000000 --- a/logging/v5_6/logging-5-6-configuration.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-configuration-5-6"] -= Configuring your logging deployment -include::_attributes/common-attributes.adoc[] -:context: logging-5.6-configuration - -toc::[] - -include::snippets/logging-crs-by-operator-snip.adoc[] - -include::snippets/logging-supported-config-snip.adoc[] - -include::modules/logging-loki-retention.adoc[leveloffset=+1] - -include::modules/logging-multiline-except.adoc[leveloffset=+1] diff --git a/logging/v5_6/logging-5-6-getting-started.adoc b/logging/v5_6/logging-5-6-getting-started.adoc deleted file mode 100644 index c4cb0a606a71..000000000000 --- a/logging/v5_6/logging-5-6-getting-started.adoc +++ /dev/null @@ -1,7 +0,0 @@ -:_content-type: ASSEMBLY - -[id="logging-getting-started-5-6"] -= Getting started with logging 5.6 - -:context: logging-5-6-getting-started -include::modules/logging-getting-started.adoc[lines=5..38] diff --git a/logging/v5_6/logging-5-6-reference.adoc b/logging/v5_6/logging-5-6-reference.adoc deleted file mode 100644 index 704514dbc3ce..000000000000 --- a/logging/v5_6/logging-5-6-reference.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-reference-5-6"] -= Logging References -include::_attributes/common-attributes.adoc[] -:context: logging-5.6-reference - -toc::[] - -:leveloffset: +1 - -include::modules/logging-feature-reference-5.6.adoc[] - -include::modules/logging-5.6-api-ref.adoc[] - -:leveloffset: -1 diff --git a/logging/v5_6/logging-5-6-release-notes.adoc b/logging/v5_6/logging-5-6-release-notes.adoc deleted file mode 100644 index 227c5485d72a..000000000000 --- a/logging/v5_6/logging-5-6-release-notes.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-release-notes-5-6"] -= Logging 5.6 Release Notes -include::_attributes/common-attributes.adoc[] -:context: logging-5.6-release-notes - -toc::[] - -include::snippets/logging-compatibility-snip.adoc[] - -include::snippets/logging-stable-updates-snip.adoc[] - -include::modules/logging-rn-5.6.8.adoc[leveloffset=+1] - -include::modules/logging-rn-5.6.5.adoc[leveloffset=+1] - -include::modules/logging-rn-5.6.4.adoc[leveloffset=+1] - -include::modules/logging-rn-5.6.3.adoc[leveloffset=+1] - 
-include::modules/logging-rn-5.6.2.adoc[leveloffset=+1] - -include::modules/logging-rn-5.6.1.adoc[leveloffset=+1] - -include::modules/logging-rn-5.6.0.adoc[leveloffset=+1] diff --git a/logging/v5_6/modules b/logging/v5_6/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/logging/v5_6/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/logging/v5_6/snippets b/logging/v5_6/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/logging/v5_6/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/logging/v5_7/_attributes b/logging/v5_7/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/logging/v5_7/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/logging/v5_7/images b/logging/v5_7/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/logging/v5_7/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/logging/v5_7/logging-5-7-administration.adoc b/logging/v5_7/logging-5-7-administration.adoc deleted file mode 100644 index 1f54801bdf44..000000000000 --- a/logging/v5_7/logging-5-7-administration.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-administration-5-7"] -= Administering your logging deployment -include::_attributes/common-attributes.adoc[] -:context: logging-5.7-administration - -toc::[] - -//Installing the Red Hat OpenShift Logging Operator via webconsole -include::modules/logging-deploy-RHOL-console.adoc[leveloffset=+1] - -//Installing the Loki Operator via webconsole -include::modules/logging-deploy-loki-console.adoc[leveloffset=+1] - -//Generic installing operators from operator hub using CLI -include::modules/olm-installing-from-operatorhub-using-cli.adoc[leveloffset=+1] - -//Generic deleting operators from cluster using web console -include::modules/olm-deleting-operators-from-a-cluster-using-web-console.adoc[leveloffset=+1] - -//Generic deleting operators from a cluster using CLI -include::modules/olm-deleting-operators-from-a-cluster-using-cli.adoc[leveloffset=+1] diff --git a/logging/v5_7/logging-5-7-architecture.adoc b/logging/v5_7/logging-5-7-architecture.adoc deleted file mode 100644 index 86b1c12c5a04..000000000000 --- a/logging/v5_7/logging-5-7-architecture.adoc +++ /dev/null @@ -1,14 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-architecture-5-7"] -= Understanding logging architecture -include::_attributes/common-attributes.adoc[] -:context: logging-5.7-architecture - -toc::[] - -:context: logging-5-7-architecture-overview -include::modules/logging-architecture-overview.adoc[lines=9..31] - -include::modules/logging-support-considerations.adoc[leveloffset=+1] - -include::snippets/logging-5.7-outputs-snip.adoc[] diff --git a/logging/v5_7/logging-5-7-configuration.adoc b/logging/v5_7/logging-5-7-configuration.adoc deleted file mode 100644 index beeaef734e50..000000000000 --- a/logging/v5_7/logging-5-7-configuration.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-configuration-5-7"] -= Configuring your logging deployment -include::_attributes/common-attributes.adoc[] -:context: logging-5.7-configuration - -toc::[] - -include::snippets/logging-crs-by-operator-snip.adoc[] - -include::snippets/logging-supported-config-snip.adoc[] - -include::modules/logging-loki-retention.adoc[leveloffset=+1] - -include::modules/logging-multiline-except.adoc[leveloffset=+1] diff --git 
a/logging/v5_7/logging-5-7-getting-started.adoc b/logging/v5_7/logging-5-7-getting-started.adoc deleted file mode 100644 index 50fa6948a13d..000000000000 --- a/logging/v5_7/logging-5-7-getting-started.adoc +++ /dev/null @@ -1,7 +0,0 @@ -:_content-type: ASSEMBLY - -[id="logging-getting-started-5-7"] -= Getting started with logging 5.7 - -:context: logging-5-7-getting-started -include::modules/logging-getting-started.adoc[lines=5..38] diff --git a/logging/v5_7/logging-5-7-reference.adoc b/logging/v5_7/logging-5-7-reference.adoc deleted file mode 100644 index 0d9019e2fb4f..000000000000 --- a/logging/v5_7/logging-5-7-reference.adoc +++ /dev/null @@ -1,7 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-reference-5-7"] -= Logging References -include::_attributes/common-attributes.adoc[] -:context: logging-5.7-reference - -toc::[] diff --git a/logging/v5_7/logging-5-7-release-notes.adoc b/logging/v5_7/logging-5-7-release-notes.adoc deleted file mode 100644 index 3576f0c474a0..000000000000 --- a/logging/v5_7/logging-5-7-release-notes.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-release-notes-5-7"] -= Logging 5.7 Release Notes -:context: logging-5.7-release-notes - -toc::[] - -include::snippets/logging-compatibility-snip.adoc[] - -include::snippets/logging-stable-updates-snip.adoc[] - -include::modules/logging-rn-5.7.3.adoc[leveloffset=+1] - -include::modules/logging-rn-5.7.2.adoc[leveloffset=+1] - -include::modules/logging-rn-5.7.1.adoc[leveloffset=+1] - -include::modules/logging-rn-5.7.0.adoc[leveloffset=+1] diff --git a/logging/v5_7/modules b/logging/v5_7/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/logging/v5_7/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/logging/v5_7/snippets b/logging/v5_7/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/logging/v5_7/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/logging/viewing-resource-logs.adoc b/logging/viewing-resource-logs.adoc deleted file mode 100644 index d0d19b48b600..000000000000 --- a/logging/viewing-resource-logs.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -[id="vewing-resource-logs"] -= Viewing logs for a resource -include::_attributes/common-attributes.adoc[] -:context: viewing-resource-logs - -toc::[] - -You can view the logs for various resources, such as builds, deployments, and pods by using the OpenShift CLI (oc) and the web console. - -[NOTE] -==== -Resource logs are a default feature that provides limited log viewing capability. To enhance your log retrieving and viewing experience, it is recommended that you install xref:../logging/cluster-logging.adoc#cluster-logging[OpenShift Logging]. The {logging} aggregates all the logs from your {product-title} cluster, such as node system audit logs, application container logs, and infrastructure logs, into a dedicated log store. You can then query, discover, and visualize your log data through the xref:../logging/cluster-logging-visualizer.adoc#cluster-logging-visualizer-using[Kibana interface]. Resource logs do not access the {logging} log store. 
-==== - -include::modules/viewing-resource-logs-cli-console.adoc[leveloffset=+1] diff --git a/machine_management/_attributes b/machine_management/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/machine_management/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/machine_management/adding-rhel-compute.adoc b/machine_management/adding-rhel-compute.adoc deleted file mode 100644 index 0f0d484458e1..000000000000 --- a/machine_management/adding-rhel-compute.adoc +++ /dev/null @@ -1,52 +0,0 @@ -:_content-type: ASSEMBLY -[id="adding-rhel-compute"] -= Adding RHEL compute machines to an {product-title} cluster -include::_attributes/common-attributes.adoc[] -:context: adding-rhel-compute - -toc::[] - -In {product-title}, you can add {op-system-base-full} compute machines to a user-provisioned infrastructure cluster or an installation-provisioned infrastructure cluster on the `x86_64` architecture. You can use {op-system-base} as the operating system only on compute machines. - -include::modules/rhel-compute-overview.adoc[leveloffset=+1] - -include::modules/rhel-compute-requirements.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../nodes/nodes/nodes-nodes-working.adoc#nodes-nodes-working-deleting_nodes-nodes-working[Deleting nodes] - - -include::modules/csr-management.adoc[leveloffset=+2] - -[id="adding-rhel-compute-preparing-image-cloud"] -== Preparing an image for your cloud - -Amazon Machine Images (AMI) are required because various image formats cannot be used directly by AWS. You may use the AMIs that Red Hat has provided, or you can manually import your own images. The AMI must exist before the EC2 instance can be provisioned. You will need a valid AMI ID so that the correct {op-system-base} version needed for the compute machines is selected. - -include::modules/rhel-images-aws.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* You may also manually link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/image_builder_guide/sect-documentation-image_builder-chapter5-section_2[import {op-system-base} images to AWS]. - -include::modules/rhel-preparing-playbook-machine.adoc[leveloffset=+1] - -include::modules/rhel-preparing-node.adoc[leveloffset=+1] - -include::modules/rhel-attaching-instance-aws.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* See xref:../installing/installing_aws/installing-aws-account.adoc#installation-aws-permissions-iam-roles_installing-aws-account[Required AWS permissions for IAM roles]. 
- -include::modules/rhel-worker-tag.adoc[leveloffset=+1] - -include::modules/rhel-adding-node.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/rhel-ansible-parameters.adoc[leveloffset=+1] - -include::modules/rhel-removing-rhcos.adoc[leveloffset=+2] diff --git a/machine_management/applying-autoscaling.adoc b/machine_management/applying-autoscaling.adoc deleted file mode 100644 index afcb53b8b76e..000000000000 --- a/machine_management/applying-autoscaling.adoc +++ /dev/null @@ -1,64 +0,0 @@ -:_content-type: ASSEMBLY -[id="applying-autoscaling"] -= Applying autoscaling to an {product-title} cluster -include::_attributes/common-attributes.adoc[] -:context: applying-autoscaling - -toc::[] - -Applying autoscaling to an {product-title} cluster involves deploying a cluster autoscaler and then deploying machine autoscalers for each machine type in your cluster. - -[IMPORTANT] -==== -You can configure the cluster autoscaler only in clusters where the machine API is operational. -==== - -include::modules/cluster-autoscaler-about.adoc[leveloffset=+1] - -[id="configuring-clusterautoscaler"] -== Configuring the cluster autoscaler - -First, deploy the cluster autoscaler to manage automatic resource scaling in your {product-title} cluster. - -[NOTE] -==== -Because the cluster autoscaler is scoped to the entire cluster, you can make only one cluster autoscaler for the cluster. -==== - -include::modules/cluster-autoscaler-cr.adoc[leveloffset=+2] - -:FeatureName: cluster autoscaler -:FeatureResourceName: ClusterAutoscaler -include::modules/deploying-resource.adoc[leveloffset=+2] - -== Next steps - -* After you configure the cluster autoscaler, you must configure at least one machine autoscaler. - -include::modules/machine-autoscaler-about.adoc[leveloffset=+1] - -[id="configuring-machineautoscaler"] -== Configuring the machine autoscalers - -After you deploy the cluster autoscaler, deploy `MachineAutoscaler` resources that reference the compute machine sets that are used to scale the cluster. - -[IMPORTANT] -==== -You must deploy at least one `MachineAutoscaler` resource after you deploy the `ClusterAutoscaler` resource. -==== - -[NOTE] -==== -You must configure separate resources for each compute machine set. Remember that compute machine sets are different in each region, so consider whether you want to enable machine scaling in multiple regions. The compute machine set that you scale must have at least one machine in it. -==== - -include::modules/machine-autoscaler-cr.adoc[leveloffset=+2] - -:FeatureName: machine autoscaler -:FeatureResourceName: MachineAutoscaler -include::modules/deploying-resource.adoc[leveloffset=+2] - -[role="_additional-resources"] -== Additional resources - -* For more information about pod priority, see xref:../nodes/pods/nodes-pods-priority.adoc#nodes-pods-priority[Including pod priority in pod scheduling decisions in {product-title}]. 
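To connect the cluster autoscaler and machine autoscaler steps in this assembly, the following `MachineAutoscaler` manifest is a minimal sketch. The machine set name `worker-us-east-1a` and the replica bounds are illustrative placeholders; substitute the name of an existing compute machine set and limits that suit your cluster.

[source,yaml]
----
apiVersion: autoscaling.openshift.io/v1beta1
kind: MachineAutoscaler
metadata:
  name: worker-us-east-1a # Example name, conventionally matches the machine set
  namespace: openshift-machine-api
spec:
  minReplicas: 1 # Illustrative lower bound
  maxReplicas: 12 # Illustrative upper bound
  scaleTargetRef:
    apiVersion: machine.openshift.io/v1beta1
    kind: MachineSet
    name: worker-us-east-1a # Name of an existing compute machine set
----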
diff --git a/machine_management/capi-machine-management.adoc b/machine_management/capi-machine-management.adoc deleted file mode 100644 index 828aa46563d7..000000000000 --- a/machine_management/capi-machine-management.adoc +++ /dev/null @@ -1,103 +0,0 @@ -:_content-type: ASSEMBLY -[id="capi-machine-management"] -= Managing machines with the Cluster API -include::_attributes/common-attributes.adoc[] -:context: capi-machine-management - -toc::[] - -:FeatureName: Managing machines with the Cluster API -include::snippets/technology-preview.adoc[] - -The link:https://cluster-api.sigs.k8s.io/[Cluster API] is an upstream project that is integrated into {product-title} as a Technology Preview for Amazon Web Services (AWS), Google Cloud Platform (GCP), and Microsoft Azure clusters. You can use the Cluster API to create and manage compute machine sets and compute machines in your {product-title} cluster. This capability is in addition or an alternative to managing machines with the Machine API. - -For {product-title} {product-version} clusters, you can use the Cluster API to perform node host provisioning management actions after the cluster installation finishes. This system enables an elastic, dynamic provisioning method on top of public or private cloud infrastructure. - -With the Cluster API Technology Preview, you can create compute machines and compute machine sets on {product-title} clusters for supported providers. You can also explore the features that are enabled by this implementation that might not be available with the Machine API. - -[discrete] -[id="cluster-api-benefits_{context}"] -== Benefits - -By using the Cluster API, {product-title} users and developers are able to realize the following advantages: - -* The option to use upstream community Cluster API infrastructure providers which might not be supported by the Machine API. - -* The opportunity to collaborate with third parties who maintain machine controllers for infrastructure providers. - -* The ability to use the same set of Kubernetes tools for infrastructure management in {product-title}. - -* The ability to create compute machine sets by using the Cluster API that support features that are not available with the Machine API. - -[discrete] -[id="capi-tech-preview-limitations"] -== Limitations - -Using the Cluster API to manage machines is a Technology Preview feature and has the following limitations: - -* Only AWS, GCP, and Azure clusters are supported. - -* To use this feature, you must enable the `TechPreviewNoUpgrade` xref:../nodes/clusters/nodes-cluster-enabling-features.adoc#nodes-cluster-enabling-features-about_nodes-cluster-enabling[feature set]. Enabling this feature set cannot be undone and prevents minor version updates. - -* You must create the primary resources that the Cluster API requires manually. - -* You cannot manage control plane machines by using the Cluster API. - -* Migration of existing compute machine sets created by the Machine API to Cluster API compute machine sets is not supported. - -* Full feature parity with the Machine API is not available. - -//Cluster API architecture -include::modules/cluster-api-architecture.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../operators/operator-reference.adoc#cluster-capi-operator_cluster-operators-ref[Cluster CAPI Operator] - -[id="capi-sample-yaml-files"] -== Sample YAML files - -For the Cluster API Technology Preview, you must create the primary resources that the Cluster API requires manually. 
The following example YAML files show how to make these resources work together and how to configure settings for the machines that they create so that the machines are appropriate for your environment. - -//Sample YAML for a CAPI cluster resource -include::modules/capi-yaml-cluster.adoc[leveloffset=+2] - -The remaining Cluster API resources are provider-specific. Refer to the example YAML files for your cluster: - -* xref:../machine_management/capi-machine-management.adoc#capi-sample-yaml-files-aws[Sample YAML files for configuring Amazon Web Services clusters] - -* xref:../machine_management/capi-machine-management.adoc#capi-sample-yaml-files-gcp[Sample YAML files for configuring Google Cloud Platform clusters] - -[id="capi-sample-yaml-files-aws"] -=== Sample YAML files for configuring Amazon Web Services clusters - -Some Cluster API resources are provider-specific. The following example YAML files show configurations for an Amazon Web Services (AWS) cluster. - -//Sample YAML for a CAPI AWS provider resource -include::modules/capi-yaml-infrastructure-aws.adoc[leveloffset=+3] - -//Sample YAML for CAPI AWS machine template resource -include::modules/capi-yaml-machine-template-aws.adoc[leveloffset=+3] - -//Sample YAML for a CAPI AWS compute machine set resource -include::modules/capi-yaml-machine-set-aws.adoc[leveloffset=+3] - -[id="capi-sample-yaml-files-gcp"] -=== Sample YAML files for configuring Google Cloud Platform clusters - -Some Cluster API resources are provider-specific. The following example YAML files show configurations for a Google Cloud Platform (GCP) cluster. - -//Sample YAML for a CAPI GCP provider resource -include::modules/capi-yaml-infrastructure-gcp.adoc[leveloffset=+3] - -//Sample YAML for CAPI GCP machine template resource -include::modules/capi-yaml-machine-template-gcp.adoc[leveloffset=+3] - -//Sample YAML for a CAPI GCP compute machine set resource -include::modules/capi-yaml-machine-set-gcp.adoc[leveloffset=+3] - -//Creating a CAPI compute machine set -include::modules/capi-machine-set-creating.adoc[leveloffset=+1] - -//Troubleshooting clusters that use the Cluster API -include::modules/capi-troubleshooting.adoc[leveloffset=+1] diff --git a/machine_management/control_plane_machine_management/_attributes b/machine_management/control_plane_machine_management/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/machine_management/control_plane_machine_management/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/machine_management/control_plane_machine_management/cpmso-about.adoc b/machine_management/control_plane_machine_management/cpmso-about.adoc deleted file mode 100644 index cd2368cff6ac..000000000000 --- a/machine_management/control_plane_machine_management/cpmso-about.adoc +++ /dev/null @@ -1,50 +0,0 @@ -:_content-type: ASSEMBLY -[id="cpmso-about"] -= About control plane machine sets -include::_attributes/common-attributes.adoc[] -:context: cpmso-about - -toc::[] - -With control plane machine sets, you can automate management of the control plane machine resources within your {product-title} cluster. - -[IMPORTANT] -==== -Control plane machine sets cannot manage compute machines, and compute machine sets cannot manage control plane machines. -==== - -Control plane machine sets provide management capabilities for control plane machines that are similar to the capabilities that compute machine sets provide for compute machines.
However, these two types of machine sets are separate custom resources defined within the Machine API and have several fundamental differences in their architecture and functionality. - -//Control Plane Machine Set Operator overview -include::modules/cpmso-overview.adoc[leveloffset=+1] - -[id="cpmso-limitations_{context}"] -== Limitations - -The Control Plane Machine Set Operator has the following limitations: - -* The Operator requires the Machine API Operator to be operational and is therefore not supported on clusters with manually provisioned machines. When installing a {product-title} cluster with manually provisioned machines for a platform that creates an active generated `ControlPlaneMachineSet` custom resource (CR), you must remove the Kubernetes manifest files that define the control plane machine set as instructed in the installation process. - -* Only Amazon Web Services (AWS), Google Cloud Platform (GCP), Microsoft Azure, Nutanix, and VMware vSphere clusters are supported. - -* Only clusters with three control plane machines are supported. - -* Horizontal scaling of the control plane is not supported. - -* Deploying Azure control plane machines on Ephemeral OS disks increases risk for data loss and is not supported. - -* Deploying control plane machines as AWS Spot Instances, GCP preemptible VMs, or Azure Spot VMs is not supported. -+ -[IMPORTANT] -==== -Attempting to deploy control plane machines as AWS Spot Instances, GCP preemptible VMs, or Azure Spot VMs might cause the cluster to lose etcd quorum. A cluster that loses all control plane machines simultaneously is unrecoverable. -==== - -* Making changes to the control plane machine set during or prior to installation is not supported. You must make any changes to the control plane machine set only after installation. - -[role="_additional-resources"] -[id="additional-resources_cpmso-about"] -== Additional resources - -* xref:../../operators/operator-reference.adoc#control-plane-machine-set-operator_cluster-operators-ref[Control Plane Machine Set Operator reference] -* xref:../../rest_api/machine_apis/controlplanemachineset-machine-openshift-io-v1.adoc#controlplanemachineset-machine-openshift-io-v1[`ControlPlaneMachineSet` custom resource] \ No newline at end of file diff --git a/machine_management/control_plane_machine_management/cpmso-configuration.adoc b/machine_management/control_plane_machine_management/cpmso-configuration.adoc deleted file mode 100644 index 1030ebec5bc1..000000000000 --- a/machine_management/control_plane_machine_management/cpmso-configuration.adoc +++ /dev/null @@ -1,96 +0,0 @@ -:_content-type: ASSEMBLY -[id="cpmso-configuration"] -= Control plane machine set configuration -include::_attributes/common-attributes.adoc[] -:context: cpmso-configuration - -toc::[] - -These example YAML snippets show the base structure for a control plane machine set custom resource (CR) and platform-specific samples for provider specification and failure domain configurations. 
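As a rough orientation before the authoritative sample module that follows, the base structure typically looks like the following sketch; the placeholder values and comments are assumptions, and the provider-specific sections are covered later in this assembly.

[source,yaml]
----
apiVersion: machine.openshift.io/v1
kind: ControlPlaneMachineSet
metadata:
  name: cluster                        # assumption: the CR is a singleton named "cluster"
  namespace: openshift-machine-api
spec:
  replicas: 3                          # only three control plane machines are supported
  state: Active                        # or Inactive
  strategy:
    type: RollingUpdate
  selector:
    matchLabels:
      machine.openshift.io/cluster-api-machine-role: master
      machine.openshift.io/cluster-api-machine-type: master
  template:
    machineType: machines_v1beta1_machine_openshift_io
    machines_v1beta1_machine_openshift_io:
      metadata:
        labels:
          machine.openshift.io/cluster-api-machine-role: master
          machine.openshift.io/cluster-api-machine-type: master
      failureDomains:
        platform: <platform>           # provider-specific; see the failure domain samples in this assembly
      spec:
        providerSpec:
          value: {}                    # provider-specific; see the provider specification samples in this assembly
----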
- -//Sample YAML for a control plane machine set custom resource -include::modules/cpmso-yaml-sample-cr.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-getting-started.adoc#cpmso-getting-started[Getting started with control plane machine sets] - -* xref:../../machine_management/control_plane_machine_management/cpmso-using.adoc#cpmso-feat-config-update_cpmso-using[Updating the control plane configuration] - -[discrete] -[id="cpmso-sample-yaml-provider-specific_{context}"] -=== Provider-specific configuration - -The provider specification and failure domain sections of the control plane machine set resources are provider-specific. Refer to the example YAML for your cluster: - -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-aws_cpmso-configuration[Sample YAML snippets for configuring Amazon Web Services clusters] - -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-gcp_cpmso-configuration[Sample YAML snippets for configuring Google Cloud Platform clusters] - -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-azure_cpmso-configuration[Sample YAML snippets for configuring Microsoft Azure clusters] - -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-nutanix_cpmso-configuration[Sample YAML snippets for configuring Nutanix clusters] - -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-vsphere_cpmso-configuration[Sample YAML snippets for configuring VMware vSphere clusters] - -[id="cpmso-sample-yaml-aws_{context}"] -== Sample YAML for configuring Amazon Web Services clusters - -Some sections of the control plane machine set CR are provider-specific. The following example YAML snippets show provider specification and failure domain configurations for an Amazon Web Services (AWS) cluster. - -//Sample AWS provider specification -include::modules/cpmso-yaml-provider-spec-aws.adoc[leveloffset=+2] - -//Sample AWS failure domain configuration -include::modules/cpmso-yaml-failure-domain-aws.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-using.adoc#cpmso-supported-features-aws_cpmso-using[Enabling Amazon Web Services features for control plane machines] - -[id="cpmso-sample-yaml-gcp_{context}"] -== Sample YAML for configuring Google Cloud Platform clusters - -Some sections of the control plane machine set CR are provider-specific. The following example YAML snippets show provider specification and failure domain configurations for a Google Cloud Platform (GCP) cluster. - -//Sample GCP provider specification -include::modules/cpmso-yaml-provider-spec-gcp.adoc[leveloffset=+2] - -//Sample GCP failure domain configuration -include::modules/cpmso-yaml-failure-domain-gcp.adoc[leveloffset=+2] -//// -//To be added in a later PR -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-using.adoc#cpmso-supported-features-gcp_cpmso-using[Enabling Google Cloud Platform features for control plane machines] -//// -[id="cpmso-sample-yaml-azure_{context}"] -== Sample YAML for configuring Microsoft Azure clusters - -Some sections of the control plane machine set CR are provider-specific.
The following example YAML snippets show provider specification and failure domain configurations for an Azure cluster. - -//Sample Azure provider specification -include::modules/cpmso-yaml-provider-spec-azure.adoc[leveloffset=+2] - -//Sample Azure failure domain configuration -include::modules/cpmso-yaml-failure-domain-azure.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-using.adoc#cpmso-supported-features-azure_cpmso-using[Enabling Microsoft Azure features for control plane machines] - -[id="cpmso-sample-yaml-nutanix_{context}"] -== Sample YAML for configuring Nutanix clusters - -Some sections of the control plane machine set CR are provider-specific. The following example YAML snippet shows a provider specification configuration for a Nutanix cluster. - -//Sample Nutanix provider specification -include::modules/cpmso-yaml-provider-spec-nutanix.adoc[leveloffset=+2] - -[id="cpmso-sample-yaml-vsphere_{context}"] -== Sample YAML for configuring VMware vSphere clusters - -Some sections of the control plane machine set CR are provider-specific. The following example YAML snippet shows a provider specification configuration for a VMware vSphere cluster. - -//Sample VMware vSphere provider specification -include::modules/cpmso-yaml-provider-spec-vsphere.adoc[leveloffset=+2] diff --git a/machine_management/control_plane_machine_management/cpmso-disabling.adoc b/machine_management/control_plane_machine_management/cpmso-disabling.adoc deleted file mode 100644 index 0dfdf337985d..000000000000 --- a/machine_management/control_plane_machine_management/cpmso-disabling.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_content-type: ASSEMBLY -[id="cpmso-disabling"] -= Disabling the control plane machine set -include::_attributes/common-attributes.adoc[] -:context: cpmso-disabling - -toc::[] - -The `.spec.state` field in an activated `ControlPlaneMachineSet` custom resource (CR) cannot be changed from `Active` to `Inactive`. To disable the control plane machine set, you must delete the CR so that it is removed from the cluster. - -When you delete the CR, the Control Plane Machine Set Operator performs cleanup operations and disables the control plane machine set. The Operator then removes the CR from the cluster and creates an inactive control plane machine set with default settings. - -//Deleting the control plane machine set -include::modules/cpmso-deleting.adoc[leveloffset=+1] - -//Checking the control plane machine set custom resource status -include::modules/cpmso-checking-status.adoc[leveloffset=+1] - -[id="cpmso-reenabling_{context}"] -== Re-enabling the control plane machine set - -To re-enable the control plane machine set, you must ensure that the configuration in the CR is correct for your cluster and activate it. 
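In practice, activation comes down to setting the `state` field of the CR to `Active`. A hedged sketch of the relevant field, assuming the CR keeps its default singleton name in the `openshift-machine-api` namespace:

[source,yaml]
----
apiVersion: machine.openshift.io/v1
kind: ControlPlaneMachineSet
metadata:
  name: cluster                 # assumption: default singleton name
  namespace: openshift-machine-api
spec:
  state: Active                 # set to Active only after verifying the rest of the configuration
----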
- -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-getting-started.adoc#cpmso-activating_cpmso-getting-started[Activating the control plane machine set custom resource] diff --git a/machine_management/control_plane_machine_management/cpmso-getting-started.adoc b/machine_management/control_plane_machine_management/cpmso-getting-started.adoc deleted file mode 100644 index 7e52420a13d3..000000000000 --- a/machine_management/control_plane_machine_management/cpmso-getting-started.adoc +++ /dev/null @@ -1,82 +0,0 @@ -:_content-type: ASSEMBLY -[id="cpmso-getting-started"] -= Getting started with control plane machine sets -include::_attributes/common-attributes.adoc[] -:context: cpmso-getting-started - -toc::[] - -The process for getting started with control plane machine sets depends on the state of the `ControlPlaneMachineSet` custom resource (CR) in your cluster. - -Clusters with an active generated CR:: Clusters that have a generated CR with an active state use the control plane machine set by default. No administrator action is required. - -Clusters with an inactive generated CR:: For clusters that include an inactive generated CR, you must review the CR configuration and xref:../../machine_management/control_plane_machine_management/cpmso-getting-started.adoc#cpmso-activating_cpmso-getting-started[activate the CR]. - -Clusters without a generated CR:: For clusters that do not include a generated CR, you must xref:../../machine_management/control_plane_machine_management/cpmso-getting-started.adoc#cpmso-creating-cr_cpmso-getting-started[create and activate a CR] with the appropriate configuration for your cluster. - -If you are uncertain about the state of the `ControlPlaneMachineSet` CR in your cluster, you can xref:../../machine_management/control_plane_machine_management/cpmso-getting-started.adoc#cpmso-checking-status_cpmso-getting-started[verify the CR status]. - -[id="cpmso-platform-matrix_{context}"] -== Supported cloud providers - -In {product-title} {product-version}, the control plane machine set is supported for Amazon Web Services (AWS), Google Cloud Platform (GCP), Microsoft Azure, Nutanix, and VMware vSphere clusters. - -The status of the control plane machine set after installation depends on your cloud provider and the version of {product-title} that you installed on your cluster. - -.Control plane machine set implementation for {product-title} {product-version} -[cols="<.^5,^.^4,^.^4,^.^4"] -|==== -|Cloud provider |Active by default |Generated CR |Manual CR required - -|Amazon Web Services (AWS) -|X ^[1]^ -|X -| - -|Google Cloud Platform (GCP) -|X ^[2]^ -|X -| - -|Microsoft Azure -|X ^[2]^ -|X -| - -|Nutanix -|X -|X -| - -|VMware vSphere -| -| -|X -|==== -[.small] --- -1. AWS clusters that are upgraded from version 4.11 or earlier require xref:../../machine_management/control_plane_machine_management/cpmso-getting-started.adoc#cpmso-activating_cpmso-getting-started[CR activation]. -2. GCP and Azure clusters that are upgraded from version 4.12 or earlier require xref:../../machine_management/control_plane_machine_management/cpmso-getting-started.adoc#cpmso-activating_cpmso-getting-started[CR activation]. 
--- - -//Checking the control plane machine set custom resource state -include::modules/cpmso-checking-status.adoc[leveloffset=+1] - -//Activating the control plane machine set custom resource -include::modules/cpmso-activating.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-configuration[Control plane machine set configuration] - -//Creating a control plane machine set custom resource -include::modules/cpmso-creating-cr.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-using.adoc#cpmso-feat-config-update_cpmso-using[Updating the control plane configuration] -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-configuration[Control plane machine set configuration] -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-aws_cpmso-configuration[Sample YAML for configuring Amazon Web Services clusters] -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-gcp_cpmso-configuration[Sample YAML for configuring Google Cloud Platform clusters] -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-azure_cpmso-configuration[Sample YAML for configuring Microsoft Azure clusters] -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-vsphere_cpmso-configuration[Sample YAML for configuring VMware vSphere clusters] diff --git a/machine_management/control_plane_machine_management/cpmso-resiliency.adoc b/machine_management/control_plane_machine_management/cpmso-resiliency.adoc deleted file mode 100644 index edf92eb33e7d..000000000000 --- a/machine_management/control_plane_machine_management/cpmso-resiliency.adoc +++ /dev/null @@ -1,39 +0,0 @@ -:_content-type: ASSEMBLY -[id="cpmso-resiliency"] -= Control plane resiliency and recovery -include::_attributes/common-attributes.adoc[] -:context: cpmso-resiliency - -toc::[] - -You can use the control plane machine set to improve the resiliency of the control plane for your {product-title} cluster. - -[id="cpmso-failure-domains_{context}"] -== High availability and fault tolerance with failure domains - -When possible, the control plane machine set spreads the control plane machines across multiple failure domains. This configuration provides high availability and fault tolerance within the control plane. This strategy can help protect the control plane when issues arise within the infrastructure provider. 
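For illustration, a hedged sketch of what spreading the control plane across failure domains can look like on AWS; the zone and subnet names are hypothetical, and the platform-specific samples referenced below are authoritative.

[source,yaml]
----
failureDomains:
  platform: AWS
  aws:
  - placement:
      availabilityZone: us-east-1a          # one control plane machine per zone
    subnet:
      type: Filters
      filters:
      - name: tag:Name
        values:
        - <cluster_id>-private-us-east-1a
  - placement:
      availabilityZone: us-east-1b
    subnet:
      type: Filters
      filters:
      - name: tag:Name
        values:
        - <cluster_id>-private-us-east-1b
  - placement:
      availabilityZone: us-east-1c
    subnet:
      type: Filters
      filters:
      - name: tag:Name
        values:
        - <cluster_id>-private-us-east-1c
----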
- -//Failure domain platform support and configuration -include::modules/cpmso-failure-domains-provider.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-yaml-failure-domain-aws_cpmso-configuration[Sample Amazon Web Services failure domain configuration] - -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-yaml-failure-domain-gcp_cpmso-configuration[Sample Google Cloud Platform failure domain configuration] - -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-yaml-failure-domain-azure_cpmso-configuration[Sample Microsoft Azure failure domain configuration] - -//Balancing control plane machines -include::modules/cpmso-failure-domains-balancing.adoc[leveloffset=+2] - -//Recovery of the failed control plane machines -include::modules/cpmso-control-plane-recovery.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/deploying-machine-health-checks.adoc#deploying-machine-health-checks[Deploying machine health checks] - -//Quorum protection with machine lifecycle hooks -include::modules/machine-lifecycle-hook-deletion-etcd.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/deleting-machine.adoc#machine-lifecycle-hook-deletion_deleting-machine[Lifecycle hooks for the machine deletion phase] \ No newline at end of file diff --git a/machine_management/control_plane_machine_management/cpmso-troubleshooting.adoc b/machine_management/control_plane_machine_management/cpmso-troubleshooting.adoc deleted file mode 100644 index 0a04b3553105..000000000000 --- a/machine_management/control_plane_machine_management/cpmso-troubleshooting.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -[id="cpmso-troubleshooting"] -= Troubleshooting the control plane machine set -include::_attributes/common-attributes.adoc[] -:context: cpmso-troubleshooting - -toc::[] - -Use the information in this section to understand and recover from issues you might encounter. 
- -//Checking the control plane machine set custom resource status -include::modules/cpmso-checking-status.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-getting-started.adoc#cpmso-activating_cpmso-getting-started[Activating the control plane machine set custom resource] -* xref:../../machine_management/control_plane_machine_management/cpmso-getting-started.adoc#cpmso-creating-cr_cpmso-getting-started[Creating a control plane machine set custom resource] - -//Adding a missing Azure internal load balancer -include::modules/cpmso-ts-ilb-missing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-yaml-provider-spec-azure_cpmso-configuration[Sample Azure provider specification] - -//Recovering a degraded etcd Operator after a machine health check operation -include::modules/cpmso-ts-mhc-etcd-degraded.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[Restoring to a previous cluster state] \ No newline at end of file diff --git a/machine_management/control_plane_machine_management/cpmso-using.adoc b/machine_management/control_plane_machine_management/cpmso-using.adoc deleted file mode 100644 index 52128df5138a..000000000000 --- a/machine_management/control_plane_machine_management/cpmso-using.adoc +++ /dev/null @@ -1,114 +0,0 @@ -:_content-type: ASSEMBLY -[id="cpmso-using"] -= Managing control plane machines with control plane machine sets -include::_attributes/common-attributes.adoc[] -:context: cpmso-using - -toc::[] - -Control plane machine sets automate several essential aspects of control plane management. - -//Vertical resizing of the control plane -//include::modules/cpmso-feat-vertical-resize.adoc[leveloffset=+1] - -//Updating the control plane configuration -include::modules/cpmso-feat-config-update.adoc[leveloffset=+1] - -//Automatically updating the control plane configuration -include::modules/cpmso-feat-auto-update.adoc[leveloffset=+2] - -//Testing changes to the control plane configuration -include::modules/cpmso-feat-test-changes.adoc[leveloffset=+2] - -[id="cpmso-supported-features-aws_{context}"] -== Enabling Amazon Web Services features for control plane machines - -You can enable Amazon Web Services (AWS) features on control plane machines by changing the configuration of your control plane machine set. When you save an update to the control plane machine set, the Control Plane Machine Set Operator updates the control plane machines according to your configured update strategy. 
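For example, selecting a larger instance type, which a module below covers in full, comes down to editing one provider specification field. A hedged excerpt with an example instance type:

[source,yaml]
----
spec:
  template:
    machines_v1beta1_machine_openshift_io:
      spec:
        providerSpec:
          value:
            instanceType: m6i.2xlarge   # example value; choose a size that meets control plane requirements
----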
- -:context: cpmso-using-aws -//Restricting the API server to private (AWS control plane machine set version) -include::modules/private-clusters-setting-api-private.adoc[leveloffset=+2] -:context: cpmso-using - -//Selecting a larger Amazon Web Services instance type for control plane machines -include::modules/cpms-changing-aws-instance-type.adoc[leveloffset=+2] - -//Machine sets that enable the Amazon EC2 Instance Metadata Service -include::modules/machineset-imds-options.adoc[leveloffset=+2] - -//Creating machines that use the Amazon EC2 Instance Metadata Service -include::modules/machineset-creating-imds-options.adoc[leveloffset=+3] - -//Machine sets that deploy machines as Dedicated Instances -include::modules/machineset-dedicated-instances.adoc[leveloffset=+2] - -//Creating Dedicated Instances by using machine sets -include::modules/machineset-creating-dedicated-instances.adoc[leveloffset=+3] - -[id="cpmso-supported-features-azure_{context}"] -== Enabling Microsoft Azure features for control plane machines - -You can enable Microsoft Azure features on control plane machines by changing the configuration of your control plane machine set. When you save an update to the control plane machine set, the Control Plane Machine Set Operator updates the control plane machines according to your configured update strategy. - -:context: cpmso-using-azure -//Restricting the API server to private (Azure control plane machine set version) -include::modules/private-clusters-setting-api-private.adoc[leveloffset=+2] -:context: cpmso-using - -//Selecting an Azure Marketplace image -include::modules/installation-azure-marketplace-subscribe.adoc[leveloffset=+2] - -//Enabling Azure boot diagnostics -include::modules/machineset-azure-boot-diagnostics.adoc[leveloffset=+2] - -//Machine sets that deploy machines on ultra disks as data disks -include::modules/machineset-azure-ultra-disk.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* link:https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types#ultra-disks[Microsoft Azure ultra disks documentation] - -//Creating machines on ultra disks by using machine sets -include::modules/machineset-creating-azure-ultra-disk.adoc[leveloffset=+3] - -//Troubleshooting resources for machine sets that enable ultra disks -include::modules/machineset-troubleshooting-azure-ultra-disk.adoc[leveloffset=+3] - -//Enabling customer-managed encryption keys for a machine set -include::modules/machineset-customer-managed-encryption-azure.adoc[leveloffset=+2] - -// Accelerated Networking for Microsoft Azure VMs -include::modules/machineset-azure-accelerated-networking.adoc[leveloffset=+2] - -//Not applicable for 4.12, possibly 4.13? -//[role="_additional-resources"] -//.Additional resources -//* xref:../../installing/installing_azure/installing-azure-customizations.adoc#machineset-azure-enabling-accelerated-networking-new-install_installing-azure-customizations[Enabling Accelerated Networking during installation] - -// Enabling Accelerated Networking on an existing Microsoft Azure cluster -include::modules/machineset-azure-enabling-accelerated-networking-existing.adoc[leveloffset=+3] - -[id="cpmso-supported-features-gcp_{context}"] -== Enabling Google Cloud Platform features for control plane machines - -You can enable Google Cloud Platform (GCP) features on control plane machines by changing the configuration of your control plane machine set. 
When you save an update to the control plane machine set, the Control Plane Machine Set Operator updates the control plane machines according to your configured update strategy. - -//Note: GCP GPU features should be compatible with CPMS, but dev cannot think of a use case. Leaving them out to keep things less cluttered. If a customer use case emerges, we can just add the necessary modules in here. - -//Configuring persistent disk types by using machine sets -include::modules/machineset-gcp-pd-disk-types.adoc[leveloffset=+2] - -//Configuring Confidential VM by using machine sets -include::modules/machineset-gcp-confidential-vm.adoc[leveloffset=+2] - -//Configuring Shielded VM options by using machine sets -include::modules/machineset-gcp-shielded-vms.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm[What is Shielded VM?] -** link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#secure-boot[Secure Boot] -** link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#vtpm[Virtual Trusted Platform Module (vTPM)] -** link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#integrity-monitoring[Integrity monitoring] - -//Enabling customer-managed encryption keys for a machine set -include::modules/machineset-gcp-enabling-customer-managed-encryption.adoc[leveloffset=+2] \ No newline at end of file diff --git a/machine_management/control_plane_machine_management/images b/machine_management/control_plane_machine_management/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/machine_management/control_plane_machine_management/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/machine_management/control_plane_machine_management/modules b/machine_management/control_plane_machine_management/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/machine_management/control_plane_machine_management/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/machine_management/control_plane_machine_management/snippets b/machine_management/control_plane_machine_management/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/machine_management/control_plane_machine_management/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/machine_management/creating-infrastructure-machinesets.adoc b/machine_management/creating-infrastructure-machinesets.adoc deleted file mode 100644 index d7197242bb93..000000000000 --- a/machine_management/creating-infrastructure-machinesets.adoc +++ /dev/null @@ -1,137 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-infrastructure-machinesets"] -= Creating infrastructure machine sets -include::_attributes/common-attributes.adoc[] -:context: creating-infrastructure-machinesets - -toc::[] - -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - - -You can use infrastructure machine sets to create machines that host only infrastructure components, such as the default router, the integrated container image registry, and the components for cluster metrics and monitoring. These infrastructure machines are not counted toward the total number of subscriptions that are required to run the environment. - -In a production deployment, it is recommended that you deploy at least three machine sets to hold infrastructure components. 
Both OpenShift Logging and {SMProductName} deploy Elasticsearch, which requires three instances to be installed on different nodes. Each of these nodes can be deployed to different availability zones for high availability. This configuration requires three different machine sets, one for each availability zone. In global Azure regions that do not have multiple availability zones, you can use availability sets to ensure high availability. - -include::modules/infrastructure-components.adoc[leveloffset=+1] - -For information about infrastructure nodes and which components can run on infrastructure nodes, see the "Red Hat OpenShift control plane and infrastructure nodes" section in the link:https://www.redhat.com/en/resources/openshift-subscription-sizing-guide[OpenShift sizing and subscription guide for enterprise Kubernetes] document. - -To create an infrastructure node, you can xref:../machine_management/creating-infrastructure-machinesets.adoc#machineset-creating_creating-infrastructure-machinesets[use a machine set], xref:../machine_management/creating-infrastructure-machinesets.adoc#creating-an-infra-node_creating-infrastructure-machinesets[label the node], or xref:../machine_management/creating-infrastructure-machinesets.adoc#creating-infra-machines_creating-infrastructure-machinesets[use a machine config pool]. - -[id="creating-infrastructure-machinesets-production"] -== Creating infrastructure machine sets for production environments - -In a production deployment, it is recommended that you deploy at least three compute machine sets to hold infrastructure components. Both OpenShift Logging and {SMProductName} deploy Elasticsearch, which requires three instances to be installed on different nodes. Each of these nodes can be deployed to different availability zones for high availability. A configuration like this requires three different compute machine sets, one for each availability zone. In global Azure regions that do not have multiple availability zones, you can use availability sets to ensure high availability. - -[id="creating-infrastructure-machinesets-clouds"] -=== Creating infrastructure machine sets for different clouds - -Use the sample compute machine set for your cloud. - -include::modules/machineset-yaml-alibaba.adoc[leveloffset=+3] - -//Machine set parameters for Alibaba Cloud usage statistics -[discrete] -include::modules/machineset-yaml-alibaba-usage-stats.adoc[leveloffset=+4] - -include::modules/machineset-yaml-aws.adoc[leveloffset=+3] - -Machine sets running on AWS support non-guaranteed xref:../machine_management/creating_machinesets/creating-machineset-aws.adoc#machineset-non-guaranteed-instance_creating-machineset-aws[Spot Instances]. You can save on costs by using Spot Instances at a lower price compared to -On-Demand Instances on AWS. xref:../machine_management/creating_machinesets/creating-machineset-aws.adoc#machineset-creating-non-guaranteed-instance_creating-machineset-aws[Configure Spot Instances] by adding `spotMarketOptions` to the `MachineSet` YAML file. - -include::modules/machineset-yaml-azure.adoc[leveloffset=+3] - -Machine sets running on Azure support non-guaranteed xref:../machine_management/creating_machinesets/creating-machineset-azure.adoc#machineset-non-guaranteed-instance_creating-machineset-azure[Spot VMs]. You can save on costs by using Spot VMs at a lower price compared to standard VMs on Azure. 
You can xref:../machine_management/creating_machinesets/creating-machineset-azure.adoc#machineset-creating-non-guaranteed-instance_creating-machineset-azure[configure Spot VMs] by adding `spotVMOptions` to the `MachineSet` YAML file. - -[role="_additional-resources"] -.Additional resources -* xref:../machine_management/creating_machinesets/creating-machineset-azure.adoc#installation-azure-marketplace-subscribe_creating-machineset-azure[Selecting an Azure Marketplace image] - -include::modules/machineset-yaml-azure-stack-hub.adoc[leveloffset=+3] - -[NOTE] -==== -Machine sets running on Azure Stack Hub do not support non-guaranteed Spot VMs. -==== - -include::modules/machineset-yaml-ibm-cloud.adoc[leveloffset=+3] - -include::modules/machineset-yaml-gcp.adoc[leveloffset=+3] - -Machine sets running on GCP support non-guaranteed xref:../machine_management/creating_machinesets/creating-machineset-gcp.adoc#machineset-non-guaranteed-instance_creating-machineset-gcp[preemptible VM instances]. You can save on costs by using preemptible VM instances at a lower price -compared to normal instances on GCP. You can xref:../machine_management/creating_machinesets/creating-machineset-gcp.adoc#machineset-creating-non-guaranteed-instance_creating-machineset-gcp[configure preemptible VM instances] by adding `preemptible` to the `MachineSet` YAML file. - -include::modules/machineset-yaml-nutanix.adoc[leveloffset=+3] - -include::modules/machineset-yaml-osp.adoc[leveloffset=+3] - -include::modules/machineset-yaml-vsphere.adoc[leveloffset=+3] - -include::modules/machineset-creating.adoc[leveloffset=+2] - -include::modules/creating-an-infra-node.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:moving-resources-to-infrastructure-machinesets[Moving resources to infrastructure machine sets] - -include::modules/creating-infra-machines.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../architecture/control-plane.adoc#architecture-machine-config-pools_control-plane[Node configuration management with machine config pools] for more information on grouping infra machines in a custom pool. - -[id="assigning-machineset-resources-to-infra-nodes"] -== Assigning machine set resources to infrastructure nodes - -After creating an infrastructure machine set, the `worker` and `infra` roles are applied to new infra nodes. Nodes with the `infra` role applied are not counted toward the total number of subscriptions that are required to run the environment, even when the `worker` role is also applied. - -However, with an infra node being assigned as a worker, there is a chance user workloads could get inadvertently assigned to an infra node. To avoid this, you can apply a taint to the infra node and tolerations for the pods you want to control. - -include::modules/binding-infra-node-workloads-using-taints-tolerations.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../nodes/scheduling/nodes-scheduler-about.adoc#nodes-scheduler-about[Controlling pod placement using the scheduler] for general information on scheduling a pod to a node. -* See xref:moving-resources-to-infrastructure-machinesets[Moving resources to infrastructure machine sets] for instructions on scheduling pods to infra nodes. - -[id="moving-resources-to-infrastructure-machinesets"] -== Moving resources to infrastructure machine sets - -Some of the infrastructure resources are deployed in your cluster by default. 
You can move them to the infrastructure machine sets that you created by adding the infrastructure node selector, as shown: - -[source,yaml] ---- -spec: - nodePlacement: <1> - nodeSelector: - matchLabels: - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved ---- -<1> Add a `nodeSelector` parameter with the appropriate value to the component you want to move. You can use a `nodeSelector` in the format shown or use `<key>: <value>` pairs, based on the value specified for the node. If you added a taint to the infrastructure node, also add a matching toleration. - -Applying a specific node selector to all infrastructure components causes {product-title} to xref:../machine_management/creating-infrastructure-machinesets.adoc#moving-resources-to-infrastructure-machinesets[schedule those workloads on nodes with that label]. - -include::modules/infrastructure-moving-router.adoc[leveloffset=+2] - -include::modules/infrastructure-moving-registry.adoc[leveloffset=+2] - -include::modules/infrastructure-moving-monitoring.adoc[leveloffset=+2] - -include::modules/infrastructure-moving-logging.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../monitoring/configuring-the-monitoring-stack.adoc#moving-monitoring-components-to-different-nodes_configuring-the-monitoring-stack[the monitoring documentation] for the general instructions on moving {product-title} components. diff --git a/machine_management/creating_machinesets/_attributes b/machine_management/creating_machinesets/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/machine_management/creating_machinesets/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/machine_management/creating_machinesets/creating-machineset-alibaba.adoc b/machine_management/creating_machinesets/creating-machineset-alibaba.adoc deleted file mode 100644 index 391471881e89..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-alibaba.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-alibaba"] -= Creating a compute machine set on Alibaba Cloud -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-alibaba - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on Alibaba Cloud. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines.
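As an illustration of what such an infrastructure machine set involves (a provider-agnostic, hedged excerpt; the sample included below shows the complete Alibaba Cloud resource), the machine set template typically labels the new nodes with the `infra` role and can taint them so that only workloads with a matching toleration are scheduled there:

[source,yaml]
----
spec:
  template:
    spec:
      metadata:
        labels:
          node-role.kubernetes.io/infra: ""   # apply the infra node role to new nodes
      taints:
      - key: node-role.kubernetes.io/infra
        effect: NoSchedule                    # keep workloads without a matching toleration off these nodes
----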
- -//[IMPORTANT] admonition for UPI -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -//Sample YAML for a compute machine set custom resource on Alibaba Cloud -include::modules/machineset-yaml-alibaba.adoc[leveloffset=+1] - -//Machine set parameters for Alibaba Cloud usage statistics -include::modules/machineset-yaml-alibaba-usage-stats.adoc[leveloffset=+2] - -//Creating a compute machine set -include::modules/machineset-creating.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/creating-machineset-aws.adoc b/machine_management/creating_machinesets/creating-machineset-aws.adoc deleted file mode 100644 index 912e18199775..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-aws.adoc +++ /dev/null @@ -1,42 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-aws"] -= Creating a compute machine set on AWS -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-aws - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on Amazon Web Services (AWS). For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. - -//[IMPORTANT] admonition for UPI -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -//Sample YAML for a compute machine set custom resource on AWS -include::modules/machineset-yaml-aws.adoc[leveloffset=+1] - -//Creating a compute machine set -include::modules/machineset-creating.adoc[leveloffset=+1] - -//Machine sets that enable the Amazon EC2 Instance Metadata Service -include::modules/machineset-imds-options.adoc[leveloffset=+1] - -//Creating machines that use the Amazon EC2 Instance Metadata Service -include::modules/machineset-creating-imds-options.adoc[leveloffset=+2] - -//Machine sets that deploy machines as Dedicated Instances -include::modules/machineset-dedicated-instances.adoc[leveloffset=+1] - -//Creating Dedicated Instances by using machine sets -include::modules/machineset-creating-dedicated-instances.adoc[leveloffset=+2] - -//Machine sets that deploy machines as Spot Instances -include::modules/machineset-non-guaranteed-instance.adoc[leveloffset=+1] - -//Creating Spot Instances by using compute machine sets -include::modules/machineset-creating-non-guaranteed-instances.adoc[leveloffset=+2] - -//Adding a GPU node to a machine set (stesmith) -include::modules/nvidia-gpu-aws-adding-a-gpu-node.adoc[leveloffset=+1] - -//Deploying the Node Feature Discovery Operator (stesmith) -include::modules/nvidia-gpu-aws-deploying-the-node-feature-discovery-operator.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/creating-machineset-azure-stack-hub.adoc b/machine_management/creating_machinesets/creating-machineset-azure-stack-hub.adoc deleted file mode 100644 index bd14868eb8bf..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-azure-stack-hub.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-azure-stack-hub"] -= Creating a compute machine set on Azure Stack Hub -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-azure-stack-hub - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on Microsoft Azure Stack Hub. 
For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. - -//[IMPORTANT] admonition for UPI -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -//Sample YAML for a compute machine set custom resource on Azure Stack Hub -include::modules/machineset-yaml-azure-stack-hub.adoc[leveloffset=+1] - -//Creating a compute machine set -include::modules/machineset-creating.adoc[leveloffset=+1] - -//Enabling Azure boot diagnostics on compute machines -include::modules/machineset-azure-boot-diagnostics.adoc[leveloffset=+1] - -//Enabling customer-managed encryption keys for a compute machine set -include::modules/machineset-customer-managed-encryption-azure.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/creating-machineset-azure.adoc b/machine_management/creating_machinesets/creating-machineset-azure.adoc deleted file mode 100644 index e3f30f1f0ad3..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-azure.adoc +++ /dev/null @@ -1,75 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-azure"] -= Creating a compute machine set on Azure -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-azure - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on Microsoft Azure. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. - -//[IMPORTANT] admonition for UPI -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -//Sample YAML for a compute machine set custom resource on Azure -include::modules/machineset-yaml-azure.adoc[leveloffset=+1] - -//Creating a compute machine set -include::modules/machineset-creating.adoc[leveloffset=+1] - -//Selecting an Azure Marketplace image -include::modules/installation-azure-marketplace-subscribe.adoc[leveloffset=+1] - -//Enabling Azure boot diagnostics -include::modules/machineset-azure-boot-diagnostics.adoc[leveloffset=+1] - -//Machine sets that deploy machines as Spot VMs -include::modules/machineset-non-guaranteed-instance.adoc[leveloffset=+1] - -//Creating Spot VMs by using compute machine sets -include::modules/machineset-creating-non-guaranteed-instances.adoc[leveloffset=+2] - -//Machine sets that deploy machines on Ephemeral OS disks -include::modules/machineset-azure-ephemeral-os.adoc[leveloffset=+1] - -//Creating machines on Ephemeral OS disks by using compute machine sets -include::modules/machineset-creating-azure-ephemeral-os.adoc[leveloffset=+2] - -//Machine sets that deploy machines on ultra disks as data disks -include::modules/machineset-azure-ultra-disk.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* link:https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types#ultra-disks[Microsoft Azure ultra disks documentation] -* xref:../../storage/container_storage_interface/persistent-storage-csi-azure.adoc#machineset-azure-ultra-disk_persistent-storage-csi-azure[Machine sets that deploy machines on ultra disks using CSI PVCs] -* xref:../../storage/persistent_storage/persistent-storage-azure.adoc#machineset-azure-ultra-disk_persistent-storage-azure[Machine sets that deploy machines on ultra disks using in-tree PVCs] - -//Creating machines on ultra disks by using machine sets -include::modules/machineset-creating-azure-ultra-disk.adoc[leveloffset=+2] - 
-//Troubleshooting resources for machine sets that enable ultra disks -include::modules/machineset-troubleshooting-azure-ultra-disk.adoc[leveloffset=+2] - -//Enabling customer-managed encryption keys for a machine set -include::modules/machineset-customer-managed-encryption-azure.adoc[leveloffset=+1] - -// Accelerated Networking for Microsoft Azure VMs -include::modules/machineset-azure-accelerated-networking.adoc[leveloffset=+1] - -//Adding a GPU node to a machine set (stesmith) -include::modules/nvidia-gpu-azure-adding-a-gpu-node.adoc[leveloffset=+1] - -//Deploying the Node Feature Discovery Operator (stesmith) -include::modules/nvidia-gpu-aws-deploying-the-node-feature-discovery-operator.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_azure/installing-azure-customizations.adoc#machineset-azure-enabling-accelerated-networking-new-install_installing-azure-customizations[Enabling Accelerated Networking during installation] - -// Enabling Accelerated Networking on an existing Microsoft Azure cluster -include::modules/machineset-azure-enabling-accelerated-networking-existing.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/manually-scaling-machineset.adoc#manually-scaling-machineset[Manually scaling a compute machine set] diff --git a/machine_management/creating_machinesets/creating-machineset-bare-metal.adoc b/machine_management/creating_machinesets/creating-machineset-bare-metal.adoc deleted file mode 100644 index cca9c7f268d1..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-bare-metal.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-bare-metal"] -= Creating a compute machine set on bare metal -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-bare-metal - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on bare metal. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. - -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -include::modules/machineset-yaml-baremetal.adoc[leveloffset=+1] - -include::modules/machineset-creating.adoc[leveloffset=+1] - -// Mothballed - re-add when available -// include::modules/machineset-osp-adding-bare-metal.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/creating-machineset-gcp.adoc b/machine_management/creating_machinesets/creating-machineset-gcp.adoc deleted file mode 100644 index 9857240d32be..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-gcp.adoc +++ /dev/null @@ -1,51 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-gcp"] -= Creating a compute machine set on GCP -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-gcp - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on Google Cloud Platform (GCP). For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. 
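For instance, the preemptible VM support described earlier in this document is enabled with a single provider specification field. A hedged excerpt; the modules included below show the full resource and procedure:

[source,yaml]
----
spec:
  template:
    spec:
      providerSpec:
        value:
          preemptible: true   # request preemptible VM instances; GCP can terminate these at any time
----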
- -//[IMPORTANT] admonition for UPI -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -//Sample YAML for a compute machine set custom resource on GCP -include::modules/machineset-yaml-gcp.adoc[leveloffset=+1] - -//Creating a compute machine set -include::modules/machineset-creating.adoc[leveloffset=+1] - -//Configuring persistent disk types by using compute machine sets -include::modules/machineset-gcp-pd-disk-types.adoc[leveloffset=+1] - -//Configuring Confidential VM by using machine sets -include::modules/machineset-gcp-confidential-vm.adoc[leveloffset=+1] - -//Machine sets that deploy machines as preemptible VM instances -include::modules/machineset-non-guaranteed-instance.adoc[leveloffset=+1] - -//Creating preemptible VM instances by using compute machine sets -include::modules/machineset-creating-non-guaranteed-instances.adoc[leveloffset=+2] - -//Configuring Shielded VM options by using machine sets -include::modules/machineset-gcp-shielded-vms.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm[What is Shielded VM?] -** link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#secure-boot[Secure Boot] -** link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#vtpm[Virtual Trusted Platform Module (vTPM)] -** link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#integrity-monitoring[Integrity monitoring] - -//Enabling customer-managed encryption keys for a compute machine set -include::modules/machineset-gcp-enabling-customer-managed-encryption.adoc[leveloffset=+1] - -//Enabling GPU support for a compute machine set -include::modules/machineset-gcp-enabling-gpu-support.adoc[leveloffset=+1] - -//Adding a GPU node to a machine set (stesmith) -include::modules/nvidia-gpu-gcp-adding-a-gpu-node.adoc[leveloffset=+1] - -//Deploying the Node Feature Discovery Operator (stesmith) -include::modules/nvidia-gpu-aws-deploying-the-node-feature-discovery-operator.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/creating-machineset-ibm-cloud.adoc b/machine_management/creating_machinesets/creating-machineset-ibm-cloud.adoc deleted file mode 100644 index fc58ff72de4f..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-ibm-cloud.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-ibm-cloud"] -= Creating a compute machine set on IBM Cloud -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-ibm-cloud - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on IBM Cloud. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. 
- -//[IMPORTANT] admonition for UPI -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -//Sample YAML for a machine set custom resource on IBM Cloud -include::modules/machineset-yaml-ibm-cloud.adoc[leveloffset=+1] - -//Creating a compute machine set -include::modules/machineset-creating.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/creating-machineset-ibm-power-vs.adoc b/machine_management/creating_machinesets/creating-machineset-ibm-power-vs.adoc deleted file mode 100644 index 0ee790376372..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-ibm-power-vs.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-ibm-power-vs"] -= Creating a compute machine set on {ibmpowerProductName} Virtual Server -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-ibm-power-vs - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on {ibmpowerProductName} Virtual Server. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. - -//[IMPORTANT] admonition for UPI -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -//Sample YAML for a machine set custom resource on IBM Cloud -include::modules/machineset-yaml-ibm-power-vs.adoc[leveloffset=+1] - -//Creating a machine set -include::modules/machineset-creating.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/creating-machineset-nutanix.adoc b/machine_management/creating_machinesets/creating-machineset-nutanix.adoc deleted file mode 100644 index 41e16fc9f03b..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-nutanix.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-nutanix"] -= Creating a compute machine set on Nutanix -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-nutanix - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on Nutanix. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. - -//[IMPORTANT] admonition for UPI -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -//Sample YAML for a compute machine set custom resource on Nutanix -include::modules/machineset-yaml-nutanix.adoc[leveloffset=+1] - -//Creating a compute machine set -include::modules/machineset-creating.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/creating-machineset-osp.adoc b/machine_management/creating_machinesets/creating-machineset-osp.adoc deleted file mode 100644 index 57e30a54a9ee..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-osp.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-osp"] -= Creating a compute machine set on OpenStack -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-osp - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on {rh-openstack-first}. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. 
- -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -include::modules/machineset-yaml-osp.adoc[leveloffset=+1] - -include::modules/machineset-yaml-osp-sr-iov.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_openstack/installing-openstack-nfv-preparing.adoc#installing-openstack-nfv-preparing[Preparing to install a cluster that uses SR-IOV or OVS-DPDK on OpenStack] - -include::modules/machineset-yaml-osp-sr-iov-port-security.adoc[leveloffset=+1] - -include::modules/machineset-creating.adoc[leveloffset=+1] - -// Mothballed - re-add when available -// include::modules/machineset-osp-adding-bare-metal.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/creating-machineset-vsphere.adoc b/machine_management/creating_machinesets/creating-machineset-vsphere.adoc deleted file mode 100644 index 9965cbd3ba96..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-vsphere.adoc +++ /dev/null @@ -1,39 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-vsphere"] -= Creating a compute machine set on vSphere -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-vsphere - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on VMware vSphere. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. - -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -//Sample YAML for a compute machine set custom resource on vSphere -include::modules/machineset-yaml-vsphere.adoc[leveloffset=+1] - -//Minimum required vCenter privileges for compute machine set management -include::modules/machineset-vsphere-required-permissions.adoc[leveloffset=+1] - -//Requirements for clusters with user-provisioned infrastructure to use compute machine sets -include::modules/compute-machineset-upi-reqs.adoc[leveloffset=+1] - -//Obtaining the infrastructure ID -[discrete] -include::modules/machineset-upi-reqs-infra-id.adoc[leveloffset=+2] - -//Satisfying vSphere credentials requirements -[discrete] -include::modules/machineset-upi-reqs-vsphere-creds.adoc[leveloffset=+2] - -//Satisfying ignition configuration requirements -[discrete] -include::modules/machineset-upi-reqs-ignition-config.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../../post_installation_configuration/machine-configuration-tasks.adoc#understanding-the-machine-config-operator[Understanding the Machine Config Operator] -* xref:../../installing/installing_vsphere/installing-vsphere.adoc#installation-vsphere-machines_installing-vsphere[Installing {op-system} and starting the {product-title} bootstrap process] - -//Creating a compute machine set -include::modules/machineset-creating.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/images b/machine_management/creating_machinesets/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/machine_management/creating_machinesets/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/machine_management/creating_machinesets/modules b/machine_management/creating_machinesets/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/machine_management/creating_machinesets/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git 
a/machine_management/creating_machinesets/snippets b/machine_management/creating_machinesets/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/machine_management/creating_machinesets/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/machine_management/deleting-machine.adoc b/machine_management/deleting-machine.adoc deleted file mode 100644 index 9f7d9dd1c3c7..000000000000 --- a/machine_management/deleting-machine.adoc +++ /dev/null @@ -1,32 +0,0 @@ -:_content-type: ASSEMBLY -[id="deleting-machine"] -= Deleting a machine -include::_attributes/common-attributes.adoc[] -:context: deleting-machine - -toc::[] - -You can delete a specific machine. - -//Deleting a specific machine -include::modules/machine-delete.adoc[leveloffset=+1] - -//Lifecycle hooks for the machine deletion phase -include::modules/machine-lifecycle-hook-deletion.adoc[leveloffset=+1] - -//Deletion lifecycle hook configuration -include::modules/machine-lifecycle-hook-deletion-format.adoc[leveloffset=+2] - -//Machine deletion lifecycle hook examples for Operator developers -include::modules/machine-lifecycle-hook-deletion-uses.adoc[leveloffset=+2] - -//Quorum protection with machine lifecycle hooks -include::modules/machine-lifecycle-hook-deletion-etcd.adoc[leveloffset=+2] - - -[role="_additional-resources"] -[id="additional-resources_unhealthy-etcd-member"] -== Additional resources - -* xref:../backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc#replacing-unhealthy-etcd-member[Replacing an unhealthy etcd member] -* xref:../machine_management/control_plane_machine_management/cpmso-using.adoc#cpmso-using[Managing control plane machines with control plane machine sets] \ No newline at end of file diff --git a/machine_management/deploying-machine-health-checks.adoc b/machine_management/deploying-machine-health-checks.adoc deleted file mode 100644 index 6f7bcb0feb70..000000000000 --- a/machine_management/deploying-machine-health-checks.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_content-type: ASSEMBLY -[id="deploying-machine-health-checks"] -= Deploying machine health checks -include::_attributes/common-attributes.adoc[] -:context: deploying-machine-health-checks - -toc::[] - -You can configure and deploy a machine health check to automatically repair damaged machines in a machine pool. - -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -include::modules/machine-health-checks-about.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../nodes/nodes/nodes-nodes-viewing.adoc#nodes-nodes-viewing-listing_nodes-nodes-viewing[About listing all the nodes in a cluster] -* xref:../machine_management/deploying-machine-health-checks.adoc#machine-health-checks-short-circuiting_deploying-machine-health-checks[Short-circuiting machine health check remediation] -* xref:../machine_management/control_plane_machine_management/cpmso-about.adoc#cpmso-about[About the Control Plane Machine Set Operator] - -include::modules/machine-health-checks-resource.adoc[leveloffset=+1] - -include::modules/machine-health-checks-creating.adoc[leveloffset=+1] - -You can configure and deploy a machine health check to detect and repair unhealthy bare metal nodes. 
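The supported `MachineHealthCheck` fields are defined in the included `machine-health-checks-resource` module; a minimal sketch of the resource, with placeholder selector labels and illustrative timeouts, looks roughly like this:

[source,yaml]
----
apiVersion: machine.openshift.io/v1beta1
kind: MachineHealthCheck
metadata:
  name: example
  namespace: openshift-machine-api
spec:
  selector:
    matchLabels:
      machine.openshift.io/cluster-api-machine-role: <role>
      machine.openshift.io/cluster-api-machine-type: <role>
      machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone>
  unhealthyConditions:
  - type: "Ready"
    status: "False"
    timeout: "300s"
  - type: "Ready"
    status: "Unknown"
    timeout: "300s"
  maxUnhealthy: "40%"      # short-circuits remediation if too many machines are unhealthy at once
  nodeStartupTimeout: "10m"
----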
- -include::modules/mgmt-power-remediation-baremetal-about.adoc[leveloffset=+1] diff --git a/machine_management/images b/machine_management/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/machine_management/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/machine_management/index.adoc b/machine_management/index.adoc deleted file mode 100644 index f5222bad539b..000000000000 --- a/machine_management/index.adoc +++ /dev/null @@ -1,95 +0,0 @@ -:_content-type: ASSEMBLY -[id="overview-of-machine-management"] -= Overview of machine management -include::_attributes/common-attributes.adoc[] -:context: overview-of-machine-management - -toc::[] - -You can use machine management to flexibly work with underlying infrastructure such as Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), {rh-openstack-first}, and VMware vSphere to manage the {product-title} cluster. -You can control the cluster and perform auto-scaling, such as scaling up and down the cluster based on specific workload policies. - -It is important to have a cluster that adapts to changing workloads. The {product-title} cluster can horizontally scale up and down when the load increases or decreases. - -Machine management is implemented as a xref:../operators/understanding/crds/crd-extending-api-with-crds.adoc#crd-extending-api-with-crds[custom resource definition] (CRD). -A CRD object defines a new unique object `Kind` in the cluster and enables the Kubernetes API server to handle the object's entire lifecycle. - -The Machine API Operator provisions the following resources: - -* `MachineSet` -* `Machine` -* `ClusterAutoscaler` -* `MachineAutoscaler` -* `MachineHealthCheck` - -include::modules/machine-api-overview.adoc[leveloffset=+1] - -[id="machine-mgmt-intro-managing-compute_{context}"] -== Managing compute machines - -As a cluster administrator, you can perform the following actions: - -* Create a compute machine set for the following cloud providers: - -** xref:../machine_management/creating_machinesets/creating-machineset-aws.adoc#creating-machineset-aws[AWS] - -** xref:../machine_management/creating_machinesets/creating-machineset-azure.adoc#creating-machineset-azure[Azure] - -** xref:../machine_management/creating_machinesets/creating-machineset-gcp.adoc#creating-machineset-gcp[GCP] - -** xref:../machine_management/creating_machinesets/creating-machineset-osp.adoc#creating-machineset-osp[{rh-openstack}] - -** xref:../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere[vSphere] - -* Create a machine set for a bare metal deployment: xref:../machine_management/creating_machinesets/creating-machineset-bare-metal.adoc#creating-machineset-bare-metal[Creating a compute machine set on bare metal] - -* xref:../machine_management/manually-scaling-machineset.adoc#manually-scaling-machineset[Manually scale a compute machine set] by adding or removing a machine from the compute machine set. - -* xref:../machine_management/modifying-machineset.adoc#modifying-machineset[Modify a compute machine set] through the `MachineSet` YAML configuration file. - -* xref:../machine_management/deleting-machine.adoc#deleting-machine[Delete] a machine. - -* xref:../machine_management/creating-infrastructure-machinesets.adoc#creating-infrastructure-machinesets[Create infrastructure compute machine sets]. 
- -* Configure and deploy a xref:../machine_management/deploying-machine-health-checks.adoc#deploying-machine-health-checks[machine health check] to automatically fix damaged machines in a machine pool. - -[id="machine-mgmt-intro-managing-control-plane_{context}"] -== Managing control plane machines - -As a cluster administrator, you can perform the following actions: - -* xref:../machine_management/control_plane_machine_management/cpmso-using.adoc#cpmso-feat-config-update_cpmso-using[Update your control plane configuration] with a control plane machine set for the following cloud providers: - -** xref:../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-aws_cpmso-configuration[AWS] - -** xref:../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-gcp_cpmso-configuration[GCP] - -** xref:../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-azure_cpmso-configuration[Azure] - -** xref:../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-nutanix_cpmso-configuration[Nutanix] - -** xref:../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-vsphere_cpmso-configuration[vSphere] - -* Configure and deploy a xref:../machine_management/deploying-machine-health-checks.adoc#deploying-machine-health-checks[machine health check] to automatically recover unhealthy control plane machines. - -[id="machine-mgmt-intro-autoscaling_{context}"] -== Applying autoscaling to an {product-title} cluster - -You can automatically scale your {product-title} cluster to ensure flexibility for changing workloads. To xref:../machine_management/applying-autoscaling.adoc#applying-autoscaling[autoscale] your cluster, you must first deploy a cluster autoscaler, and then deploy a machine autoscaler for each compute machine set. - -* The xref:../machine_management/applying-autoscaling.adoc#cluster-autoscaler-about_applying-autoscaling[_cluster autoscaler_] increases and decreases the size of the cluster based on deployment needs. - -* The xref:../machine_management/applying-autoscaling.adoc#machine-autoscaler-about_applying-autoscaling[_machine autoscaler_] adjusts the number of machines in the compute machine sets that you deploy in your {product-title} cluster. - -[id="machine-mgmt-intro-add-for-upi_{context}"] -== Adding compute machines on user-provisioned infrastructure -User-provisioned infrastructure is an environment where you can deploy infrastructure such as compute, network, and storage resources that host the {product-title}. You can xref:../machine_management//user_infra/adding-compute-user-infra-general.adoc#adding-compute-user-infra-general[add compute machines] to a cluster on user-provisioned infrastructure during or after the installation process. - -[id="machine-mgmt-intro-add-rhel_{context}"] -== Adding RHEL compute machines to your cluster - -As a cluster administrator, you can perform the following actions: - -** xref:../machine_management/adding-rhel-compute.adoc#adding-rhel-compute[Add Red Hat Enterprise Linux (RHEL) compute machines], also known as worker machines, to a user-provisioned infrastructure cluster or an installation-provisioned infrastructure cluster. - -** xref:../machine_management/more-rhel-compute.adoc#more-rhel-compute[Add more Red Hat Enterprise Linux (RHEL) compute machines] to an existing cluster. 
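To make the autoscaler relationship described earlier in this overview concrete, a minimal sketch of the two resources involved follows; the node limit, replica bounds, and machine set name are illustrative only:

[source,yaml]
----
apiVersion: autoscaling.openshift.io/v1
kind: ClusterAutoscaler
metadata:
  name: default              # the cluster autoscaler is a singleton named "default"
spec:
  resourceLimits:
    maxNodesTotal: 24
---
apiVersion: autoscaling.openshift.io/v1beta1
kind: MachineAutoscaler
metadata:
  name: <machine_set_name>
  namespace: openshift-machine-api
spec:
  minReplicas: 1
  maxReplicas: 6
  scaleTargetRef:             # points the autoscaler at one compute machine set
    apiVersion: machine.openshift.io/v1beta1
    kind: MachineSet
    name: <machine_set_name>
----

One machine autoscaler is created for each compute machine set that should participate in scaling, while the cluster autoscaler sets the overall bounds.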
diff --git a/machine_management/manually-scaling-machineset.adoc b/machine_management/manually-scaling-machineset.adoc deleted file mode 100644 index 08994f3dfcc6..000000000000 --- a/machine_management/manually-scaling-machineset.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -[id="manually-scaling-machineset"] -= Manually scaling a compute machine set -include::_attributes/common-attributes.adoc[] -:context: manually-scaling-machineset - -toc::[] - -You can add or remove an instance of a machine in a compute machine set. - -[NOTE] -==== -If you need to modify aspects of a compute machine set outside of scaling, see xref:../machine_management/modifying-machineset.adoc#modifying-machineset[Modifying a compute machine set]. -==== - -== Prerequisites - -* If you enabled the cluster-wide proxy and scale up compute machines not included in `networking.machineNetwork[].cidr` from the installation configuration, you must xref:../networking/enable-cluster-wide-proxy.adoc#nw-proxy-configure-object_config-cluster-wide-proxy[add the compute machines to the Proxy object's `noProxy` field] to prevent connection issues. - -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -include::modules/machineset-manually-scaling.adoc[leveloffset=+1] - -include::modules/machineset-delete-policy.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_manually-scaling-machineset"] -== Additional resources -* xref:../machine_management/deleting-machine.adoc#machine-lifecycle-hook-deletion_deleting-machine[Lifecycle hooks for the machine deletion phase] \ No newline at end of file diff --git a/machine_management/modifying-machineset.adoc b/machine_management/modifying-machineset.adoc deleted file mode 100644 index 827e439d70bb..000000000000 --- a/machine_management/modifying-machineset.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_content-type: ASSEMBLY -[id="modifying-machineset"] -= Modifying a compute machine set -include::_attributes/common-attributes.adoc[] -:context: modifying-machineset - -toc::[] - -You can modify a compute machine set, such as adding labels, changing the instance type, or changing block storage. - -[NOTE] -==== -If you need to scale a compute machine set without making other changes, see xref:../machine_management/manually-scaling-machineset.adoc#manually-scaling-machineset[Manually scaling a compute machine set]. 
-==== - -include::modules/machineset-modifying.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../machine_management/deleting-machine.adoc#machine-lifecycle-hook-deletion_deleting-machine[Lifecycle hooks for the machine deletion phase] - -[role="_additional-resources"] -.Additional resources -* xref:../machine_management/manually-scaling-machineset.adoc#machineset-manually-scaling_manually-scaling-machineset[Scaling a compute machine set manually] -* xref:../nodes/scheduling/nodes-scheduler-about.adoc#nodes-scheduler-about[Controlling pod placement using the scheduler] diff --git a/machine_management/modules b/machine_management/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/machine_management/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/machine_management/more-rhel-compute.adoc b/machine_management/more-rhel-compute.adoc deleted file mode 100644 index b3860d5f32b8..000000000000 --- a/machine_management/more-rhel-compute.adoc +++ /dev/null @@ -1,47 +0,0 @@ -:_content-type: ASSEMBLY -[id="more-rhel-compute"] -= Adding more RHEL compute machines to an {product-title} cluster -include::_attributes/common-attributes.adoc[] -:context: more-rhel-compute - -toc::[] - -If your {product-title} cluster already includes Red Hat Enterprise Linux (RHEL) compute machines, which are also known as worker machines, you can add more RHEL compute machines to it. - -include::modules/rhel-compute-overview.adoc[leveloffset=+1] - -include::modules/rhel-compute-requirements.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../nodes/nodes/nodes-nodes-working.adoc#nodes-nodes-working-deleting_nodes-nodes-working[Deleting nodes] - -include::modules/csr-management.adoc[leveloffset=+2] - -[id="more-rhel-compute-preparing-image-cloud"] -== Preparing an image for your cloud - -Amazon Machine Images (AMI) are required since various image formats cannot be used directly by AWS. You may use the AMIs that Red Hat has provided, or you can manually import your own images. The AMI must exist before the EC2 instance can be provisioned. You must list the AMI IDs so that the correct {op-system-base} version needed for the compute machines is selected. - -include::modules/rhel-images-aws.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* You may also manually link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/image_builder_guide/sect-documentation-image_builder-chapter5-section_2[import {op-system-base} images to AWS]. - -include::modules/rhel-preparing-node.adoc[leveloffset=+1] - -include::modules/rhel-attaching-instance-aws.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* See xref:../installing/installing_aws/installing-aws-account.adoc#installation-aws-permissions-iam-roles_installing-aws-account[Required AWS permissions for IAM roles]. 
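The manual scaling and modification assemblies above both act on the `MachineSet` resource with standard CLI verbs; a brief sketch of the commands involved, with a placeholder machine set name:

[source,terminal]
----
$ oc get machinesets -n openshift-machine-api                                   # list compute machine sets and current replica counts
$ oc scale --replicas=2 machineset <machineset_name> -n openshift-machine-api   # manually scale the machine set
$ oc edit machineset <machineset_name> -n openshift-machine-api                 # modify other fields, such as labels or instance type
----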
- -include::modules/rhel-worker-tag.adoc[leveloffset=+1] - -include::modules/rhel-adding-more-nodes.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/rhel-ansible-parameters.adoc[leveloffset=+1] diff --git a/machine_management/snippets b/machine_management/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/machine_management/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/machine_management/user_infra/_attributes b/machine_management/user_infra/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/machine_management/user_infra/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/machine_management/user_infra/adding-aws-compute-user-infra.adoc b/machine_management/user_infra/adding-aws-compute-user-infra.adoc deleted file mode 100644 index 7f8b9c5a2838..000000000000 --- a/machine_management/user_infra/adding-aws-compute-user-infra.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_content-type: ASSEMBLY -[id="adding-aws-compute-user-infra"] -= Adding compute machines to AWS by using CloudFormation templates -include::_attributes/common-attributes.adoc[] -:context: adding-aws-compute-user-infra - -toc::[] - -You can add more compute machines to your {product-title} cluster on Amazon Web Services (AWS) that you created by using the sample CloudFormation templates. - -[id="prerequisites_adding-aws-compute-user-infra"] -== Prerequisites - -* You installed your cluster on AWS by using the provided xref:../../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[AWS CloudFormation templates]. -* You have the JSON file and CloudFormation template that you used to create the compute machines during cluster installation. If you do not have these files, you must recreate them by following the instructions in the xref:../../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[installation procedure]. - -include::modules/machine-adding-aws-compute-cloudformation.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] diff --git a/machine_management/user_infra/adding-bare-metal-compute-user-infra.adoc b/machine_management/user_infra/adding-bare-metal-compute-user-infra.adoc deleted file mode 100644 index f1fa52de60fe..000000000000 --- a/machine_management/user_infra/adding-bare-metal-compute-user-infra.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="adding-bare-metal-compute-user-infra"] -= Adding compute machines to bare metal -include::_attributes/common-attributes.adoc[] -:context: adding-bare-metal-compute-user-infra - -toc::[] - -You can add more compute machines to your {product-title} cluster on bare metal. - -== Prerequisites - -* You xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[installed a cluster on bare metal]. -* You have installation media and {op-system-first} images that you used to create your cluster. If you do not have these files, you must obtain them by following the instructions in the xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[installation procedure]. -* If a DHCP server is available for your user-provisioned infrastructure, you have added the details for the additional compute machines to your DHCP server configuration. 
This includes a persistent IP address, DNS server information, and a hostname for each machine. -* You have updated your DNS configuration to include the record name and IP address of each compute machine that you are adding. You have validated that DNS lookup and reverse DNS lookup resolve correctly. - -[IMPORTANT] -==== -If you do not have access to the {op-system-first} images that were used to create your cluster, you can add more compute machines to your {product-title} cluster with newer versions of {op-system-first} images. For instructions, see link:https://access.redhat.com/solutions/5514051[Adding new nodes to UPI cluster fails after upgrading to OpenShift 4.6+]. -==== - -[id="creating-rhcos-machines-bare-metal"] -== Creating {op-system-first} machines - -Before you add more compute machines to a cluster that you installed on bare metal infrastructure, you must create {op-system} machines for it to use. You can either use an ISO image or network PXE booting to create the machines. - -[NOTE] -==== -You must use the same ISO image that you used to install a cluster to deploy all new nodes in a cluster. It is recommended to use the same Ignition config file. The nodes automatically upgrade themselves on the first boot before running the workloads. You can add the nodes before or after the upgrade. -==== - -include::modules/machine-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/machine-user-infra-machines-pxe.adoc[leveloffset=+2] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] diff --git a/machine_management/user_infra/adding-compute-user-infra-general.adoc b/machine_management/user_infra/adding-compute-user-infra-general.adoc deleted file mode 100644 index 851a2ead37b0..000000000000 --- a/machine_management/user_infra/adding-compute-user-infra-general.adoc +++ /dev/null @@ -1,42 +0,0 @@ -:_content-type: ASSEMBLY -[id="adding-compute-user-infra-general"] -= Adding compute machines to clusters with user-provisioned infrastructure manually -include::_attributes/common-attributes.adoc[] -:context: adding-compute-user-infra-general - -toc::[] - -You can add compute machines to a cluster on user-provisioned infrastructure either as part of the installation process or after installation. The post-installation process requires some of the same configuration files and parameters that were used during installation. - -[id="upi-adding-compute-aws"] -== Adding compute machines to Amazon Web Services - -To add more compute machines to your {product-title} cluster on Amazon Web Services (AWS), see xref:../../machine_management/user_infra/adding-aws-compute-user-infra.adoc#adding-aws-compute-user-infra[Adding compute machines to AWS by using CloudFormation templates]. - -[id="upi-adding-compute-azure"] -== Adding compute machines to Microsoft Azure - -To add more compute machines to your {product-title} cluster on Microsoft Azure, see xref:../../installing/installing_azure/installing-azure-user-infra.adoc#installation-creating-azure-worker_installing-azure-user-infra[Creating additional worker machines in Azure]. - -[id="upi-adding-compute-ash"] -== Adding compute machines to Azure Stack Hub - -To add more compute machines to your {product-title} cluster on Azure Stack Hub, see xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc#installation-creating-azure-worker_installing-azure-stack-hub-user-infra[Creating additional worker machines in Azure Stack Hub]. 
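Several of the assemblies in this section include the `installation-approve-csrs` module for newly added machines; as a reminder of what that step looks like in practice, a short sketch where the CSR name is a placeholder and the final one-liner approves all pending requests:

[source,terminal]
----
$ oc get csr                                      # list pending certificate signing requests
$ oc adm certificate approve <csr_name>           # approve a single CSR
$ oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve
----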
- -[id="upi-adding-compute-gcp"] -== Adding compute machines to Google Cloud Platform - -To add more compute machines to your {product-title} cluster on Google Cloud Platform (GCP), see xref:../../installing/installing_gcp/installing-restricted-networks-gcp.adoc#installation-creating-gcp-worker_installing-restricted-networks-gcp[Creating additional worker machines in GCP]. - -[id="upi-adding-compute-vsphere"] -== Adding compute machines to vSphere - -You can xref:../../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere[use compute machine sets] to automate the creation of additional compute machines for your {product-title} cluster on vSphere. - -To manually add more compute machines to your cluster, see xref:../../machine_management/user_infra/adding-vsphere-compute-user-infra.adoc#adding-vsphere-compute-user-infra[Adding compute machines to vSphere manually]. - - -[id="upi-adding-compute-bare-metal"] -== Adding compute machines to bare metal - -To add more compute machines to your {product-title} cluster on bare metal, see xref:../../machine_management/user_infra/adding-bare-metal-compute-user-infra.adoc#adding-bare-metal-compute-user-infra[Adding compute machines to bare metal]. diff --git a/machine_management/user_infra/adding-vsphere-compute-user-infra.adoc b/machine_management/user_infra/adding-vsphere-compute-user-infra.adoc deleted file mode 100644 index ab5b6dd86d57..000000000000 --- a/machine_management/user_infra/adding-vsphere-compute-user-infra.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -[id="adding-vsphere-compute-user-infra"] -= Adding compute machines to vSphere manually -include::_attributes/common-attributes.adoc[] -:context: adding-vsphere-compute-user-infra - -toc::[] - -You can add more compute machines to your {product-title} cluster on VMware vSphere manually. - -[NOTE] -==== -You can also xref:../../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere[use compute machine sets] to automate the creation of additional VMware vSphere compute machines for your cluster. -==== - -== Prerequisites - -* You xref:../../installing/installing_vsphere/installing-vsphere.adoc#installing-vsphere[installed a cluster on vSphere]. - -* You have installation media and {op-system-first} images that you used to create your cluster. If you do not have these files, you must obtain them by following the instructions in the xref:../../installing/installing_vsphere/installing-vsphere.adoc#installing-vsphere[installation procedure]. - -[IMPORTANT] -==== -If you do not have access to the {op-system-first} images that were used to create your cluster, you can add more compute machines to your {product-title} cluster with newer versions of {op-system-first} images. For instructions, see link:https://access.redhat.com/solutions/5514051[Adding new nodes to UPI cluster fails after upgrading to OpenShift 4.6+]. 
-==== - -include::modules/machine-vsphere-machines.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] diff --git a/machine_management/user_infra/images b/machine_management/user_infra/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/machine_management/user_infra/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/machine_management/user_infra/modules b/machine_management/user_infra/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/machine_management/user_infra/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/machine_management/user_infra/snippets b/machine_management/user_infra/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/machine_management/user_infra/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/masters/images b/masters/images deleted file mode 120000 index e4c5bd02a10a..000000000000 --- a/masters/images +++ /dev/null @@ -1 +0,0 @@ -../images/ \ No newline at end of file diff --git a/masters/modules b/masters/modules deleted file mode 120000 index 43aab75b53c9..000000000000 --- a/masters/modules +++ /dev/null @@ -1 +0,0 @@ -../modules/ \ No newline at end of file diff --git a/metering/_attributes b/metering/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/metering/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/metering/configuring_metering/_attributes b/metering/configuring_metering/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/metering/configuring_metering/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/metering/configuring_metering/images b/metering/configuring_metering/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/metering/configuring_metering/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/metering/configuring_metering/metering-about-configuring.adoc b/metering/configuring_metering/metering-about-configuring.adoc deleted file mode 100644 index e13b9bbe28f3..000000000000 --- a/metering/configuring_metering/metering-about-configuring.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="metering-about-configuring"] -= About configuring metering -include::_attributes/common-attributes.adoc[] -:context: metering-about-configuring - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -The `MeteringConfig` custom resource specifies all the configuration details for your metering installation. When you first install the metering stack, a default `MeteringConfig` custom resource is generated. Use the examples in the documentation to modify this default file. Keep in mind the following key points: - -* At a minimum, you need to xref:../../metering/configuring_metering/metering-configure-persistent-storage.adoc#metering-configure-persistent-storage[configure persistent storage] and xref:../../metering/configuring_metering/metering-configure-hive-metastore.adoc#metering-configure-hive-metastore[configure the Hive metastore]. - -* Most default configuration settings work, but larger deployments or highly customized deployments should review all configuration options carefully. - -* Some configuration options can not be modified after installation. 
- -For configuration options that can be modified after installation, make the changes in your `MeteringConfig` custom resource and reapply the file. diff --git a/metering/configuring_metering/metering-common-config-options.adoc b/metering/configuring_metering/metering-common-config-options.adoc deleted file mode 100644 index 0c8c9addbb4b..000000000000 --- a/metering/configuring_metering/metering-common-config-options.adoc +++ /dev/null @@ -1,175 +0,0 @@ -:_content-type: ASSEMBLY -[id="metering-common-config-options"] -= Common configuration options -include::_attributes/common-attributes.adoc[] -:context: metering-common-config-options - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -== Resource requests and limits -You can adjust the CPU, memory, or storage resource requests and/or limits for pods and volumes. The `default-resource-limits.yaml` below provides an example of setting resource request and limits for each component. - -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: MeteringConfig -metadata: - name: "operator-metering" -spec: - reporting-operator: - spec: - resources: - limits: - cpu: 1 - memory: 500Mi - requests: - cpu: 500m - memory: 100Mi - presto: - spec: - coordinator: - resources: - limits: - cpu: 4 - memory: 4Gi - requests: - cpu: 2 - memory: 2Gi - - worker: - replicas: 0 - resources: - limits: - cpu: 8 - memory: 8Gi - requests: - cpu: 4 - memory: 2Gi - - hive: - spec: - metastore: - resources: - limits: - cpu: 4 - memory: 2Gi - requests: - cpu: 500m - memory: 650Mi - storage: - class: null - create: true - size: 5Gi - server: - resources: - limits: - cpu: 1 - memory: 1Gi - requests: - cpu: 500m - memory: 500Mi ----- - -== Node selectors -You can run the metering components on specific sets of nodes. Set the `nodeSelector` on a metering component to control where the component is scheduled. The `node-selectors.yaml` file below provides an example of setting node selectors for each component. - -[NOTE] -==== -Add the `openshift.io/node-selector: ""` namespace annotation to the metering namespace YAML file before configuring specific node selectors for the operand pods. Specify `""` as the annotation value. -==== - -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: MeteringConfig -metadata: - name: "operator-metering" -spec: - reporting-operator: - spec: - nodeSelector: - "node-role.kubernetes.io/infra": "" <1> - - presto: - spec: - coordinator: - nodeSelector: - "node-role.kubernetes.io/infra": "" <1> - worker: - nodeSelector: - "node-role.kubernetes.io/infra": "" <1> - hive: - spec: - metastore: - nodeSelector: - "node-role.kubernetes.io/infra": "" <1> - server: - nodeSelector: - "node-role.kubernetes.io/infra": "" <1> ----- -<1> Add a `nodeSelector` parameter with the appropriate value to the component you want to move. You can use a `nodeSelector` in the format shown or use key-value pairs, based on the value specified for the node. - -[NOTE] -==== -Add the `openshift.io/node-selector: ""` namespace annotation to the metering namespace YAML file before configuring specific node selectors for the operand pods. When the `openshift.io/node-selector` annotation is set on the project, the value is used in preference to the value of the `spec.defaultNodeSelector` field in the cluster-wide `Scheduler` object. 
-==== - -.Verification - -You can verify the metering node selectors by performing any of the following checks: - -* Verify that all pods for metering are correctly scheduled on the IP of the node that is configured in the `MeteringConfig` custom resource: -+ --- -. Check all pods in the `openshift-metering` namespace: -+ -[source,terminal] ----- -$ oc --namespace openshift-metering get pods -o wide ----- -+ -The output shows the `NODE` and corresponding `IP` for each pod running in the `openshift-metering` namespace. -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -hive-metastore-0 1/2 Running 0 4m33s 10.129.2.26 ip-10-0-210-167.us-east-2.compute.internal -hive-server-0 2/3 Running 0 4m21s 10.128.2.26 ip-10-0-150-175.us-east-2.compute.internal -metering-operator-964b4fb55-4p699 2/2 Running 0 7h30m 10.131.0.33 ip-10-0-189-6.us-east-2.compute.internal -nfs-server 1/1 Running 0 7h30m 10.129.2.24 ip-10-0-210-167.us-east-2.compute.internal -presto-coordinator-0 2/2 Running 0 4m8s 10.131.0.35 ip-10-0-189-6.us-east-2.compute.internal -reporting-operator-869b854c78-8g2x5 1/2 Running 0 7h27m 10.128.2.25 ip-10-0-150-175.us-east-2.compute.internal ----- -+ -. Compare the nodes in the `openshift-metering` namespace to each node `NAME` in your cluster: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-147-106.us-east-2.compute.internal Ready master 14h v1.27.3 -ip-10-0-150-175.us-east-2.compute.internal Ready worker 14h v1.27.3 -ip-10-0-175-23.us-east-2.compute.internal Ready master 14h v1.27.3 -ip-10-0-189-6.us-east-2.compute.internal Ready worker 14h v1.27.3 -ip-10-0-205-158.us-east-2.compute.internal Ready master 14h v1.27.3 -ip-10-0-210-167.us-east-2.compute.internal Ready worker 14h v1.27.3 ----- --- - -* Verify that the node selector configuration in the `MeteringConfig` custom resource does not interfere with the cluster-wide node selector configuration such that no metering operand pods are scheduled. - -** Check the cluster-wide `Scheduler` object for the `spec.defaultNodeSelector` field, which shows where pods are scheduled by default: -+ -[source,terminal] ----- -$ oc get schedulers.config.openshift.io cluster -o yaml ----- diff --git a/metering/configuring_metering/metering-configure-aws-billing-correlation.adoc b/metering/configuring_metering/metering-configure-aws-billing-correlation.adoc deleted file mode 100644 index db22749f27e3..000000000000 --- a/metering/configuring_metering/metering-configure-aws-billing-correlation.adoc +++ /dev/null @@ -1,116 +0,0 @@ -:_content-type: ASSEMBLY -[id="metering-configure-aws-billing-correlation"] -= Configure AWS billing correlation -include::_attributes/common-attributes.adoc[] -:context: metering-configure-aws-billing-correlation - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -Metering can correlate cluster usage information with https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/billing-reports-costusage.html[AWS detailed billing information], attaching a dollar amount to resource usage. For clusters running in EC2, you can enable this by modifying the example `aws-billing.yaml` file below. 
- -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: MeteringConfig -metadata: - name: "operator-metering" -spec: - openshift-reporting: - spec: - awsBillingReportDataSource: - enabled: true - # Replace these with where your AWS billing reports are - # stored in S3. - bucket: "" <1> - prefix: "" - region: "" - - reporting-operator: - spec: - config: - aws: - secretName: "" <2> - - presto: - spec: - config: - aws: - secretName: "" <2> - - hive: - spec: - config: - aws: - secretName: "" <2> ----- -To enable AWS billing correlation, first ensure the AWS Cost and Usage Reports are enabled. For more information, see https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/billing-reports-gettingstarted-turnonreports.html[Turning on the AWS Cost and Usage Report] in the AWS documentation. - -<1> Update the bucket, prefix, and region to the location of your AWS Detailed billing report. -<2> All `secretName` fields should be set to the name of a secret in the metering namespace containing AWS credentials in the `data.aws-access-key-id` and `data.aws-secret-access-key` fields. See the example secret file below for more details. - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: -data: - aws-access-key-id: "dGVzdAo=" - aws-secret-access-key: "c2VjcmV0Cg==" ----- - -To store data in S3, the `aws-access-key-id` and `aws-secret-access-key` credentials must have read and write access to the bucket. For an example of an IAM policy granting the required permissions, see the `aws/read-write.json` file below. - -[source,json] ----- -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "1", - "Effect": "Allow", - "Action": [ - "s3:AbortMultipartUpload", - "s3:DeleteObject", - "s3:GetObject", - "s3:HeadBucket", - "s3:ListBucket", - "s3:ListMultipartUploadParts", - "s3:PutObject" - ], - "Resource": [ - "arn:aws:s3:::operator-metering-data/*", <1> - "arn:aws:s3:::operator-metering-data" <1> - ] - } - ] -} -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "1", - "Effect": "Allow", - "Action": [ - "s3:AbortMultipartUpload", - "s3:DeleteObject", - "s3:GetObject", - "s3:HeadBucket", - "s3:ListBucket", - "s3:ListMultipartUploadParts", - "s3:PutObject" - ], - "Resource": [ - "arn:aws:s3:::operator-metering-data/*", <1> - "arn:aws:s3:::operator-metering-data" <1> - ] - } - ] -} ----- -<1> Replace `operator-metering-data` with the name of your bucket. - -This can be done either pre-installation or post-installation. Disabling it post-installation can cause errors in the Reporting Operator. diff --git a/metering/configuring_metering/metering-configure-hive-metastore.adoc b/metering/configuring_metering/metering-configure-hive-metastore.adoc deleted file mode 100644 index b9aba58f910a..000000000000 --- a/metering/configuring_metering/metering-configure-hive-metastore.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -[id="metering-configure-hive-metastore"] -= Configuring the Hive metastore -include::_attributes/common-attributes.adoc[] -:context: metering-configure-hive-metastore - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -Hive metastore is responsible for storing all the metadata about the database tables created in Presto and Hive. By default, the metastore stores this information in a local embedded Derby database in a persistent volume attached to the pod. 
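Because the default metastore keeps its Derby data on a persistent volume, the main tuning point in that default setup is the `hive.spec.metastore.storage` block shown in the common configuration options; a minimal example that only overrides the volume size and storage class, with both values illustrative, might look like this:

[source,yaml]
----
apiVersion: metering.openshift.io/v1
kind: MeteringConfig
metadata:
  name: "operator-metering"
spec:
  hive:
    spec:
      metastore:
        storage:
          create: true
          class: gp2     # storage class name is illustrative
          size: 20Gi
----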
- -Generally, the default configuration of the Hive metastore works for small clusters, but users may wish to improve performance or move storage requirements out of cluster by using a dedicated SQL database for storing the Hive metastore data. - -include::modules/metering-configure-persistentvolumes.adoc[leveloffset=+1] - -include::modules/metering-use-mysql-or-postgresql-for-hive.adoc[leveloffset=+1] diff --git a/metering/configuring_metering/metering-configure-persistent-storage.adoc b/metering/configuring_metering/metering-configure-persistent-storage.adoc deleted file mode 100644 index 6e9930cd4e45..000000000000 --- a/metering/configuring_metering/metering-configure-persistent-storage.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="metering-configure-persistent-storage"] -= Configuring persistent storage -include::_attributes/common-attributes.adoc[] -:context: metering-configure-persistent-storage - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -Metering requires persistent storage to persist data collected by the Metering Operator and to store the results of reports. A number of different storage providers and storage formats are supported. Select your storage provider and modify the example configuration files to configure persistent storage for your metering installation. - -include::modules/metering-store-data-in-s3.adoc[leveloffset=+1] - -include::modules/metering-store-data-in-s3-compatible.adoc[leveloffset=+1] - -include::modules/metering-store-data-in-azure.adoc[leveloffset=+1] - -include::modules/metering-store-data-in-gcp.adoc[leveloffset=+1] - -include::modules/metering-store-data-in-shared-volumes.adoc[leveloffset=+1] diff --git a/metering/configuring_metering/metering-configure-reporting-operator.adoc b/metering/configuring_metering/metering-configure-reporting-operator.adoc deleted file mode 100644 index dc6b53385854..000000000000 --- a/metering/configuring_metering/metering-configure-reporting-operator.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -[id="metering-configure-reporting-operator"] -= Configuring the Reporting Operator -include::_attributes/common-attributes.adoc[] -:context: metering-configure-reporting-operator - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -The Reporting Operator is responsible for collecting data from Prometheus, storing the metrics in Presto, running report queries against Presto, and exposing their results via an HTTP API. Configuring the Reporting Operator is primarily done in your `MeteringConfig` custom resource. 
- -include::modules/metering-prometheus-connection.adoc[leveloffset=+1] - -include::modules/metering-exposing-the-reporting-api.adoc[leveloffset=+1] diff --git a/metering/configuring_metering/modules b/metering/configuring_metering/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/metering/configuring_metering/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/metering/configuring_metering/snippets b/metering/configuring_metering/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/metering/configuring_metering/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/metering/images b/metering/images deleted file mode 120000 index e4c5bd02a10a..000000000000 --- a/metering/images +++ /dev/null @@ -1 +0,0 @@ -../images/ \ No newline at end of file diff --git a/metering/metering-about-metering.adoc b/metering/metering-about-metering.adoc deleted file mode 100644 index 84814aedd33d..000000000000 --- a/metering/metering-about-metering.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_content-type: ASSEMBLY -[id="about-metering"] -= About Metering -include::_attributes/common-attributes.adoc[] -:context: about-metering - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -include::modules/metering-overview.adoc[leveloffset=+1] diff --git a/metering/metering-installing-metering.adoc b/metering/metering-installing-metering.adoc deleted file mode 100644 index 566797b48389..000000000000 --- a/metering/metering-installing-metering.adoc +++ /dev/null @@ -1,62 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-metering"] -= Installing metering -include::_attributes/common-attributes.adoc[] -:context: installing-metering - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -Review the following sections before installing metering into your cluster. - -To get started installing metering, first install the Metering Operator from OperatorHub. Next, configure your instance of metering by creating a `MeteringConfig` custom resource (CR). Installing the Metering Operator creates a default `MeteringConfig` resource that you can modify using the examples in the documentation. After creating your `MeteringConfig` resource, install the metering stack. Last, verify your installation. - -include::modules/metering-install-prerequisites.adoc[leveloffset=+1] - -include::modules/metering-install-operator.adoc[leveloffset=+1] - -// Including this content directly in the assembly because the workflow requires linking off to the config docs, and we don't current link -// inside of modules - klamenzo 2019-09-23 -[id="metering-install-metering-stack_{context}"] -== Installing the metering stack - -After adding the Metering Operator to your cluster you can install the components of metering by installing the metering stack. - -== Prerequisites - -* Review the xref:../metering/configuring_metering/metering-about-configuring.adoc#metering-about-configuring[configuration options] -* Create a `MeteringConfig` resource. You can begin the following process to generate a default `MeteringConfig` resource, then use the examples in the documentation to modify this default file for your specific installation. Review the following topics to create your `MeteringConfig` resource: -** For configuration options, review xref:../metering/configuring_metering/metering-about-configuring.adoc#metering-about-configuring[About configuring metering]. 
-** At a minimum, you need to xref:../metering/configuring_metering/metering-configure-persistent-storage.adoc#metering-configure-persistent-storage[configure persistent storage] and xref:../metering/configuring_metering/metering-configure-hive-metastore.adoc#metering-configure-hive-metastore[configure the Hive metastore]. - -[IMPORTANT] -==== -There can only be one `MeteringConfig` resource in the `openshift-metering` namespace. Any other configuration is not supported. -==== - -.Procedure - -. From the web console, ensure you are on the *Operator Details* page for the Metering Operator in the `openshift-metering` project. You can navigate to this page by clicking *Operators* -> *Installed Operators*, then selecting the Metering Operator. - -. Under *Provided APIs*, click *Create Instance* on the Metering Configuration card. This opens a YAML editor with the default `MeteringConfig` resource file where you can define your configuration. -+ -[NOTE] -==== -For example configuration files and all supported configuration options, review the xref:../metering/configuring_metering/metering-about-configuring.adoc#metering-about-configuring[configuring metering documentation]. -==== - -. Enter your `MeteringConfig` resource into the YAML editor and click *Create*. - -The `MeteringConfig` resource begins to create the necessary resources for your metering stack. You can now move on to verifying your installation. - -include::modules/metering-install-verify.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="metering-install-additional-resources_{context}"] -== Additional resources - -* For more information on configuration steps and available storage platforms, see xref:../metering/configuring_metering/metering-configure-persistent-storage.adoc#metering-configure-persistent-storage[Configuring persistent storage]. - -* For the steps to configure Hive, see xref:../metering/configuring_metering/metering-configure-hive-metastore.adoc#metering-configure-hive-metastore[Configuring the Hive metastore]. diff --git a/metering/metering-troubleshooting-debugging.adoc b/metering/metering-troubleshooting-debugging.adoc deleted file mode 100644 index 53333e6f390d..000000000000 --- a/metering/metering-troubleshooting-debugging.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_content-type: ASSEMBLY -[id="metering-troubleshooting-debugging"] -= Troubleshooting and debugging metering -include::_attributes/common-attributes.adoc[] -:context: metering-troubleshooting-debugging - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -Use the following sections to help troubleshoot and debug specific issues with metering. - -In addition to the information in this section, be sure to review the following topics: - -* xref:../metering/metering-installing-metering.adoc#metering-install-prerequisites_installing-metering[Prerequisites for installing metering]. 
-* xref:../metering/configuring_metering/metering-about-configuring.adoc#metering-about-configuring[About configuring metering] - -include::modules/metering-troubleshooting.adoc[leveloffset=+1] - -include::modules/metering-debugging.adoc[leveloffset=+1] diff --git a/metering/metering-uninstall.adoc b/metering/metering-uninstall.adoc deleted file mode 100644 index 256c69f27b39..000000000000 --- a/metering/metering-uninstall.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -:context: metering-uninstall -[id="metering-uninstall"] -= Uninstalling metering -include::_attributes/common-attributes.adoc[] - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -You can remove metering from your {product-title} cluster. - -[NOTE] -==== -Metering does not manage or delete Amazon S3 bucket data. After uninstalling metering, you must manually clean up S3 buckets that were used to store metering data. -==== - -[id="metering-remove"] -== Removing the Metering Operator from your cluster - -Remove the Metering Operator from your cluster by following the documentation on xref:../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster[deleting Operators from a cluster]. - -[NOTE] -==== -Removing the Metering Operator from your cluster does not remove its custom resource definitions or managed resources. See the following sections on xref:../metering/metering-uninstall.adoc#metering-uninstall_metering-uninstall[Uninstalling a metering namespace] and xref:../metering/metering-uninstall.adoc#metering-uninstall-crds_metering-uninstall[Uninstalling metering custom resource definitions] for steps to remove any remaining metering components. -==== - -include::modules/metering-uninstall.adoc[leveloffset=+1] - -include::modules/metering-uninstall-crds.adoc[leveloffset=+1] diff --git a/metering/metering-upgrading-metering.adoc b/metering/metering-upgrading-metering.adoc deleted file mode 100644 index b74273887552..000000000000 --- a/metering/metering-upgrading-metering.adoc +++ /dev/null @@ -1,148 +0,0 @@ -:_content-type: ASSEMBLY -[id="upgrading-metering"] -= Upgrading metering -include::_attributes/common-attributes.adoc[] -:context: upgrading-metering - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -You can upgrade metering to {product-version} by updating the Metering Operator subscription. - -== Prerequisites - -* The cluster is updated to {product-version}. -* The xref:../metering/metering-installing-metering.adoc#metering-install-operator_installing-metering[Metering Operator] is installed from OperatorHub. -+ -[NOTE] -==== -You must upgrade the Metering Operator to {product-version} manually. Metering does not upgrade automatically if you selected the "Automatic" *Approval Strategy* in a previous installation. -==== -* The xref:../metering/configuring_metering/metering-about-configuring.adoc#metering-about-configuring[MeteringConfig custom resource] is configured. -* The xref:../metering/metering-installing-metering.adoc#metering-install-metering-stack_installing-metering[metering stack] is installed. -* Ensure that metering status is healthy by checking that all pods are ready. - -[IMPORTANT] -==== -Potential data loss can occur if you modify your metering storage configuration after installing or upgrading metering. -==== - -.Procedure - -. Click *Operators* -> *Installed Operators* from the web console. - -. Select the `openshift-metering` project. - -. 
Click *Metering Operator*. - -. Click *Subscription* -> *Channel*. - -. In the *Change Subscription Update Channel* window, select *{product-version}* and click *Save*. -+ -[NOTE] -==== -Wait several seconds to allow the subscription to update before proceeding to the next step. -==== -. Click *Operators* -> *Installed Operators*. -+ -The Metering Operator is shown as 4.9. For example: -+ ----- -Metering -4.9.0-202107012112.p0 provided by Red Hat, Inc ----- - -.Verification -You can verify the metering upgrade by performing any of the following checks: - -* Check the Metering Operator cluster service version (CSV) for the new metering version. This can be done through either the web console or CLI. -+ --- -.Procedure (UI) - . Navigate to *Operators* -> *Installed Operators* in the metering namespace. - . Click *Metering Operator*. - . Click *Subscription* for *Subscription Details*. - . Check the *Installed Version* for the upgraded metering version. The *Starting Version* shows the metering version prior to upgrading. - -.Procedure (CLI) -* Check the Metering Operator CSV: -+ -[source,terminal] ----- -$ oc get csv | grep metering ----- -+ -.Example output for metering upgrade from 4.8 to 4.9 -[source,terminal] ----- -NAME DISPLAY VERSION REPLACES PHASE -metering-operator.4.9.0-202107012112.p0 Metering 4.9.0-202107012112.p0 metering-operator.4.8.0-202007012112.p0 Succeeded ----- --- - -* Check that all required pods in the `openshift-metering` namespace are created. This can be done through either the web console or CLI. -+ --- -[NOTE] -==== -Many pods rely on other components to function before they themselves can be considered ready. Some pods may restart if other pods take too long to start. This is to be expected during the Metering Operator upgrade. -==== - -.Procedure (UI) -* Navigate to *Workloads* -> *Pods* in the metering namespace and verify that pods are being created. This can take several minutes after upgrading the metering stack. - -.Procedure (CLI) -* Check that all required pods in the `openshift-metering` namespace are created: -+ -[source,terminal] ----- -$ oc -n openshift-metering get pods ----- -.Example output -[source,terminal] -+ ----- -NAME READY STATUS RESTARTS AGE -hive-metastore-0 2/2 Running 0 3m28s -hive-server-0 3/3 Running 0 3m28s -metering-operator-68dd64cfb6-2k7d9 2/2 Running 0 5m17s -presto-coordinator-0 2/2 Running 0 3m9s -reporting-operator-5588964bf8-x2tkn 2/2 Running 0 2m40s ----- --- - -* Verify that the `ReportDataSource` resources are importing new data, indicated by a valid timestamp in the `NEWEST METRIC` column. This might take several minutes. Filter out the "-raw" `ReportDataSource` resources, which do not import data: -+ -[source,terminal] ----- -$ oc get reportdatasources -n openshift-metering | grep -v raw ----- -+ -Timestamps in the `NEWEST METRIC` column indicate that `ReportDataSource` resources are beginning to import new data. 
-+ -.Example output -[source,terminal] ----- -NAME EARLIEST METRIC NEWEST METRIC IMPORT START IMPORT END LAST IMPORT TIME AGE -node-allocatable-cpu-cores 2021-07-01T21:10:00Z 2021-07-02T19:52:00Z 2021-07-01T19:11:00Z 2021-07-02T19:52:00Z 2021-07-02T19:56:44Z 23h -node-allocatable-memory-bytes 2021-07-01T21:10:00Z 2021-07-02T19:52:00Z 2021-07-01T19:11:00Z 2021-07-02T19:52:00Z 2021-07-02T19:52:07Z 23h -node-capacity-cpu-cores 2021-07-01T21:10:00Z 2021-07-02T19:52:00Z 2021-07-01T19:11:00Z 2021-07-02T19:52:00Z 2021-07-02T19:56:52Z 23h -node-capacity-memory-bytes 2021-07-01T21:10:00Z 2021-07-02T19:57:00Z 2021-07-01T19:10:00Z 2021-07-02T19:57:00Z 2021-07-02T19:57:03Z 23h -persistentvolumeclaim-capacity-bytes 2021-07-01T21:09:00Z 2021-07-02T19:52:00Z 2021-07-01T19:11:00Z 2021-07-02T19:52:00Z 2021-07-02T19:56:46Z 23h -persistentvolumeclaim-phase 2021-07-01T21:10:00Z 2021-07-02T19:52:00Z 2021-07-01T19:11:00Z 2021-07-02T19:52:00Z 2021-07-02T19:52:36Z 23h -persistentvolumeclaim-request-bytes 2021-07-01T21:10:00Z 2021-07-02T19:57:00Z 2021-07-01T19:10:00Z 2021-07-02T19:57:00Z 2021-07-02T19:57:03Z 23h -persistentvolumeclaim-usage-bytes 2021-07-01T21:09:00Z 2021-07-02T19:52:00Z 2021-07-01T19:11:00Z 2021-07-02T19:52:00Z 2021-07-02T19:52:02Z 23h -pod-limit-cpu-cores 2021-07-01T21:10:00Z 2021-07-02T19:57:00Z 2021-07-01T19:10:00Z 2021-07-02T19:57:00Z 2021-07-02T19:57:02Z 23h -pod-limit-memory-bytes 2021-07-01T21:10:00Z 2021-07-02T19:58:00Z 2021-07-01T19:11:00Z 2021-07-02T19:58:00Z 2021-07-02T19:59:06Z 23h -pod-persistentvolumeclaim-request-info 2021-07-01T21:10:00Z 2021-07-02T19:52:00Z 2021-07-01T19:11:00Z 2021-07-02T19:52:00Z 2021-07-02T19:52:07Z 23h -pod-request-cpu-cores 2021-07-01T21:10:00Z 2021-07-02T19:58:00Z 2021-07-01T19:11:00Z 2021-07-02T19:58:00Z 2021-07-02T19:58:57Z 23h -pod-request-memory-bytes 2021-07-01T21:10:00Z 2021-07-02T19:52:00Z 2021-07-01T19:11:00Z 2021-07-02T19:52:00Z 2021-07-02T19:55:32Z 23h -pod-usage-cpu-cores 2021-07-01T21:09:00Z 2021-07-02T19:52:00Z 2021-07-01T19:11:00Z 2021-07-02T19:52:00Z 2021-07-02T19:54:55Z 23h -pod-usage-memory-bytes 2021-07-01T21:08:00Z 2021-07-02T19:52:00Z 2021-07-01T19:11:00Z 2021-07-02T19:52:00Z 2021-07-02T19:55:00Z 23h -report-ns-pvc-usage 5h36m -report-ns-pvc-usage-hourly ----- - -After all pods are ready and you have verified that new data is being imported, metering continues to collect data and report on your cluster. Review a previously xref:../metering/reports/metering-about-reports.adoc#metering-example-report-with-schedule_metering-about-reports[scheduled report] or create a xref:../metering/reports/metering-about-reports.adoc#metering-example-report-without-schedule_metering-about-reports[run-once metering report] to confirm the metering upgrade. diff --git a/metering/metering-usage-examples.adoc b/metering/metering-usage-examples.adoc deleted file mode 100644 index c522b516c96e..000000000000 --- a/metering/metering-usage-examples.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="metering-usage-examples"] -= Examples of using metering -include::_attributes/common-attributes.adoc[] -:context: metering-usage-examples - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -Use the following example reports to get started measuring capacity, usage, and utilization in your cluster. These examples showcase the various types of reports metering offers, along with a selection of the predefined queries. 
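To see which predefined queries are available for building these example reports, you can list the `ReportQuery` resources in the metering namespace. The following command is a minimal illustration and assumes the default `openshift-metering` namespace:

[source,terminal]
----
$ oc -n openshift-metering get reportqueries
----

The names returned are the values that you can reference in the `spec.query` field of a `Report` resource.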
- -== Prerequisites -* xref:../metering/metering-installing-metering.adoc#metering-install-operator_installing-metering[Install metering] -* Review the details about xref:../metering/metering-using-metering#using-metering[writing and viewing reports]. - -include::modules/metering-cluster-capacity-examples.adoc[leveloffset=+1] - -include::modules/metering-cluster-usage-examples.adoc[leveloffset=+1] - -include::modules/metering-cluster-utilization-examples.adoc[leveloffset=+1] diff --git a/metering/metering-using-metering.adoc b/metering/metering-using-metering.adoc deleted file mode 100644 index 163b41c09a11..000000000000 --- a/metering/metering-using-metering.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_content-type: ASSEMBLY -[id="using-metering"] -= Using Metering -include::_attributes/common-attributes.adoc[] -:context: using-metering - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -== Prerequisites - -* xref:../metering/metering-installing-metering.adoc#metering-install-operator_installing-metering[Install Metering] -* Review the details about the available options that can be configured for a xref:../metering/reports/metering-about-reports.adoc#metering-about-reports[report] and how they function. - -include::modules/metering-writing-reports.adoc[leveloffset=+1] - -include::modules/metering-viewing-report-results.adoc[leveloffset=+1] diff --git a/metering/modules b/metering/modules deleted file mode 120000 index 43aab75b53c9..000000000000 --- a/metering/modules +++ /dev/null @@ -1 +0,0 @@ -../modules/ \ No newline at end of file diff --git a/metering/reports/_attributes b/metering/reports/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/metering/reports/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/metering/reports/images b/metering/reports/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/metering/reports/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/metering/reports/metering-about-reports.adoc b/metering/reports/metering-about-reports.adoc deleted file mode 100644 index c909f6fd0ee4..000000000000 --- a/metering/reports/metering-about-reports.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -[id="metering-about-reports"] -= About Reports -include::_attributes/common-attributes.adoc[] -:context: metering-about-reports - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -A `Report` custom resource provides a method to manage periodic Extract Transform and Load (ETL) jobs using SQL queries. Reports are composed from other metering resources, such as `ReportQuery` resources that provide the actual SQL query to run, and `ReportDataSource` resources that define the data available to the `ReportQuery` and `Report` resources. - -Many use cases are addressed by the predefined `ReportQuery` and `ReportDataSource` resources that come installed with metering. Therefore, you do not need to define your own unless you have a use case that is not covered by these predefined resources. 
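For illustration only, the following run-once `Report` sketch references one of the predefined queries. The query name and reporting window are example values; adjust them to your own use case:

[source,yaml]
----
apiVersion: metering.openshift.io/v1
kind: Report
metadata:
  name: cluster-cpu-capacity-example
  namespace: openshift-metering
spec:
  query: cluster-cpu-capacity # a predefined ReportQuery
  reportingStart: "2021-07-01T00:00:00Z"
  reportingEnd: "2021-07-02T00:00:00Z"
  runImmediately: true # run once over the window instead of on a schedule
----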
- -include::modules/metering-reports.adoc[leveloffset=+1] diff --git a/metering/reports/metering-storage-locations.adoc b/metering/reports/metering-storage-locations.adoc deleted file mode 100644 index ab06b989cfa3..000000000000 --- a/metering/reports/metering-storage-locations.adoc +++ /dev/null @@ -1,83 +0,0 @@ -:_content-type: ASSEMBLY -[id="metering-storage-locations"] -= Storage locations -include::_attributes/common-attributes.adoc[] -:context: metering-storage-locations - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -A `StorageLocation` custom resource configures where data will be stored by the Reporting Operator. This includes the data collected from Prometheus, and the results produced by generating a `Report` custom resource. - -You only need to configure a `StorageLocation` custom resource if you want to store data in multiple locations, like multiple S3 buckets or both S3 and HDFS, or if you wish to access a database in Hive and Presto that was not created by metering. For most users this is not a requirement, and the xref:../../metering/configuring_metering/metering-about-configuring.adoc#metering-about-configuring[documentation on configuring metering] is sufficient to configure all necessary storage components. - -== Storage location examples - -The following example shows the built-in local storage option, and is configured to use Hive. By default, data is stored wherever Hive is configured to use storage, such as HDFS, S3, or a `ReadWriteMany` persistent volume claim (PVC). - -.Local storage example -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: StorageLocation -metadata: - name: hive - labels: - operator-metering: "true" -spec: - hive: <1> - databaseName: metering <2> - unmanagedDatabase: false <3> ----- - -<1> If the `hive` section is present, then the `StorageLocation` resource will be configured to store data in Presto by creating the table using the Hive server. Only `databaseName` and `unmanagedDatabase` are required fields. -<2> The name of the database within hive. -<3> If `true`, the `StorageLocation` resource will not be actively managed, and the `databaseName` is expected to already exist in Hive. If `false`, the Reporting Operator will create the database in Hive. - -The following example uses an AWS S3 bucket for storage. The prefix is appended to the bucket name when constructing the path to use. - -.Remote storage example -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: StorageLocation -metadata: - name: example-s3-storage - labels: - operator-metering: "true" -spec: - hive: - databaseName: example_s3_storage - unmanagedDatabase: false - location: "s3a://bucket-name/path/within/bucket" <1> ----- -<1> Optional: The filesystem URL for Presto and Hive to use for the database. This can be an `hdfs://` or `s3a://` filesystem URL. - -There are additional optional fields that can be specified in the `hive` section: - -* `defaultTableProperties`: Contains configuration options for creating tables using Hive. -* `fileFormat`: The file format used for storing files in the filesystem. See the link:https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-StorageFormatsStorageFormatsRowFormat,StorageFormat,andSerDe[Hive Documentation on File Storage Format] for a list of options and more details. -* `rowFormat`: Controls the link:https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-RowFormats&SerDe[ Hive row format]. 
This controls how Hive serializes and deserializes rows. See the link:https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-RowFormats&SerDe[Hive Documentation on Row Formats and SerDe] for more details. - -== Default storage location -If an annotation `storagelocation.metering.openshift.io/is-default` exists and is set to `true` on a `StorageLocation` resource, then that resource becomes the default storage resource. Any components with a storage configuration option where the storage location is not specified will use the default storage resource. There can be only one default storage resource. If more than one resource with the annotation exists, an error is logged because the Reporting Operator cannot determine the default. - -.Default storage example -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: StorageLocation -metadata: - name: example-s3-storage - labels: - operator-metering: "true" - annotations: - storagelocation.metering.openshift.io/is-default: "true" -spec: - hive: - databaseName: example_s3_storage - unmanagedDatabase: false - location: "s3a://bucket-name/path/within/bucket" ----- diff --git a/metering/reports/modules b/metering/reports/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/metering/reports/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/metering/reports/snippets b/metering/reports/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/metering/reports/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/metering/snippets b/metering/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/metering/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/metrics/PLACEHOLDER b/metrics/PLACEHOLDER deleted file mode 100644 index 4020393e57eb..000000000000 --- a/metrics/PLACEHOLDER +++ /dev/null @@ -1,2 +0,0 @@ -Please delete this file once you have assemblies here. 
- diff --git a/metrics/_attributes b/metrics/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/metrics/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/metrics/images b/metrics/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/metrics/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/metrics/modules b/metrics/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/metrics/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/metrics/snippets b/metrics/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/metrics/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/microshift_cli_ref/_attributes b/microshift_cli_ref/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_cli_ref/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_cli_ref/images b/microshift_cli_ref/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_cli_ref/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_cli_ref/microshift-cli-tools-introduction.adoc b/microshift_cli_ref/microshift-cli-tools-introduction.adoc deleted file mode 100644 index f1c8f8b2255a..000000000000 --- a/microshift_cli_ref/microshift-cli-tools-introduction.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-cli-tools"] -= {product-title} CLI tools introduction -include::_attributes/attributes-microshift.adoc[] -:context: microshift-cli-tools-introduction - -toc::[] - -You can use different command-line interface (CLI) tools to build, deploy, and manage {product-title} clusters and workloads. With CLI tools, you can complete various administration and development operations from the terminal to manage deployments and interact with each component of the system. - -CLI tools available for use with {product-title} are the following: - -* Built-in `microshift` command types -* Linux CLI tools -* Kubernetes CLI (`kubectl`) -* The {oc-first} tool with an enabled subset of commands - -[NOTE] -==== -Commands for multi-node deployments, projects, and developer tooling are not supported by {product-title}. -==== - -[role="_additional-resources"] -[id="additional-resources_microshift-cli-tools"] -.Additional resources - -* xref:..//microshift_cli_ref/microshift-oc-cli-install.adoc#microshift-oc-cli-install[Installing the OpenShift CLI tool for MicroShift]. - -* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.14/html/cli_tools/openshift-cli-oc[Detailed description of the OpenShift CLI (oc)]. - -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9[Red Hat Enterprise Linux (RHEL) documentation for specific use cases]. \ No newline at end of file diff --git a/microshift_cli_ref/microshift-cli-using-oc.adoc b/microshift_cli_ref/microshift-cli-using-oc.adoc deleted file mode 100644 index 68b8a08ae508..000000000000 --- a/microshift_cli_ref/microshift-cli-using-oc.adoc +++ /dev/null @@ -1,84 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-cli-using-oc"] -= Using the oc tool -include::_attributes/attributes-microshift.adoc[] -:context: microshift-using-oc - -toc::[] - -The optional OpenShift CLI (`oc`) tool provides a subset of `oc` commands for {product-title} deployments. 
Using `oc` is convenient if you are familiar with {OCP} and Kubernetes. - -include::modules/microshift-cli-oc-about.adoc[leveloffset=+1] - -[id="cli-using-cli_{context}"] -== Using the OpenShift CLI in {product-title} - -Review the following sections to learn how to complete common tasks in {product-title} using the `oc` CLI. - -[id="viewing-pods_{context}"] -=== Viewing pods - -Use the `oc get pods` command to view the pods for the current project. - -[NOTE] -==== -When you run `oc` inside a pod and do not specify a namespace, the namespace of the pod is used by default. -==== - -[source,terminal] ----- -$ oc get pods -o wide ----- - -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE -cakephp-ex-1-build 0/1 Completed 0 5m45s 10.131.0.10 ip-10-0-141-74.ec2.internal -cakephp-ex-1-deploy 0/1 Completed 0 3m44s 10.129.2.9 ip-10-0-147-65.ec2.internal -cakephp-ex-1-ktz97 1/1 Running 0 3m33s 10.128.2.11 ip-10-0-168-105.ec2.internal ----- - -[id="viewing-pod-logs_{context}"] -=== Viewing pod logs - -Use the `oc logs` command to view logs for a particular pod. - -[source,terminal] ----- -$ oc logs cakephp-ex-1-deploy ----- - -.Example output -[source,terminal] ----- ---> Scaling cakephp-ex-1 to 1 ---> Success ----- - -[id="listing-supported-apis_{context}"] -=== Listing supported API resources - -Use the `oc api-resources` command to view the list of supported API resources -on the server. - -[source,terminal] ----- -$ oc api-resources ----- - -.Example output -[source,terminal] ----- -NAME SHORTNAMES APIGROUP NAMESPACED KIND -bindings true Binding -componentstatuses cs false ComponentStatus -configmaps cm true ConfigMap -... ----- - -// Getting help -include::modules/microshift-cli-oc-get-help.adoc[leveloffset=+1] - -//Errors when using oc commands not enabled in MicroShift -include::modules/microshift-oc-apis-errors.adoc[leveloffset=+1] \ No newline at end of file diff --git a/microshift_cli_ref/microshift-oc-cli-commands-list.adoc b/microshift_cli_ref/microshift-oc-cli-commands-list.adoc deleted file mode 100644 index 65b735b52e61..000000000000 --- a/microshift_cli_ref/microshift-oc-cli-commands-list.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-oc-cli-commands"] -= OpenShift CLI command reference -include::_attributes/attributes-microshift.adoc[] -:context: microshift-oc-cli-commands - -toc::[] - -Descriptions and example commands for OpenShift CLI (`oc`) commands are included in this reference document. You must have `cluster-admin` or equivalent permissions to use these commands. To list administrator commands and information about them, use the following commands: - -* Enter the `oc adm -h` command to list all administrator commands: -+ -.Command syntax -+ -[source,terminal] ----- -$ oc adm -h ----- - -* Enter the `oc --help` command to get additional details for a specific command: -+ -.Command syntax -+ -[source,terminal] ----- -$ oc --help ----- - -[IMPORTANT] -==== -Using `oc --help` lists details for any `oc` command. Not all `oc` commands apply to using {product-title}. 
-==== - -// The OCP files are auto-generated from the openshift/oc repository; use the MicroShift-specific flags to generate MicroShift command files from the same repo -include::modules/microshift-oc-by-example-content.adoc[leveloffset=+1] - -include::modules/microshift-oc-adm-by-example-content.adoc[leveloffset=+1] diff --git a/microshift_cli_ref/microshift-oc-cli-install.adoc b/microshift_cli_ref/microshift-oc-cli-install.adoc deleted file mode 100644 index 51047052db69..000000000000 --- a/microshift_cli_ref/microshift-oc-cli-install.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-oc-cli-install"] -= Getting started with the OpenShift CLI -include::_attributes/attributes-microshift.adoc[] -:context: cli-oc-installing - -toc::[] - -To use the OpenShift CLI (`oc`) tool, you must download and install it separately from your {product-title} installation. - -[id="installing-the-openshift-cli"] -== Installing the OpenShift CLI - -You can install the OpenShift CLI (`oc`) either by downloading the binary or by using Homebrew. - -// Installing the CLI by downloading the binary -include::modules/cli-installing-cli.adoc[leveloffset=+2] - -// Installing the CLI by using Homebrew -include::modules/cli-installing-cli-brew.adoc[leveloffset=+2] - -// Installing the CLI using RPM -include::modules/cli-installing-cli-rpm.adoc[leveloffset=+2] \ No newline at end of file diff --git a/microshift_cli_ref/microshift-oc-config.adoc b/microshift_cli_ref/microshift-oc-config.adoc deleted file mode 100644 index 166c16641d11..000000000000 --- a/microshift_cli_ref/microshift-oc-config.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-configuring-cli"] -= Configuring the OpenShift CLI -include::_attributes/attributes-microshift.adoc[] -:context: cli-configuring-cli - -toc::[] - -Configure `oc` based on your preferences for working with it. - -[id="cli-enabling-tab-completion"] -== Enabling tab completion - -You can enable tab completion for the Bash or Zsh shells. - -// Enabling tab completion for Bash -include::modules/cli-configuring-completion.adoc[leveloffset=+2] - -// Enabling tab completion for Zsh -include::modules/cli-configuring-completion-zsh.adoc[leveloffset=+2] diff --git a/microshift_cli_ref/microshift-usage-oc-kubectl.adoc b/microshift_cli_ref/microshift-usage-oc-kubectl.adoc deleted file mode 100644 index a1016ff3ab97..000000000000 --- a/microshift_cli_ref/microshift-usage-oc-kubectl.adoc +++ /dev/null @@ -1,62 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-usage-oc-kubectl"] -= Using oc and kubectl commands -include::_attributes/attributes-microshift.adoc[] -:context: usage-oc-kubectl - -toc::[] - -The Kubernetes command-line interface (CLI), `kubectl`, can be used to run commands against a Kubernetes cluster. Because {product-title} is a certified Kubernetes distribution, you can use the supported `kubectl` CLI tool that ships with {product-title}, or you can gain extended functionality by using the `oc` CLI tool. - -[id="microshift-kubectl-binary_{context}"] -== The kubectl CLI tool - -You can use the `kubectl` CLI tool to interact with Kubernetes primitives on your {product-title} cluster. You can also use existing `kubectl` workflows and scripts for new {product-title} users coming from another Kubernetes environment, or for those who prefer to use the `kubectl` CLI. - -The `kubectl` CLI tool is included in the archive if you download the `oc` CLI tool. 
- -For more information, read the link:https://kubernetes.io/docs/reference/kubectl/overview/[Kubernetes CLI tool documentation]. - -[id="microshift-oc-binary_{context}"] -== The oc CLI tool - -The `oc` CLI tool offers the same capabilities as the `kubectl` CLI tool, but it extends to natively support additional {product-title} features, including: - -* **Route resource** -+ -The `Route` resource object is specific to {product-title} distributions, and builds upon standard Kubernetes primitives. -+ -* **Additional commands** -+ -The additional command `oc new-app`, for example, makes it easier to get new applications started using existing source code or pre-built images. - -[IMPORTANT] -==== -If you installed an earlier version of the `oc` CLI tool, you cannot use it to complete all of the commands in {product-title} {ocp-version}. If you want the latest features, you must download and install the latest version of the `oc` CLI tool corresponding to your {product-title} version. -==== - -Non-security API changes will involve, at minimum, two minor releases (4.1 to 4.2 to 4.3, for example) to allow older `oc` binaries to update. Using new capabilities might require newer `oc` binaries. A 4.3 server might have additional capabilities that a 4.2 `oc` binary cannot use and a 4.3 `oc` binary might have additional capabilities that are unsupported by a 4.2 server. - -.Compatibility Matrix - -[cols="1,1,1"] -|=== - -| -|*X.Y* (`oc` Client) -|*X.Y+N* footnote:versionpolicyn[Where *N* is a number greater than or equal to 1.] (`oc` Client) - -|*X.Y* (Server) -|image:redcircle-1.png[] -|image:redcircle-3.png[] - -|*X.Y+N* footnote:versionpolicyn[] (Server) -|image:redcircle-2.png[] -|image:redcircle-1.png[] - -|=== -image:redcircle-1.png[] Fully compatible. - -image:redcircle-2.png[] `oc` client might not be able to access server features. - -image:redcircle-3.png[] `oc` client might provide options and features that might not be compatible with the accessed server. diff --git a/microshift_cli_ref/modules b/microshift_cli_ref/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_cli_ref/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_cli_ref/snippets b/microshift_cli_ref/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_cli_ref/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_configuring/_attributes b/microshift_configuring/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_configuring/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_configuring/images b/microshift_configuring/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_configuring/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_configuring/microshift-cluster-access-kubeconfig.adoc b/microshift_configuring/microshift-cluster-access-kubeconfig.adoc deleted file mode 100644 index 78b503705d15..000000000000 --- a/microshift_configuring/microshift-cluster-access-kubeconfig.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-kubeconfig"] -= Cluster access with kubeconfig -include::_attributes/attributes-microshift.adoc[] -:context: microshift-kubeconfig - -toc::[] - -Learn about how `kubeconfig` files are used with {product-title} deployments. 
CLI tools use `kubeconfig` files to communicate with the API server of a cluster. These files provide cluster details, IP addresses, and other information needed for authentication. - -include::modules/microshift-kubeconfig-overview.adoc[leveloffset=+1] - -include::modules/microshift-kubeconfig-local-access.adoc[leveloffset=+1] - -include::modules/microshift-accessing-cluster-locally.adoc[leveloffset=+2] - -include::modules/microshift-kubeconfig-remote-con.adoc[leveloffset=+1] - -include::modules/microshift-kubeconfig-generating-remote-kcfiles.adoc[leveloffset=+1] - -include::modules/microshift-accessing-cluster-open-firewall.adoc[leveloffset=+2] - -include::modules/microshift-accessing-cluster-remotely.adoc[leveloffset=+2] \ No newline at end of file diff --git a/microshift_configuring/microshift-using-config-tools.adoc b/microshift_configuring/microshift-using-config-tools.adoc deleted file mode 100644 index c8f77a24a629..000000000000 --- a/microshift_configuring/microshift-using-config-tools.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-using-config-tools"] -= How configuration tools work -include::_attributes/attributes-microshift.adoc[] -:context: microshift-configuring - -toc::[] - -A YAML file customizes {product-title} instances with your preferences, settings, and parameters. - -include::modules/microshift-config-yaml.adoc[leveloffset=+1] - -include::modules/microshift-config-nodeport-limits.adoc[leveloffset=+1] \ No newline at end of file diff --git a/microshift_configuring/modules b/microshift_configuring/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_configuring/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_configuring/snippets b/microshift_configuring/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_configuring/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_getting_started/_attributes b/microshift_getting_started/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_getting_started/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_getting_started/images b/microshift_getting_started/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_getting_started/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_getting_started/microshift-architecture.adoc b/microshift_getting_started/microshift-architecture.adoc deleted file mode 100644 index 038212cde83f..000000000000 --- a/microshift_getting_started/microshift-architecture.adoc +++ /dev/null @@ -1,77 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-architecture"] -= Architecture -include::_attributes/attributes-microshift.adoc[] -include::_attributes/common-attributes.adoc[] -:context: microshift-architecture - -toc::[] - -Learn the specifics of {product-title} architecture including design intent, how it differs from {oke}, and API compatibility. - -[id="microshift-architectural-design_{context}"] -== Architectural design -{product-title} is a single-node container orchestration runtime designed to extend the benefits of using containers for running applications to low-resource edge environments. 
Because {product-title} is primarily a platform for deploying applications, only the APIs and features essential to operating in edge and small form factor computing environments are included. - -For example, {product-title} contains only the following Kubernetes cluster capabilities: - -* Networking -* Ingress -* Storage -* Helm - -{product-title} also provides the following Kubernetes functions: - -* Orchestration -* Security - -To optimize your deployments, use {product-title} with a compatible operating system, such as {op-system-ostree-first}. Using {product-title} and {op-system-ostree-first} together forms {op-system-bundle}. Virtual machines are handled by the operating system in {product-title} deployments. - -.{product-title} as part of {op-system-bundle}. -image::311_RHDevice_Edge_Overview_0223_1.png[<{product-title} is tasked with only the Kubernetes cluster services networking, ingress, storage, helm, with additional Kubernetes functions of orchestration and security, as the following diagram illustrates.>] - -The following operational differences from {oke} can help you understand where {product-title} can be deployed: - -[id="microshift-differences-oke_{context}"] -== Key differences from {oke} - -* Devices with {product-title} installed are self-managing -* Compatible with RPM-OStree-based systems -* Uses only the APIs needed for essential functions, such as security and runtime controls -* Enables a subset of commands from the OpenShift CLI (`oc`) tool -* Does not support workload high availability (HA) or horizontal scalability with the addition of worker nodes - -.{product-title} differences from {oke}. -image::311_RHDevice_Edge_Overview_0223_2.png[<{product-title} is tasked with only the Kubernetes cluster capabilities of networking, ingress, storage, helm, with the additional Kubernetes functions of orchestration and security, as the following diagram illustrates.>] - -Figure 2 shows that {oke} has the same cluster capabilities as {product-title}, and adds the following: - -* Install -* Over-the-air updates -* Cluster Operators -* Operator Lifecycle Manager -* Monitoring -* Logging -* Registry -* Authorization -* Console -* Cloud Integration -* Virtual Machines (VMs) through {VirtProductName} - -In {oke} and other {OCP} deployments, all of the components from the operating system through the cluster capabilities work as one comprehensive unit, with full cluster services for a multi-node Kubernetes workload. With {product-title}, functions such as over-the-air-updates, monitoring, and logging, are performed by the operating system. - -[id="microshift-openshift-apis_{context}"] -== {product-title} OpenShift APIs - -In addition to standard Kubernetes APIs, {product-title} includes a small subset of the APIs supported by {OCP}. 
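You can confirm that these OpenShift API groups are served by a running instance with commands similar to the following. This check is illustrative only and assumes cluster access is already configured:

[source,terminal]
----
$ oc api-resources --api-group=route.openshift.io
$ oc api-resources --api-group=security.openshift.io
----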
- -[cols="1,1",options="header"] -|=== -^| API ^| API group -| xref:../microshift_rest_api/network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[Route] -| route.openshift.io/v1 -| xref:../microshift_rest_api/security_apis/securitycontextconstraints-security-openshift-io-v1.adoc#securitycontextconstraints-security-openshift-io-v1[SecurityContextConstraints] -| security.openshift.io/v1 -|=== - -include::modules/microshift-k8s-apis.adoc[leveloffset=+1] \ No newline at end of file diff --git a/microshift_getting_started/microshift-understanding.adoc b/microshift_getting_started/microshift-understanding.adoc deleted file mode 100644 index 1cd5f1b01264..000000000000 --- a/microshift_getting_started/microshift-understanding.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="con-microshift-understanding"] -= Understanding {product-title} -include::_attributes/attributes-microshift.adoc[] -:context: microshift-understanding - -toc::[] - -Get an overview of what you can do with {product-title}, a Kubernetes distribution derived from {OCP} that is designed for optimizing small form factor devices and edge computing. - -include::modules/microshift-about.adoc[leveloffset=+1] diff --git a/microshift_getting_started/modules b/microshift_getting_started/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_getting_started/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_getting_started/snippets b/microshift_getting_started/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_getting_started/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_install/_attributes b/microshift_install/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_install/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_install/images b/microshift_install/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_install/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_install/microshift-embed-in-rpm-ostree.adoc b/microshift_install/microshift-embed-in-rpm-ostree.adoc deleted file mode 100644 index 202e38dc9bd3..000000000000 --- a/microshift_install/microshift-embed-in-rpm-ostree.adoc +++ /dev/null @@ -1,59 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-embed-in-rpm-ostree"] -= Embedding {product-title} in a {op-system-ostree} image -include::_attributes/attributes-microshift.adoc[] -:context: microshift-embed-in-rpm-ostree - -toc::[] - -You can embed {product-title} into a {op-system-ostree-first} {op-system-version} image. Use this guide to build a {op-system} image containing {product-title}. 
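As a rough sketch of where this guide leads, an ostree-based build is started with image builder after the repositories and blueprint are in place. The blueprint name `minimal-microshift` is a placeholder; the full blueprint and repository setup are covered in the modules that follow:

[source,terminal]
----
$ sudo composer-cli compose start-ostree minimal-microshift edge-container
----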
- -include::snippets/microshift-tech-preview-snip.adoc[leveloffset=+1] - -include::modules/microshift-preparing-for-image-building.adoc[leveloffset=+1] - -include::modules/microshift-adding-repos-to-image-builder.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/composing_installing_and_managing_rhel_for_edge_images/setting-up-image-builder_composing-installing-managing-rhel-for-edge-images#edge-image-builder-system-requirements_setting-up-image-builder[Image Builder system requirements] -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/composing_installing_and_managing_rhel_for_edge_images/setting-up-image-builder_composing-installing-managing-rhel-for-edge-images#edge-installing-image-builder_setting-up-image-builder[Installing Image Builder] - - -include::modules/microshift-adding-service-to-blueprint.adoc[leveloffset=+1] - -include::modules/microshift-creating-ostree-iso.adoc[leveloffset=+1] - -include::modules/microshift-add-blueprint-build-iso.adoc[leveloffset=+1] - -include::modules/microshift-download-iso-prep-for-use.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/composing_installing_and_managing_rhel_for_edge_images/composing-a-rhel-for-edge-image-using-image-builder-command-line_composing-installing-managing-rhel-for-edge-images[Creating a RHEL for Edge Container blueprint using image builder CLI] -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/composing_installing_and_managing_rhel_for_edge_images/composing-a-rhel-for-edge-image-using-image-builder-command-line_composing-installing-managing-rhel-for-edge-images#image-customizations_composing-a-rhel-for-edge-image-using-image-builder-command-line[Supported image customizations] -* link:https://www.osbuild.org/guides/image-builder-on-premises/building-ostree-images.html#building-ostree-image[Building ostree images] -* link:https://www.osbuild.org/guides/image-builder-on-premises/blueprint-reference.html[Blueprint reference] -* link:https://podman.io/docs/installation[Installing podman] - -include::modules/microshift-provisioning-ostree.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html-single/composing_installing_and_managing_rhel_for_edge_images/index[{op-system-ostree} documentation] -* xref:../microshift_install/microshift-install-rpm.adoc#microshift-install-system-requirements_microshift-install-rpm[System requirements for installing MicroShift] -* link:https://console.redhat.com/openshift/install/pull-secret[Red Hat Hybrid Cloud Console pull secret] -* xref:../microshift_networking/microshift-firewall.adoc#microshift-firewall-req-settings_microshift-firewall[Required firewall settings] -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/performing_an_advanced_rhel_9_installation/creating-kickstart-files_installing-rhel-as-an-experienced-user[Creating a Kickstart file] -* link:https://access.redhat.com/solutions/60959[How to embed a Kickstart file into an ISO image] - -include::modules/microshift-accessing.adoc[leveloffset=+1] - -include::modules/microshift-accessing-cluster-locally.adoc[leveloffset=+2] - 
-include::modules/microshift-accessing-cluster-open-firewall.adoc[leveloffset=+2] - -include::modules/microshift-accessing-cluster-remotely.adoc[leveloffset=+2] diff --git a/microshift_install/microshift-greenboot.adoc b/microshift_install/microshift-greenboot.adoc deleted file mode 100644 index 58070df51102..000000000000 --- a/microshift_install/microshift-greenboot.adoc +++ /dev/null @@ -1,44 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-greenboot"] -= The greenboot health check -include::_attributes/attributes-microshift.adoc[] -:context: microshift-greenboot - -toc::[] - -Greenboot is the generic health check framework for the `systemd` service on RPM-OSTree-based systems. The `microshift-greenboot` RPM and `greenboot-default-health-check` are optional RPM packages you can install. Greenboot is used to assess system health and automate a rollback to the last healthy state in the event of software trouble. - -This health check framework is especially useful when you need to check for software problems and perform system rollbacks on edge devices where direct serviceability is either limited or non-existent. When health check scripts are installed and configured, health checks run every time the system starts. - -Using greenboot can reduce your risk of being locked out of edge devices during updates and prevent a significant interruption of service if an update fails. When a failure is detected, the system boots into the last known working configuration using the `rpm-ostree` rollback capability. - -A {product-title} health check script is included in the `microshift-greenboot` RPM. The `greenboot-default-health-check` RPM includes health check scripts verifying that DNS and `ostree` services are accessible. You can also create your own health check scripts based on the workloads you are running. You can write one that verifies that an application has started, for example. - -[NOTE] -==== -Rollback is not possible in the case of an update failure on a system not using OSTree. This is true even though health checks might run. 
-==== - -include::modules/microshift-greenboot-dir-structure.adoc[leveloffset=+1] - -include::modules/microshift-greenboot-microshift-health-script.adoc[leveloffset=+1] - -include::modules/microshift-greenboot-systemd-journal-data.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../microshift_running_apps/microshift-applications.adoc#microshift-manifests-example_applications-microshift[Auto applying manifests] - -include::modules/microshift-greenboot-updates-workloads.adoc[leveloffset=+1] - -include::modules/microshift-greenboot-workloads-validation.adoc[leveloffset=+1] - -include::modules/microshift-greenboot-health-check-log.adoc[leveloffset=+1] - -include::modules/microshift-greenboot-prerollback-log.adoc[leveloffset=+1] - -include::modules/microshift-greenboot-check-update.adoc[leveloffset=+1] - -//[role="_additional-resources_microshift-greenboot"] -//.Additional resources -//once the greenboot application health check is merged, an assembly-level xref can go here \ No newline at end of file diff --git a/microshift_install/microshift-install-rpm.adoc b/microshift_install/microshift-install-rpm.adoc deleted file mode 100644 index abd5e837642f..000000000000 --- a/microshift_install/microshift-install-rpm.adoc +++ /dev/null @@ -1,50 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-install-rpm"] -= Installing {product-title} from an RPM package -include::_attributes/attributes-microshift.adoc[] -:context: microshift-install-rpm - -toc::[] - -You can install {product-title} from an RPM package on a machine with {op-system-base-full} {op-system-version}. - -include::snippets/microshift-tech-preview-snip.adoc[leveloffset=+1] - -include::modules/microshift-install-system-requirements.adoc[leveloffset=+1] - -include::modules/microshift-install-rpm-before.adoc[leveloffset=+1] - -include::modules/microshift-install-rpm-preparing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* Download the link:https://console.redhat.com/openshift/install/pull-secret[pull secret] from the Red Hat Hybrid Cloud Console. -* xref:../microshift_configuring/microshift-using-config-tools.adoc#microshift-using-config-tools[Configuring MicroShift]. -* For more options on partition configuration, read link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html-single/performing_a_standard_rhel_9_installation/index#manual-partitioning_graphical-installation[Configuring Manual Partitioning]. -* For more information about resizing your existing LVs to free up capacity in your VGs, read link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html-single/configuring_and_managing_logical_volumes/index#managing-lvm-volume-groups_configuring-and-managing-logical-volumes[Managing LVM Volume Groups]. -* For more information about creating VGs and PVs, read link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/configuring_and_managing_logical_volumes/overview-of-logical-volume-management_configuring-and-managing-logical-volumes[Overview of logical volume management]. - -include::modules/microshift-install-rpms.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../microshift_install/microshift-install-rpm.adoc#microshift-install-system-requirements_microshift-install-rpm[System requirements for installing MicroShift]. 
-* xref:../microshift_install/microshift-install-rpm.adoc#microshift-install-rpm-preparing_microshift-install-rpm[Preparing to install MicroShift from an RPM package]. - -include::modules/microshift-service-starting.adoc[leveloffset=+1] - -include::modules/microshift-service-stopping.adoc[leveloffset=+1] - -include::modules/microshift-accessing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../microshift_cli_ref/microshift-oc-cli-install.adoc#microshift-oc-cli-install[Installing the OpenShift CLI tool]. - -include::modules/microshift-accessing-cluster-locally.adoc[leveloffset=+2] - -include::modules/microshift-accessing-cluster-open-firewall.adoc[leveloffset=+2] - -include::modules/microshift-accessing-cluster-remotely.adoc[leveloffset=+2] - -//note: additional resources are deliberately set without ID and context so that they trail modules; these are not intended to appear as assembly-level additional resources \ No newline at end of file diff --git a/microshift_install/modules b/microshift_install/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_install/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_install/snippets b/microshift_install/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_install/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_networking/_attributes b/microshift_networking/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_networking/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_networking/images b/microshift_networking/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_networking/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_networking/microshift-firewall.adoc b/microshift_networking/microshift-firewall.adoc deleted file mode 100644 index ccd6d7bba098..000000000000 --- a/microshift_networking/microshift-firewall.adoc +++ /dev/null @@ -1,35 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-using-a-firewall"] -= Using a firewall -include::_attributes/attributes-microshift.adoc[] -:context: microshift-firewall - -toc::[] - -Firewalls are not required in {product-title}, but using a firewall can prevent undesired access to the {product-title} API. 
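For example, if you run `firewalld`, you can open the API server port for remote access with commands similar to the following. This is a sketch that assumes the default API port 6443 and the `public` zone; review the required and optional firewall settings in the sections that follow before applying changes:

[source,terminal]
----
$ sudo firewall-cmd --permanent --zone=public --add-port=6443/tcp
$ sudo firewall-cmd --reload
----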
- -include::modules/microshift-firewall-about.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources"] -.Additional resources - -* xref:../microshift_networking/microshift-firewall.adoc#microshift-firewall-req-settings_microshift-firewall[Required firewall settings] -* xref:..//microshift_networking/microshift-firewall.adoc#microshift-firewall-allow-traffic_microshift-firewall[Allowing network traffic through the firewall] - -include::modules/microshift-firewalld-install.adoc[leveloffset=+1] -include::modules/microshift-firewall-req-settings.adoc[leveloffset=+1] -include::modules/microshift-firewall-opt-settings.adoc[leveloffset=+1] -include::modules/microshift-firewall-allow-traffic.adoc[leveloffset=+1] -include::modules/microshift-firewall-apply-settings.adoc[leveloffset=+1] -include::modules/microshift-firewall-verify-settings.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_microshift-using-a-firewall"] -.Additional resources - -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/configuring_firewalls_and_packet_filters/using-and-configuring-firewalld_firewall-packet-filters[RHEL: Using and configuring firewalld] - -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/configuring_firewalls_and_packet_filters/using-and-configuring-firewalld_firewall-packet-filters#viewing-the-current-status-and-settings-of-firewalld_using-and-configuring-firewalld[RHEL: Viewing the current status of firewalld] - -include::modules/microshift-firewall-known-issue.adoc[leveloffset=+1] diff --git a/microshift_networking/microshift-networking.adoc b/microshift_networking/microshift-networking.adoc deleted file mode 100644 index ba1bdbd99cbd..000000000000 --- a/microshift_networking/microshift-networking.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-applying-networking-settings"] -= Understanding networking settings -include::_attributes/attributes-microshift.adoc[] -:context: microshift-networking - -toc::[] - -Learn how to apply networking customization and default settings to {product-title} deployments. Each node is contained to a single machine and single {product-title}, so each deployment requires individual configuration, pods, and settings. - -Cluster Administrators have several options for exposing applications that run inside a cluster to external traffic and securing network connections: - -* A service such as NodePort - -* API resources, such as `Ingress` and `Route` - -By default, Kubernetes allocates each pod an internal IP address for applications running within the pod. Pods and their containers can have traffic between them, but clients outside the cluster do not have direct network access to pods except when exposed with a service such as NodePort. - -[NOTE] -==== -To troubleshoot connection problems with the NodePort service, read about the known issue in the Release Notes. 
-==== - -include::modules/microshift-cni.adoc[leveloffset=+1] - -include::modules/microshift-configuring-ovn.adoc[leveloffset=+1] - -include::modules/microshift-restart-ovnkube-master.adoc[leveloffset=+1] - -include::modules/microshift-http-proxy.adoc[leveloffset=+1] - -include::modules/microshift-rpm-ostree-https.adoc[leveloffset=+1] - -include::modules/microshift-cri-o-container-runtime.adoc[leveloffset=+1] - -include::modules/microshift-ovs-snapshot.adoc[leveloffset=+1] - -include::modules/microshift-deploying-a-load-balancer.adoc[leveloffset=+1] - -include::modules/microshift-blocking-nodeport-access.adoc[leveloffset=+1] - -include::modules/microshift-mDNS.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_microshift-understanding-networking-settings"] -.Additional resources - -* xref:../microshift_release_notes/microshift-4-14-release-notes.adoc#microshift-4-14-known-issues[{product-title} {product-version} release notes --> Known issues] diff --git a/microshift_networking/modules b/microshift_networking/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_networking/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_networking/snippets b/microshift_networking/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_networking/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_release_notes/_attributes b/microshift_release_notes/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_release_notes/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_release_notes/images b/microshift_release_notes/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_release_notes/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_release_notes/microshift-4-12-release-notes.adoc b/microshift_release_notes/microshift-4-12-release-notes.adoc deleted file mode 100644 index dae99bd25c8a..000000000000 --- a/microshift_release_notes/microshift-4-12-release-notes.adoc +++ /dev/null @@ -1,11 +0,0 @@ -[id="microshift-4-12-release-notes"] -= {product-title} {product-version} release notes -include::_attributes/attributes-microshift.adoc[] -:context: release-notes - -Do not add or edit release notes here. Edit release notes directly in the branch -that they are relevant for. - -Release note changes should be added/edited in their own PR. - -This file is here to allow builds to work. diff --git a/microshift_release_notes/microshift-4-13-release-notes.adoc b/microshift_release_notes/microshift-4-13-release-notes.adoc deleted file mode 100644 index 1a1acab21ad5..000000000000 --- a/microshift_release_notes/microshift-4-13-release-notes.adoc +++ /dev/null @@ -1,11 +0,0 @@ -[id="microshift-4-13-release-notes"] -= {product-title} {product-version} release notes -include::_attributes/attributes-microshift.adoc[] -:context: release-notes - -Do not add or edit release notes here. Edit release notes directly in the branch -that they are relevant for. - -Release note changes should be added/edited in their own PR. - -This file is here to allow builds to work. 
diff --git a/microshift_release_notes/microshift-4-14-release-notes.adoc b/microshift_release_notes/microshift-4-14-release-notes.adoc deleted file mode 100644 index 0cdebd29ac07..000000000000 --- a/microshift_release_notes/microshift-4-14-release-notes.adoc +++ /dev/null @@ -1,11 +0,0 @@ -[id="microshift-4-14-release-notes"] -= {product-title} {product-version} release notes -include::_attributes/attributes-microshift.adoc[] -:context: release-notes - -Do not add or edit release notes here. Edit release notes directly in the branch -that they are relevant for. - -Release note changes should be added/edited in their own PR. - -This file is here to allow builds to work. diff --git a/microshift_release_notes/modules b/microshift_release_notes/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_release_notes/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_release_notes/snippets b/microshift_release_notes/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_release_notes/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_rest_api/modules b/microshift_rest_api/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_rest_api/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_rest_api/network_apis/route-route-openshift-io-v1.adoc b/microshift_rest_api/network_apis/route-route-openshift-io-v1.adoc deleted file mode 100644 index a3165a17636c..000000000000 --- a/microshift_rest_api/network_apis/route-route-openshift-io-v1.adoc +++ /dev/null @@ -1,1204 +0,0 @@ -// Automatically generated by 'openshift-apidocs-gen'. Do not edit. -:_content-type: ASSEMBLY -[id="route-route-openshift-io-v1"] -= Route [route.openshift.io/v1] -:toc: macro -:toc-title: - -toc::[] - - -Description:: -+ --- -A route allows developers to expose services through an HTTP(S) aware load balancing and proxy layer via a public DNS entry. The route may further specify TLS options and a certificate, or specify a public CNAME that the router should also accept for HTTP and HTTPS traffic. An administrator typically configures their router to be visible outside the cluster firewall, and may also add additional security, caching, or traffic controls on the service content. Routers usually talk directly to the service endpoints. - -Once a route is created, the `host` field may not be changed. Generally, routers use the oldest route with a given host when resolving conflicts. - -Routers are subject to additional customization and may support additional controls via the annotations field. - -Because administrators may configure multiple routers, the route status field is used to return information to clients about the names and states of the route under each router. If a client chooses a duplicate name, for instance, the route status conditions are used to indicate the route cannot be chosen. - -To enable HTTP/2 ALPN on a route it requires a custom (non-wildcard) certificate. This prevents connection coalescing by clients, notably web browsers. We do not support HTTP/2 ALPN on routes that use the default certificate because of the risk of connection re-use/coalescing. Routes that do not have their own custom certificate will not be HTTP/2 ALPN-enabled on either the frontend or the backend. 
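For illustration only, and not part of the generated schema text, a minimal edge-terminated route might look like the following; the host and service names are placeholders:

[source,yaml]
----
apiVersion: route.openshift.io/v1
kind: Route
metadata:
  name: frontend
spec:
  host: frontend.apps.example.com
  to:
    kind: Service
    name: frontend
  tls:
    termination: edge
----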
- -Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). --- - -Type:: - `object` - -Required:: - - `spec` - - -== Specification - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `apiVersion` -| `string` -| APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - -| `kind` -| `string` -| Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - -| `metadata` -| `ObjectMeta_v2` -| - -| `spec` -| `object` -| RouteSpec describes the hostname or path the route exposes, any security information, and one to four backends (services) the route points to. Requests are distributed among the backends depending on the weights assigned to each backend. When using roundrobin scheduling the portion of requests that go to each backend is the backend weight divided by the sum of all of the backend weights. When the backend has more than one endpoint the requests that end up on the backend are roundrobin distributed among the endpoints. Weights are between 0 and 256 with default 100. Weight 0 causes no requests to the backend. If all weights are zero the route will be considered to have no backends and return a standard 503 response. - -The `tls` field is optional and allows specific certificates or behavior for the route. Routers typically configure a default certificate on a wildcard domain to terminate routes without explicit certificates, but custom hostnames usually must choose passthrough (send traffic directly to the backend via the TLS Server-Name- Indication field) or provide a certificate. - -| `status` -| `object` -| RouteStatus provides relevant info about the status of a route, including which routers acknowledge it. - -|=== -=== .spec -Description:: -+ --- -RouteSpec describes the hostname or path the route exposes, any security information, and one to four backends (services) the route points to. Requests are distributed among the backends depending on the weights assigned to each backend. When using roundrobin scheduling the portion of requests that go to each backend is the backend weight divided by the sum of all of the backend weights. When the backend has more than one endpoint the requests that end up on the backend are roundrobin distributed among the endpoints. Weights are between 0 and 256 with default 100. Weight 0 causes no requests to the backend. If all weights are zero the route will be considered to have no backends and return a standard 503 response. - -The `tls` field is optional and allows specific certificates or behavior for the route. Routers typically configure a default certificate on a wildcard domain to terminate routes without explicit certificates, but custom hostnames usually must choose passthrough (send traffic directly to the backend via the TLS Server-Name- Indication field) or provide a certificate. 
--- - -Type:: - `object` - -Required:: - - `to` - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `alternateBackends` -| `array` -| alternateBackends allows up to 3 additional backends to be assigned to the route. Only the Service kind is allowed, and it will be defaulted to Service. Use the weight field in RouteTargetReference object to specify relative preference. - -| `alternateBackends[]` -| `object` -| RouteTargetReference specifies the target that resolve into endpoints. Only the 'Service' kind is allowed. Use 'weight' field to emphasize one over others. - -| `host` -| `string` -| host is an alias/DNS that points to the service. Optional. If not specified a route name will typically be automatically chosen. Must follow DNS952 subdomain conventions. - -| `path` -| `string` -| path that the router watches for, to route traffic for to the service. Optional - -| `port` -| `object` -| RoutePort defines a port mapping from a router to an endpoint in the service endpoints. - -| `subdomain` -| `string` -| subdomain is a DNS subdomain that is requested within the ingress controller's domain (as a subdomain). If host is set this field is ignored. An ingress controller may choose to ignore this suggested name, in which case the controller will report the assigned name in the status.ingress array or refuse to admit the route. If this value is set and the server does not support this field host will be populated automatically. Otherwise host is left empty. The field may have multiple parts separated by a dot, but not all ingress controllers may honor the request. This field may not be changed after creation except by a user with the update routes/custom-host permission. - -Example: subdomain `frontend` automatically receives the router subdomain `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`. - -| `tls` -| `object` -| TLSConfig defines config used to secure a route and provide termination - -| `to` -| `object` -| RouteTargetReference specifies the target that resolve into endpoints. Only the 'Service' kind is allowed. Use 'weight' field to emphasize one over others. - -| `wildcardPolicy` -| `string` -| Wildcard policy if any for the route. Currently only 'Subdomain' or 'None' is allowed. - -|=== -=== .spec.alternateBackends -Description:: -+ --- -alternateBackends allows up to 3 additional backends to be assigned to the route. Only the Service kind is allowed, and it will be defaulted to Service. Use the weight field in RouteTargetReference object to specify relative preference. --- - -Type:: - `array` - - - - -=== .spec.alternateBackends[] -Description:: -+ --- -RouteTargetReference specifies the target that resolve into endpoints. Only the 'Service' kind is allowed. Use 'weight' field to emphasize one over others. --- - -Type:: - `object` - -Required:: - - `kind` - - `name` - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `kind` -| `string` -| The kind of target that the route is referring to. Currently, only 'Service' is allowed - -| `name` -| `string` -| name of the service/target that is being referred to. e.g. name of the service - -| `weight` -| `integer` -| weight as an integer between 0 and 256, default 100, that specifies the target's relative weight against other target reference objects. 0 suppresses requests to this backend. - -|=== -=== .spec.port -Description:: -+ --- -RoutePort defines a port mapping from a router to an endpoint in the service endpoints. 
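A brief sketch of the two forms `targetPort` can take (the port number and name below are hypothetical):

[source,yaml]
----
# targetPort selects the backend port either by number or, when given as a
# string, by the port name looked up in the target endpoints' port list.
spec:
  port:
    targetPort: 8443
---
spec:
  port:
    targetPort: https
----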
--- - -Type:: - `object` - -Required:: - - `targetPort` - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `targetPort` -| `IntOrString` -| The target port on pods selected by the service this route points to. If this is a string, it will be looked up as a named port in the target endpoints port list. Required - -|=== -=== .spec.tls -Description:: -+ --- -TLSConfig defines config used to secure a route and provide termination --- - -Type:: - `object` - -Required:: - - `termination` - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `caCertificate` -| `string` -| caCertificate provides the cert authority certificate contents - -| `certificate` -| `string` -| certificate provides certificate contents. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate. - -| `destinationCACertificate` -| `string` -| destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt termination this file should be provided in order to have routers use it for health checks on the secure connection. If this field is not specified, the router may provide its own destination CA and perform hostname validation using the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically verify. - -| `insecureEdgeTerminationPolicy` -| `string` -| insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. While each router may make its own decisions on which ports to expose, this is normally port 80. - -* Allow - traffic is sent to the server on the insecure port (default) * Disable - no traffic is allowed on the insecure port. * Redirect - clients are redirected to the secure port. - -| `key` -| `string` -| key provides key file contents - -| `termination` -| `string` -| termination indicates termination type. - -* edge - TLS termination is done by the router and http is used to communicate with the backend (default) * passthrough - Traffic is sent straight to the destination without the router providing TLS termination * reencrypt - TLS termination is done by the router and https is used to communicate with the backend - -|=== -=== .spec.to -Description:: -+ --- -RouteTargetReference specifies the target that resolve into endpoints. Only the 'Service' kind is allowed. Use 'weight' field to emphasize one over others. --- - -Type:: - `object` - -Required:: - - `kind` - - `name` - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `kind` -| `string` -| The kind of target that the route is referring to. Currently, only 'Service' is allowed - -| `name` -| `string` -| name of the service/target that is being referred to. e.g. name of the service - -| `weight` -| `integer` -| weight as an integer between 0 and 256, default 100, that specifies the target's relative weight against other target reference objects. 0 suppresses requests to this backend. - -|=== -=== .status -Description:: -+ --- -RouteStatus provides relevant info about the status of a route, including which routers acknowledge it. --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `ingress` -| `array` -| ingress describes the places where the route may be exposed. The list of ingress points may contain duplicate Host or RouterName values. 
Routes are considered live once they are `Ready` - -| `ingress[]` -| `object` -| RouteIngress holds information about the places where a route is exposed. - -|=== -=== .status.ingress -Description:: -+ --- -ingress describes the places where the route may be exposed. The list of ingress points may contain duplicate Host or RouterName values. Routes are considered live once they are `Ready` --- - -Type:: - `array` - - - - -=== .status.ingress[] -Description:: -+ --- -RouteIngress holds information about the places where a route is exposed. --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `conditions` -| `array` -| Conditions is the state of the route, may be empty. - -| `conditions[]` -| `object` -| RouteIngressCondition contains details for the current condition of this route on a particular router. - -| `host` -| `string` -| Host is the host string under which the route is exposed; this value is required - -| `routerCanonicalHostname` -| `string` -| CanonicalHostname is the external host name for the router that can be used as a CNAME for the host requested for this route. This value is optional and may not be set in all cases. - -| `routerName` -| `string` -| Name is a name chosen by the router to identify itself; this value is required - -| `wildcardPolicy` -| `string` -| Wildcard policy is the wildcard policy that was allowed where this route is exposed. - -|=== -=== .status.ingress[].conditions -Description:: -+ --- -Conditions is the state of the route, may be empty. --- - -Type:: - `array` - - - - -=== .status.ingress[].conditions[] -Description:: -+ --- -RouteIngressCondition contains details for the current condition of this route on a particular router. --- - -Type:: - `object` - -Required:: - - `type` - - `status` - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `lastTransitionTime` -| `Time` -| RFC 3339 date and time when this condition last transitioned - -| `message` -| `string` -| Human readable message indicating details about last transition. - -| `reason` -| `string` -| (brief) reason for the condition's last transition, and is usually a machine and human readable constant - -| `status` -| `string` -| Status is the status of the condition. Can be True, False, Unknown. - -| `type` -| `string` -| Type is the type of the condition. Currently only Admitted. - -|=== - -== API endpoints - -The following API endpoints are available: - -* `/apis/route.openshift.io/v1/routes` -- `GET`: list or watch objects of kind Route -* `/apis/route.openshift.io/v1/watch/routes` -- `GET`: watch individual changes to a list of Route. deprecated: use the 'watch' parameter with a list operation instead. -* `/apis/route.openshift.io/v1/namespaces/{namespace}/routes` -- `DELETE`: delete collection of Route -- `GET`: list or watch objects of kind Route -- `POST`: create a Route -* `/apis/route.openshift.io/v1/watch/namespaces/{namespace}/routes` -- `GET`: watch individual changes to a list of Route. deprecated: use the 'watch' parameter with a list operation instead. -* `/apis/route.openshift.io/v1/namespaces/{namespace}/routes/{name}` -- `DELETE`: delete a Route -- `GET`: read the specified Route -- `PATCH`: partially update the specified Route -- `PUT`: replace the specified Route -* `/apis/route.openshift.io/v1/watch/namespaces/{namespace}/routes/{name}` -- `GET`: watch changes to an object of kind Route. 
deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter. -* `/apis/route.openshift.io/v1/namespaces/{namespace}/routes/{name}/status` -- `GET`: read status of the specified Route -- `PATCH`: partially update status of the specified Route -- `PUT`: replace status of the specified Route - - -=== /apis/route.openshift.io/v1/routes - - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `allowWatchBookmarks` -| `boolean` -| allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. -| `continue` -| `string` -| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. -| `fieldSelector` -| `string` -| A selector to restrict the list of returned objects by their fields. Defaults to everything. -| `labelSelector` -| `string` -| A selector to restrict the list of returned objects by their labels. Defaults to everything. -| `limit` -| `integer` -| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. -| `pretty` -| `string` -| If 'true', then the output is pretty printed. -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `resourceVersionMatch` -| `string` -| resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `timeoutSeconds` -| `integer` -| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. -| `watch` -| `boolean` -| Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. -|=== - -HTTP method:: - `GET` - -Description:: - list or watch objects of kind Route - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `RouteList` schema -| 401 - Unauthorized -| Empty -|=== - - -=== /apis/route.openshift.io/v1/watch/routes - - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `allowWatchBookmarks` -| `boolean` -| allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. -| `continue` -| `string` -| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. -| `fieldSelector` -| `string` -| A selector to restrict the list of returned objects by their fields. Defaults to everything. 
-| `labelSelector` -| `string` -| A selector to restrict the list of returned objects by their labels. Defaults to everything. -| `limit` -| `integer` -| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. -| `pretty` -| `string` -| If 'true', then the output is pretty printed. -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `resourceVersionMatch` -| `string` -| resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `timeoutSeconds` -| `integer` -| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. -| `watch` -| `boolean` -| Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. -|=== - -HTTP method:: - `GET` - -Description:: - watch individual changes to a list of Route. deprecated: use the 'watch' parameter with a list operation instead. - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `WatchEvent` schema -| 401 - Unauthorized -| Empty -|=== - - -=== /apis/route.openshift.io/v1/namespaces/{namespace}/routes - -.Global path parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `namespace` -| `string` -| object name and auth scope, such as for teams and projects -|=== - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `pretty` -| `string` -| If 'true', then the output is pretty printed. -|=== - -HTTP method:: - `DELETE` - -Description:: - delete collection of Route - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `continue` -| `string` -| The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed -| `fieldSelector` -| `string` -| A selector to restrict the list of returned objects by their fields. Defaults to everything. -| `gracePeriodSeconds` -| `integer` -| The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. -| `labelSelector` -| `string` -| A selector to restrict the list of returned objects by their labels. Defaults to everything. -| `limit` -| `integer` -| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. -| `orphanDependents` -| `boolean` -| Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. 
Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. -| `propagationPolicy` -| `string` -| Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `resourceVersionMatch` -| `string` -| resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `timeoutSeconds` -| `integer` -| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. -|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| `DeleteOptions` schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `Status` schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `GET` - -Description:: - list or watch objects of kind Route - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `allowWatchBookmarks` -| `boolean` -| allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. -| `continue` -| `string` -| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. -| `fieldSelector` -| `string` -| A selector to restrict the list of returned objects by their fields. Defaults to everything. -| `labelSelector` -| `string` -| A selector to restrict the list of returned objects by their labels. Defaults to everything. -| `limit` -| `integer` -| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `resourceVersionMatch` -| `string` -| resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `timeoutSeconds` -| `integer` -| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. -| `watch` -| `boolean` -| Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. -|=== - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `RouteList` schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `POST` - -Description:: - create a Route - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed -| `fieldManager` -| `string` -| fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
-|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 201 - Created -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 202 - Accepted -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 401 - Unauthorized -| Empty -|=== - - -=== /apis/route.openshift.io/v1/watch/namespaces/{namespace}/routes - -.Global path parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `namespace` -| `string` -| object name and auth scope, such as for teams and projects -|=== - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `allowWatchBookmarks` -| `boolean` -| allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. -| `continue` -| `string` -| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. -| `fieldSelector` -| `string` -| A selector to restrict the list of returned objects by their fields. Defaults to everything. -| `labelSelector` -| `string` -| A selector to restrict the list of returned objects by their labels. Defaults to everything. -| `limit` -| `integer` -| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. -| `pretty` -| `string` -| If 'true', then the output is pretty printed. -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `resourceVersionMatch` -| `string` -| resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `timeoutSeconds` -| `integer` -| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. -| `watch` -| `boolean` -| Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. -|=== - -HTTP method:: - `GET` - -Description:: - watch individual changes to a list of Route. deprecated: use the 'watch' parameter with a list operation instead. - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `WatchEvent` schema -| 401 - Unauthorized -| Empty -|=== - - -=== /apis/route.openshift.io/v1/namespaces/{namespace}/routes/{name} - -.Global path parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `name` -| `string` -| name of the Route -| `namespace` -| `string` -| object name and auth scope, such as for teams and projects -|=== - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `pretty` -| `string` -| If 'true', then the output is pretty printed. -|=== - -HTTP method:: - `DELETE` - -Description:: - delete a Route - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed -| `gracePeriodSeconds` -| `integer` -| The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. 
-| `orphanDependents` -| `boolean` -| Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. -| `propagationPolicy` -| `string` -| Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. -|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| `DeleteOptions` schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `Status` schema -| 202 - Accepted -| `Status` schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `GET` - -Description:: - read the specified Route - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `PATCH` - -Description:: - partially update the specified Route - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed -| `fieldManager` -| `string` -| fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). -| `force` -| `boolean` -| Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. -|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| `Patch` schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 201 - Created -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `PUT` - -Description:: - replace the specified Route - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed -| `fieldManager` -| `string` -| fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. -|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 201 - Created -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 401 - Unauthorized -| Empty -|=== - - -=== /apis/route.openshift.io/v1/watch/namespaces/{namespace}/routes/{name} - -.Global path parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `name` -| `string` -| name of the Route -| `namespace` -| `string` -| object name and auth scope, such as for teams and projects -|=== - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `allowWatchBookmarks` -| `boolean` -| allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. -| `continue` -| `string` -| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. -| `fieldSelector` -| `string` -| A selector to restrict the list of returned objects by their fields. Defaults to everything. -| `labelSelector` -| `string` -| A selector to restrict the list of returned objects by their labels. Defaults to everything. -| `limit` -| `integer` -| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. -| `pretty` -| `string` -| If 'true', then the output is pretty printed. -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `resourceVersionMatch` -| `string` -| resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `timeoutSeconds` -| `integer` -| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. -| `watch` -| `boolean` -| Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. -|=== - -HTTP method:: - `GET` - -Description:: - watch changes to an object of kind Route. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter. - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `WatchEvent` schema -| 401 - Unauthorized -| Empty -|=== - - -=== /apis/route.openshift.io/v1/namespaces/{namespace}/routes/{name}/status - -.Global path parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `name` -| `string` -| name of the Route -| `namespace` -| `string` -| object name and auth scope, such as for teams and projects -|=== - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `pretty` -| `string` -| If 'true', then the output is pretty printed. 
-|=== - -HTTP method:: - `GET` - -Description:: - read status of the specified Route - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `PATCH` - -Description:: - partially update status of the specified Route - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed -| `fieldManager` -| `string` -| fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). -| `force` -| `boolean` -| Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. -|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| `Patch` schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 201 - Created -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `PUT` - -Description:: - replace status of the specified Route - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed -| `fieldManager` -| `string` -| fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
-|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 201 - Created -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 401 - Unauthorized -| Empty -|=== - - diff --git a/microshift_rest_api/security_apis/securitycontextconstraints-security-openshift-io-v1.adoc b/microshift_rest_api/security_apis/securitycontextconstraints-security-openshift-io-v1.adoc deleted file mode 100644 index 3b17cbb183a4..000000000000 --- a/microshift_rest_api/security_apis/securitycontextconstraints-security-openshift-io-v1.adoc +++ /dev/null @@ -1,660 +0,0 @@ -// Automatically generated by 'openshift-apidocs-gen'. Do not edit. -:_content-type: ASSEMBLY -[id="securitycontextconstraints-security-openshift-io-v1"] -= SecurityContextConstraints [security.openshift.io/v1] -:toc: macro -:toc-title: - -toc::[] - - -Description:: -+ --- -SecurityContextConstraints (SCC) governs the ability to make requests that affect the SecurityContext that applies to a container. Use the security.openshift.io group to manage SecurityContextConstraints. Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). --- - -Type:: - `object` - -Required:: - - `allowHostDirVolumePlugin` - - `allowHostIPC` - - `allowHostNetwork` - - `allowHostPID` - - `allowHostPorts` - - `allowPrivilegedContainer` - - `readOnlyRootFilesystem` - - -== Specification - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `allowHostDirVolumePlugin` -| `boolean` -| AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin - -| `allowHostIPC` -| `boolean` -| AllowHostIPC determines if the policy allows host ipc in the containers. - -| `allowHostNetwork` -| `boolean` -| AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - -| `allowHostPID` -| `boolean` -| AllowHostPID determines if the policy allows host pid in the containers. - -| `allowHostPorts` -| `boolean` -| AllowHostPorts determines if the policy allows host ports in the containers. - -| `allowPrivilegeEscalation` -| `` -| AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true. - -| `allowPrivilegedContainer` -| `boolean` -| AllowPrivilegedContainer determines if a container can request to be run as privileged. - -| `allowedCapabilities` -| `` -| AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field maybe added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. To allow all capabilities you may use '*'. - -| `allowedFlexVolumes` -| `` -| AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the "Volumes" field. - -| `allowedUnsafeSysctls` -| `` -| AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. 
Each entry is either a plain sysctl name or ends in "*" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. - Examples: e.g. "foo/*" allows "foo/bar", "foo/baz", etc. e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - -| `apiVersion` -| `string` -| APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - -| `defaultAddCapabilities` -| `` -| DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities. - -| `defaultAllowPrivilegeEscalation` -| `` -| DefaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process. - -| `forbiddenSysctls` -| `` -| ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in "*" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - Examples: e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - -| `fsGroup` -| `` -| FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. - -| `groups` -| `` -| The groups that have permission to use this security context constraints - -| `kind` -| `string` -| Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - -| `metadata` -| `ObjectMeta` -| Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - -| `priority` -| `` -| Priority influences the sort order of SCCs when evaluating which SCCs to try first for a given pod request based on access in the Users and Groups fields. The higher the int, the higher priority. An unset value is considered a 0 priority. If scores for multiple SCCs are equal they will be sorted from most restrictive to least restrictive. If both priorities and restrictions are equal the SCCs will be sorted by name. - -| `readOnlyRootFilesystem` -| `boolean` -| ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the SCC should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to. - -| `requiredDropCapabilities` -| `` -| RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added. - -| `runAsUser` -| `` -| RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext. - -| `seLinuxContext` -| `` -| SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. 
- -| `seccompProfiles` -| `` -| SeccompProfiles lists the allowed profiles that may be set for the pod or container's seccomp annotations. An unset (nil) or empty value means that no profiles may be specifid by the pod or container. The wildcard '*' may be used to allow all profiles. When used to generate a value for a pod the first non-wildcard profile will be used as the default. - -| `supplementalGroups` -| `` -| SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - -| `users` -| `` -| The users who have permissions to use this security context constraints - -| `volumes` -| `` -| Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*". To allow no volumes, set to ["none"]. - -|=== - -== API endpoints - -The following API endpoints are available: - -* `/apis/security.openshift.io/v1/securitycontextconstraints` -- `DELETE`: delete collection of SecurityContextConstraints -- `GET`: list objects of kind SecurityContextConstraints -- `POST`: create SecurityContextConstraints -* `/apis/security.openshift.io/v1/watch/securitycontextconstraints` -- `GET`: watch individual changes to a list of SecurityContextConstraints. deprecated: use the 'watch' parameter with a list operation instead. -* `/apis/security.openshift.io/v1/securitycontextconstraints/{name}` -- `DELETE`: delete SecurityContextConstraints -- `GET`: read the specified SecurityContextConstraints -- `PATCH`: partially update the specified SecurityContextConstraints -- `PUT`: replace the specified SecurityContextConstraints -* `/apis/security.openshift.io/v1/watch/securitycontextconstraints/{name}` -- `GET`: watch changes to an object of kind SecurityContextConstraints. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter. - - -=== /apis/security.openshift.io/v1/securitycontextconstraints - - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `pretty` -| `string` -| If 'true', then the output is pretty printed. -|=== - -HTTP method:: - `DELETE` - -Description:: - delete collection of SecurityContextConstraints - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `allowWatchBookmarks` -| `boolean` -| allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. -| `continue` -| `string` -| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. -| `fieldSelector` -| `string` -| A selector to restrict the list of returned objects by their fields. Defaults to everything. -| `labelSelector` -| `string` -| A selector to restrict the list of returned objects by their labels. Defaults to everything. -| `limit` -| `integer` -| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `resourceVersionMatch` -| `string` -| resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `timeoutSeconds` -| `integer` -| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. -| `watch` -| `boolean` -| Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. 
-|=== - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `Status` schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `GET` - -Description:: - list objects of kind SecurityContextConstraints - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `allowWatchBookmarks` -| `boolean` -| allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. -| `continue` -| `string` -| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. -| `fieldSelector` -| `string` -| A selector to restrict the list of returned objects by their fields. Defaults to everything. -| `labelSelector` -| `string` -| A selector to restrict the list of returned objects by their labels. Defaults to everything. -| `limit` -| `integer` -| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `resourceVersionMatch` -| `string` -| resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `timeoutSeconds` -| `integer` -| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. -| `watch` -| `boolean` -| Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. -|=== - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `SecurityContextConstraintsList` schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `POST` - -Description:: - create SecurityContextConstraints - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed -| `fieldManager` -| `string` -| fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. -| `fieldValidation` -| `string` -| fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. 
-|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| xref:../security_apis/securitycontextconstraints-security-openshift-io-v1.adoc#securitycontextconstraints-security-openshift-io-v1[`SecurityContextConstraints`] schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../security_apis/securitycontextconstraints-security-openshift-io-v1.adoc#securitycontextconstraints-security-openshift-io-v1[`SecurityContextConstraints`] schema -| 201 - Created -| xref:../security_apis/securitycontextconstraints-security-openshift-io-v1.adoc#securitycontextconstraints-security-openshift-io-v1[`SecurityContextConstraints`] schema -| 202 - Accepted -| xref:../security_apis/securitycontextconstraints-security-openshift-io-v1.adoc#securitycontextconstraints-security-openshift-io-v1[`SecurityContextConstraints`] schema -| 401 - Unauthorized -| Empty -|=== - - -=== /apis/security.openshift.io/v1/watch/securitycontextconstraints - - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `allowWatchBookmarks` -| `boolean` -| allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. -| `continue` -| `string` -| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. -| `fieldSelector` -| `string` -| A selector to restrict the list of returned objects by their fields. Defaults to everything. -| `labelSelector` -| `string` -| A selector to restrict the list of returned objects by their labels. Defaults to everything. -| `limit` -| `integer` -| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. -| `pretty` -| `string` -| If 'true', then the output is pretty printed. -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `resourceVersionMatch` -| `string` -| resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `timeoutSeconds` -| `integer` -| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. -| `watch` -| `boolean` -| Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. -|=== - -HTTP method:: - `GET` - -Description:: - watch individual changes to a list of SecurityContextConstraints. deprecated: use the 'watch' parameter with a list operation instead. - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `WatchEvent` schema -| 401 - Unauthorized -| Empty -|=== - - -=== /apis/security.openshift.io/v1/securitycontextconstraints/{name} - -.Global path parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `name` -| `string` -| name of the SecurityContextConstraints -|=== - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `pretty` -| `string` -| If 'true', then the output is pretty printed. -|=== - -HTTP method:: - `DELETE` - -Description:: - delete SecurityContextConstraints - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed -| `gracePeriodSeconds` -| `integer` -| The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. 
If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. -| `orphanDependents` -| `boolean` -| Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. -| `propagationPolicy` -| `string` -| Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. -|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| `DeleteOptions` schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `Status` schema -| 202 - Accepted -| `Status` schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `GET` - -Description:: - read the specified SecurityContextConstraints - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -|=== - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../security_apis/securitycontextconstraints-security-openshift-io-v1.adoc#securitycontextconstraints-security-openshift-io-v1[`SecurityContextConstraints`] schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `PATCH` - -Description:: - partially update the specified SecurityContextConstraints - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed -| `fieldManager` -| `string` -| fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. -| `fieldValidation` -| `string` -| fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. 
- Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. -|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| `Patch` schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../security_apis/securitycontextconstraints-security-openshift-io-v1.adoc#securitycontextconstraints-security-openshift-io-v1[`SecurityContextConstraints`] schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `PUT` - -Description:: - replace the specified SecurityContextConstraints - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed -| `fieldManager` -| `string` -| fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. -| `fieldValidation` -| `string` -| fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. 
-|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| xref:../security_apis/securitycontextconstraints-security-openshift-io-v1.adoc#securitycontextconstraints-security-openshift-io-v1[`SecurityContextConstraints`] schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../security_apis/securitycontextconstraints-security-openshift-io-v1.adoc#securitycontextconstraints-security-openshift-io-v1[`SecurityContextConstraints`] schema -| 201 - Created -| xref:../security_apis/securitycontextconstraints-security-openshift-io-v1.adoc#securitycontextconstraints-security-openshift-io-v1[`SecurityContextConstraints`] schema -| 401 - Unauthorized -| Empty -|=== - - -=== /apis/security.openshift.io/v1/watch/securitycontextconstraints/{name} - -.Global path parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `name` -| `string` -| name of the SecurityContextConstraints -|=== - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `allowWatchBookmarks` -| `boolean` -| allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. -| `continue` -| `string` -| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. -| `fieldSelector` -| `string` -| A selector to restrict the list of returned objects by their fields. Defaults to everything. -| `labelSelector` -| `string` -| A selector to restrict the list of returned objects by their labels. Defaults to everything. -| `limit` -| `integer` -| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. -| `pretty` -| `string` -| If 'true', then the output is pretty printed. -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `resourceVersionMatch` -| `string` -| resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `timeoutSeconds` -| `integer` -| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. -| `watch` -| `boolean` -| Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. -|=== - -HTTP method:: - `GET` - -Description:: - watch changes to an object of kind SecurityContextConstraints. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter. - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `WatchEvent` schema -| 401 - Unauthorized -| Empty -|=== - - diff --git a/microshift_rest_api/understanding-api-support-tiers.adoc b/microshift_rest_api/understanding-api-support-tiers.adoc deleted file mode 100644 index 73a078930fbe..000000000000 --- a/microshift_rest_api/understanding-api-support-tiers.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-api-support-tiers"] -= Understanding API tiers -include::_attributes/common-attributes.adoc[] -:context: understanding-api-tiers - -toc::[] - -[IMPORTANT] -==== -This guidance does not cover layered {product-title} offerings. -==== - -Red Hat requests that application developers validate that any behavior they depend on is explicitly defined in the formal API documentation to prevent introducing dependencies on unspecified implementation-specific behavior or dependencies on bugs in a particular implementation of an API. For example, new releases of an ingress router may not be compatible with older releases if an application uses an undocumented API or relies on undefined behavior. 
- -include::modules/api-support-tiers.adoc[leveloffset=+1] - -include::modules/api-support-tiers-mapping.adoc[leveloffset=+1] - -include::modules/api-support-deprecation-policy.adoc[leveloffset=+1] diff --git a/microshift_rest_api/understanding-compatibility-guidelines.adoc b/microshift_rest_api/understanding-compatibility-guidelines.adoc deleted file mode 100644 index b2153251d9e2..000000000000 --- a/microshift_rest_api/understanding-compatibility-guidelines.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -[id="compatibility-guidelines"] -= Understanding API compatibility guidelines -include::_attributes/common-attributes.adoc[] -:context: compatibility-guidelines - -toc::[] - -[IMPORTANT] -==== -This guidance does not cover layered {product-title} offerings. -==== - -include::modules/api-compatibility-guidelines.adoc[leveloffset=+1] - -include::modules/api-compatibility-exceptions.adoc[leveloffset=+1] - -include::modules/api-compatibility-common-terminology.adoc[leveloffset=+1] diff --git a/microshift_running_apps/_attributes b/microshift_running_apps/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_running_apps/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_running_apps/images b/microshift_running_apps/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_running_apps/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_running_apps/microshift-applications.adoc b/microshift_running_apps/microshift-applications.adoc deleted file mode 100644 index aa25d0c15308..000000000000 --- a/microshift_running_apps/microshift-applications.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_content-type: ASSEMBLY -[id="applications-with-microshift"] -= Application deployment with {product-title} -include::_attributes/attributes-microshift.adoc[] -:context: applications-microshift - -toc::[] - -You can use the `kustomize` configuration management tool to deploy applications. Read through the following procedure for an example of how this tool works in {product-title}. - -include::modules/microshift-manifests-overview.adoc[leveloffset=+1] -include::modules/microshift-applying-manifests-example.adoc[leveloffset=+1] \ No newline at end of file diff --git a/microshift_running_apps/microshift-greenboot-workload-scripts.adoc b/microshift_running_apps/microshift-greenboot-workload-scripts.adoc deleted file mode 100644 index 9f094963b251..000000000000 --- a/microshift_running_apps/microshift-greenboot-workload-scripts.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-greenboot-workload-scripts"] -= Greenboot workload health check scripts -include::_attributes/attributes-microshift.adoc[] -:context: microshift-greenboot-workload-scripts - -toc::[] - -Greenboot health check scripts are helpful on edge devices where direct serviceability is either limited or non-existent. If you installed the `microshift-greenboot` RPM package, you can also create health check scripts to assess the health of your workloads and applications. These additional health check scripts are a useful part of software problem checks and automatic system rollbacks. - -A {product-title} health check script is included in the `microshift-greenboot` RPM. You can also create your own health check scripts based on the workloads you are running. For example, you can write one that verifies that a service has started.
- -include::modules/microshift-greenboot-how-workload-health-check-scripts-work.adoc[leveloffset=+1] - -include::modules/microshift-greenboot-included-health-checks.adoc[leveloffset=+1] - -include::modules/microshift-greenboot-create-health-check-script.adoc[leveloffset=+1] - -include::modules/microshift-greenboot-testing-workload-script.adoc[leveloffset=+1] - -[id="additional-resources_microshift-greenboot-workload-scripts"] -[role="_additional-resources"] -.Additional resources -* xref:../microshift_install/microshift-greenboot.adoc#microshift-greenboot[The greenboot health check] -* xref:../microshift_running_apps/microshift-applications.adoc#microshift-manifests-example_applications-microshift[Auto applying manifests] diff --git a/microshift_running_apps/microshift-operators.adoc b/microshift_running_apps/microshift-operators.adoc deleted file mode 100644 index bfee1f5a18c4..000000000000 --- a/microshift_running_apps/microshift-operators.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="operators-with-microshift"] -= How Operators work with {product-title} -include::_attributes/attributes-microshift.adoc[] -:context: operators-microshift - -toc::[] - -You can use Operators with {product-title} to create applications that monitor the running services in your cluster. Operators can manage applications and their resources, for example by deploying a database or message bus. As customized software running inside your cluster, Operators can be used to implement and automate common operations. - -Operators offer a more localized configuration experience and integrate with Kubernetes APIs and CLI tools such as `kubectl` and `oc`. Operators are designed specifically for your applications. Operators enable you to configure components instead of modifying a global configuration file. - -{product-title} applications are generally expected to be deployed in static environments. However, you can use Operators if they are helpful for your use case. To determine an Operator's compatibility with {product-title}, check the Operator's documentation. - -[id="how-to-install-operators_{context}"] -== How to install Operators in {product-title} - -To minimize the footprint of {product-title}, Operators are installed directly with manifests instead of using the Operator Lifecycle Manager (OLM). The following examples show how to use the `kustomize` configuration management tool with {product-title} to deploy an application. Use the same steps to install Operators with manifests.
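The modules referenced below contain the supported procedure. As an illustration only, the following minimal sketch shows the shape of a `kustomization.yaml` that such a manifest directory might contain. The directory path and the resource file name are assumptions for this sketch, not values taken from the modules; confirm the scanned manifest paths in the manifests overview module for your release.

[source,yaml]
----
# Hypothetical kustomization.yaml placed in a manifest directory that
# MicroShift scans at startup (for example /etc/microshift/manifests;
# verify the path for your release). The listed resource files live in
# the same directory.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: example-app
resources:
  - example-deployment.yaml   # an application Deployment, or an Operator's install manifests
commonLabels:
  app.kubernetes.io/name: example-app
----

Because the layout is plain kustomize, you can validate it independently with `oc apply -k <directory>` (or `kubectl apply -k`) before handing it to {product-title}; the same structure works for applications and for Operators that are installed directly from manifests.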
- -include::modules/microshift-manifests-overview.adoc[leveloffset=+2] - -include::modules/microshift-applying-manifests-example.adoc[leveloffset=+2] \ No newline at end of file diff --git a/microshift_running_apps/modules b/microshift_running_apps/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_running_apps/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_running_apps/snippets b/microshift_running_apps/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_running_apps/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_storage/_attributes b/microshift_storage/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_storage/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_storage/container_storage_interface_microshift/_attributes b/microshift_storage/container_storage_interface_microshift/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/microshift_storage/container_storage_interface_microshift/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/microshift_storage/container_storage_interface_microshift/images b/microshift_storage/container_storage_interface_microshift/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/microshift_storage/container_storage_interface_microshift/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/microshift_storage/container_storage_interface_microshift/microshift-persistent-storage-csi.adoc b/microshift_storage/container_storage_interface_microshift/microshift-persistent-storage-csi.adoc deleted file mode 100644 index a30265734821..000000000000 --- a/microshift_storage/container_storage_interface_microshift/microshift-persistent-storage-csi.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_content-type: ASSEMBLY -[id="persistent-storage-csi-microshift"] -= Configuring CSI volumes for {product-title} -include::_attributes/attributes-microshift.adoc[] -:context: persistent-storage-csi-microshift - -toc::[] - -The Container Storage Interface (CSI) allows {product-title} to consume -storage from storage back ends that implement the -link:https://github.com/container-storage-interface/spec[CSI interface] -as persistent storage. - -[NOTE] -==== -{product-title} {product-version} supports version 1.5.0 of the link:https://github.com/container-storage-interface/spec[CSI specification]. 
-==== - -include::modules/persistent-storage-csi-dynamic-provisioning.adoc[leveloffset=+1] -include::modules/persistent-storage-csi-mysql-example.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.12/html/storage/using-container-storage-interface-csi#persistent-storage-csi[{ocp} CSI Overview] \ No newline at end of file diff --git a/microshift_storage/container_storage_interface_microshift/modules b/microshift_storage/container_storage_interface_microshift/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/microshift_storage/container_storage_interface_microshift/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/microshift_storage/container_storage_interface_microshift/snippets b/microshift_storage/container_storage_interface_microshift/snippets deleted file mode 120000 index 7bf6da9a51d0..000000000000 --- a/microshift_storage/container_storage_interface_microshift/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets \ No newline at end of file diff --git a/microshift_storage/dynamic-provisioning-microshift.adoc b/microshift_storage/dynamic-provisioning-microshift.adoc deleted file mode 100644 index 62b80e93cf2a..000000000000 --- a/microshift_storage/dynamic-provisioning-microshift.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="dynamic-provisioning-microshift"] -= Dynamic provisioning for {product-title} -include::_attributes/attributes-microshift.adoc[] -:context: dynamic-provisioning-microshift - -toc::[] - -include::modules/dynamic-provisioning-about.adoc[leveloffset=+1] - -include::modules/dynamic-provisioning-defining-storage-class.adoc[leveloffset=+1] - -include::modules/dynamic-provisioning-storage-class-definition.adoc[leveloffset=+2] - -include::modules/dynamic-provisioning-annotations.adoc[leveloffset=+2] - -include::modules/dynamic-provisioning-change-default-class.adoc[leveloffset=+1] diff --git a/microshift_storage/expanding-persistent-volumes-microshift.adoc b/microshift_storage/expanding-persistent-volumes-microshift.adoc deleted file mode 100644 index e583b0ab31d3..000000000000 --- a/microshift_storage/expanding-persistent-volumes-microshift.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="expanding-persistent-volumes-microshift"] -= Expanding persistent volumes for {product-title} -include::_attributes/attributes-microshift.adoc[] -:context: expanding-persistent-volumes-microshift - -toc::[] - -Learn how to expand persistent volumes in {product-title}. 
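As a rough sketch of what the expansion modules below describe: expanding a volume generally means raising `spec.resources.requests.storage` on an existing persistent volume claim, and the resize is only honored when the claim's storage class permits expansion. The claim name, storage class name, and sizes here are hypothetical.

[source,yaml]
----
# Hypothetical PVC after an expansion request: storage was raised from 1Gi to 2Gi.
# The resize is only accepted if the referenced StorageClass sets
# allowVolumeExpansion: true and the underlying CSI driver supports expansion.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-claim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: example-expandable-sc
  resources:
    requests:
      storage: 2Gi   # previously 1Gi; requests can grow but cannot shrink
----

Applying the larger request, for example by editing the claim with `oc edit pvc example-claim`, triggers the resize; for file system-backed volumes the actual file system growth may additionally require restarting the pod that mounts the claim, as covered in the modules that follow.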
- -include::modules/storage-expanding-csi-volumes.adoc[leveloffset=+1] - -include::modules/storage-expanding-local-volumes.adoc[leveloffset=+1] - -include::modules/storage-expanding-filesystem-pvc.adoc[leveloffset=+1] - -include::modules/storage-expanding-recovering-failure.adoc[leveloffset=+1] \ No newline at end of file diff --git a/microshift_storage/generic-ephemeral-volumes-microshift.adoc b/microshift_storage/generic-ephemeral-volumes-microshift.adoc deleted file mode 100644 index c28401c95259..000000000000 --- a/microshift_storage/generic-ephemeral-volumes-microshift.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="generic-ephemeral-volumes-microshift"] -= Generic ephemeral volumes for {product-title} -include::_attributes/common-attributes.adoc[] -:context: generic-ephemeral-volumes-microshift - -toc::[] - -include::modules/storage-ephemeral-vols-overview.adoc[leveloffset=+1] - -include::modules/storage-ephemeral-vols-lifecycle.adoc[leveloffset=+1] - -include::modules/storage-ephemeral-vols-security.adoc[leveloffset=+1] - -include::modules/storage-ephemeral-vols-pvc-naming.adoc[leveloffset=+1] - -include::modules/storage-ephemeral-vols-procedure.adoc[leveloffset=+1] diff --git a/microshift_storage/images b/microshift_storage/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_storage/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_storage/index.adoc b/microshift_storage/index.adoc deleted file mode 100644 index 15eb5282f21e..000000000000 --- a/microshift_storage/index.adoc +++ /dev/null @@ -1,34 +0,0 @@ -:_content-type: ASSEMBLY -[id="storage-overview-microshift"] -= {product-title} storage overview -include::_attributes/attributes-microshift.adoc[] -:context: storage-overview-microshift - -toc::[] - -{product-title} supports multiple types of storage, both for on-premise and cloud providers. You can manage container storage for persistent and non-persistent data in a {product-title} cluster. - -[id="microshift-storage-types"] -== Storage types - -{product-title} storage is broadly classified into two categories, namely ephemeral storage and persistent storage. - -[id="microshift-ephemeral-storage"] -=== Ephemeral storage - -Pods and containers are ephemeral or transient in nature and designed for stateless applications. Ephemeral storage allows administrators and developers to better manage the local storage for some of their operations. To read details about ephemeral storage, click xref:../microshift_storage/understanding-ephemeral-storage-microshift.adoc#understanding-ephemeral-storage-microshift[Understanding ephemeral storage]. - -[id="microshift-persistent-storage"] -=== Persistent storage - -Stateful applications deployed in containers require persistent storage. {product-title} uses a pre-provisioned storage framework called persistent volumes (PV) to allow cluster administrators to provision persistent storage. The data inside these volumes can exist beyond the lifecycle of an individual pod. Developers can use persistent volume claims (PVCs) to request storage requirements. For persistent storage details, read xref:../microshift_storage/understanding-persistent-storage-microshift.adoc#understanding-persistent-storage-microshift[Understanding persistent storage]. - -[id="microshift-dynamic-provisioning-overview"] -=== Dynamic storage provisioning - -Using dynamic provisioning allows you to create storage volumes on-demand, eliminating the need for pre-provisioned storage. 
For more information about how dynamic provisioning works in {product-title}, read xref:../microshift_storage/microshift-storage-plugin-overview.adoc#microshift-storage-plugin-overview[Dynamic provisioning]. - -//[id="microshift-container-storage-interface"] -//== Container Storage Interface (CSI) - -//CSI is an API specification for the management of container storage across different container orchestration (CO) systems. You can manage the storage volumes within the container native environments, without having specific knowledge of the underlying storage infrastructure. With the CSI, storage works uniformly across different container orchestration systems, regardless of the storage vendors you are using. For more information about CSI, read ../microshift_storage/container_storage_interface_microshift/microshift-persistent-storage-csi.adoc#persistent-storage-csi-microshift[Using Container Storage Interface (CSI) for MicroShift]. \ No newline at end of file diff --git a/microshift_storage/microshift-storage-plugin-overview.adoc b/microshift_storage/microshift-storage-plugin-overview.adoc deleted file mode 100644 index 40cf9b624d2c..000000000000 --- a/microshift_storage/microshift-storage-plugin-overview.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-storage-plugin-overview"] -= Dynamic storage using the LVMS plugin -include::_attributes/attributes-microshift.adoc[] -:context: microshift-storage-plugin-overview - -toc::[] - -{product-title} enables dynamic storage provisioning that is ready for immediate use with the logical volume manager storage (LVMS) Container Storage Interface (CSI) provider. The LVMS plugin is the Red Hat downstream version of TopoLVM, a CSI plugin for managing LVM volumes for Kubernetes. - -LVMS provisions new logical volume management (LVM) logical volumes (LVs) for container workloads with appropriately configured persistent volume claims (PVC). Each PVC references a storage class that represents an LVM Volume Group (VG) on the host node. LVs are only provisioned for scheduled pods. - -include::modules/microshift-lvms-system-requirements.adoc[leveloffset=+1] -include::modules/microshift-lvms-deployment.adoc[leveloffset=+1] -include::modules/microshift-lvmd-yaml-creating.adoc[leveloffset=+1] -include::modules/microshift-lvms-config-example-basic.adoc[leveloffset=+1] -include::modules/microshift-lvms-using.adoc[leveloffset=+1] \ No newline at end of file diff --git a/microshift_storage/modules b/microshift_storage/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_storage/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_storage/snippets b/microshift_storage/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_storage/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_storage/understanding-ephemeral-storage-microshift.adoc b/microshift_storage/understanding-ephemeral-storage-microshift.adoc deleted file mode 100644 index 562e2c31ff70..000000000000 --- a/microshift_storage/understanding-ephemeral-storage-microshift.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-ephemeral-storage-microshift"] -= Understanding ephemeral storage for {product-title} -include::_attributes/attributes-microshift.adoc[] -:context: understanding-ephemeral-storage-microshift - -toc::[] - -Ephemeral storage is unstructured and temporary. 
It is often used with immutable applications. This guide discusses how ephemeral storage works for {product-title}. - -include::modules/storage-ephemeral-storage-overview.adoc[leveloffset=+1] - -include::modules/storage-ephemeral-storage-types.adoc[leveloffset=+1] - -include::modules/storage-ephemeral-storage-manage.adoc[leveloffset=+1] - -include::modules/storage-ephemeral-storage-monitoring.adoc[leveloffset=+1] diff --git a/microshift_storage/understanding-persistent-storage-microshift.adoc b/microshift_storage/understanding-persistent-storage-microshift.adoc deleted file mode 100644 index 3f2974c87b39..000000000000 --- a/microshift_storage/understanding-persistent-storage-microshift.adoc +++ /dev/null @@ -1,28 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-persistent-storage-microshift"] -= Understanding persistent storage for {product-title} -include::_attributes/attributes-microshift.adoc[] -:context: understanding-persistent-storage-microshift - -toc::[] - -Managing storage is a distinct problem from managing compute resources. {product-title} uses the Kubernetes persistent volume (PV) framework to allow cluster administrators to provision persistent storage for a cluster. Developers can use persistent volume claims (PVCs) to request PV resources without having specific knowledge of the underlying storage infrastructure. - -include::modules/storage-persistent-storage-overview.adoc[leveloffset=+1] - -[id="additional-resources_understanding-persistent-storage-microshift"] -[role="_additional-resources"] -.Additional resources -* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.14/html/storage/understanding-persistent-storage#pv-access-modes_understanding-persistent-storage[Access modes for persistent storage] - -include::modules/storage-persistent-storage-lifecycle.adoc[leveloffset=+1] - -include::modules/storage-persistent-storage-reclaim-manual.adoc[leveloffset=+2] - -include::modules/storage-persistent-storage-reclaim.adoc[leveloffset=+2] - -include::modules/storage-persistent-storage-pv.adoc[leveloffset=+1] - -include::modules/storage-persistent-storage-pvc.adoc[leveloffset=+1] - -include::modules/storage-persistent-storage-fsGroup.adoc[leveloffset=+1] diff --git a/microshift_support/_attributes b/microshift_support/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_support/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_support/images b/microshift_support/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_support/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_support/microshift-etcd.adoc b/microshift_support/microshift-etcd.adoc deleted file mode 100644 index f8edbb6b6956..000000000000 --- a/microshift_support/microshift-etcd.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-etcd"] -= MicroShift etcd -include::_attributes/attributes-microshift.adoc[] -:context: microshift-etcd - -toc::[] - -[role="_abstract"] -{product-title} etcd is delivered as part of the {product-title} RPM. The etcd service is run as a separate process and the lifecycle is managed automatically by {product-title}. 
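To make the configuration module referenced below more concrete, the following sketch shows the general shape of an etcd setting in the {product-title} configuration file. The `memoryLimitMB` key and its default behavior are assumptions drawn from the referenced `microshift-config-etcd` module rather than from this assembly, so verify the exact key and supported values there before using it.

[source,yaml]
----
# Hypothetical /etc/microshift/config.yaml snippet that caps the memory of the
# MicroShift-managed etcd process. A value of 0, or omitting the key, is
# assumed to mean "no limit". Restart the microshift service afterward so the
# change takes effect.
etcd:
  memoryLimitMB: 256
----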
- -:FeatureName: MicroShift -include::snippets/microshift-tech-preview-snip.adoc[leveloffset=+1] - -include::modules/microshift-observe-debug-etcd-server.adoc[leveloffset=+1] -include::modules/microshift-config-etcd.adoc[leveloffset=+1] \ No newline at end of file diff --git a/microshift_support/microshift-sos-report.adoc b/microshift_support/microshift-sos-report.adoc deleted file mode 100644 index 003f451db34b..000000000000 --- a/microshift_support/microshift-sos-report.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-sos-report"] -= MicroShift sos report -include::_attributes/attributes-microshift.adoc[] -:context: microshift-sos-report - -toc::[] - -[role="_abstract"] -`sos` is a tool you can use to collect troubleshooting information about a host. An `sos` report gathers data from all of the enabled plugins and from the different components and applications in a system. - -:FeatureName: MicroShift -include::snippets/microshift-tech-preview-snip.adoc[leveloffset=+1] - -include::modules/microshift-about-sos-reports.adoc[leveloffset=+1] -include::modules/microshift-gathering-sos-report.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_microshift-sos-report"] -== Additional resources -* link:https://access.redhat.com/solutions/2112[How to provide files to Red Hat Support (vmcore, rhev logcollector, sosreports, heap dumps, log files, etc.)] -* link:https://access.redhat.com/solutions/3592[What is an sos report and how to create one in {op-system-base-full}?] \ No newline at end of file diff --git a/microshift_support/modules b/microshift_support/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_support/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_support/snippets b/microshift_support/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_support/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_troubleshooting/_attributes b/microshift_troubleshooting/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_troubleshooting/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_troubleshooting/images b/microshift_troubleshooting/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_troubleshooting/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_troubleshooting/microshift-things-to-know.adoc b/microshift_troubleshooting/microshift-things-to-know.adoc deleted file mode 100644 index bcc13a898716..000000000000 --- a/microshift_troubleshooting/microshift-things-to-know.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-things-to-know"] -= Responsive restarts and security certificates -include::_attributes/attributes-microshift.adoc[] -:context: microshift-configuring - -toc::[] - -{product-title} responds to system configuration changes and restarts after it detects alterations such as IP address changes, clock adjustments, and aging security certificates. - -[id="microshift-ip-address-clock-changes_{context}"] -== IP address changes or clock adjustments - -{product-title} depends on device IP addresses and system-wide clock settings to remain consistent during its runtime.
However, these settings can occasionally change on edge devices, for example because of DHCP or Network Time Protocol (NTP) updates. - -When such changes occur, some {product-title} components may stop functioning properly. To mitigate this situation, {product-title} monitors the IP address and system time and restarts if it detects a change in either setting. - -The threshold for clock changes is a time adjustment of greater than 10 seconds in either direction. Smaller drifts from the regular time adjustments that the NTP service performs do not cause a restart. - -include::modules/microshift-certificate-lifetime.adoc[leveloffset=+1] \ No newline at end of file diff --git a/microshift_troubleshooting/microshift-version.adoc b/microshift_troubleshooting/microshift-version.adoc deleted file mode 100644 index 9d3e8d83ce99..000000000000 --- a/microshift_troubleshooting/microshift-version.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-version"] -= Checking which version you have installed -include::_attributes/attributes-microshift.adoc[] -:context: microshift-version - -toc::[] - -To begin troubleshooting, determine which version of {product-title} you have installed. - -include::modules/microshift-version-cli.adoc[leveloffset=+1] - -include::modules/microshift-version-api.adoc[leveloffset=+1] diff --git a/microshift_troubleshooting/modules b/microshift_troubleshooting/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_troubleshooting/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_troubleshooting/snippets b/microshift_troubleshooting/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_troubleshooting/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_updating/_attributes b/microshift_updating/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_updating/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_updating/images b/microshift_updating/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_updating/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_updating/microshift-about-updates.adoc b/microshift_updating/microshift-about-updates.adoc deleted file mode 100644 index 8f8c152235e4..000000000000 --- a/microshift_updating/microshift-about-updates.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-about-updates"] -= About {product-title} updates -include::_attributes/attributes-microshift.adoc[] -:context: microshift-about-updates - -toc::[] - -You can update a {product-title} cluster by using the OpenShift CLI (`oc`). -// This PR is for the book build. Note that the OCP structure consists of a landing page of xrefs to other major sections within the book. MicroShift likely does not require that depth of structure, so starting simply with one page. - -[id="microshift-about-updates-understanding-microshift-updates"] -== Understanding {product-title} updates -{product-title} updates are available either as RPMs or by embedding the {product-title} image in an RPM OSTree blueprint. -You can update an {product-title} cluster by using the OpenShift CLI (`oc`). -//Platform administrators can view new update options by looking at the output of the `oc adm upgrade` command. -//An update begins when...
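As a rough illustration of the two update paths described above, the following commands are a sketch only; the exact package names and image references depend on how {product-title} was installed.

[source,terminal]
----
# Check the installed version before updating
$ microshift version

# RPM path: update the package in place; this replaces the existing version and cannot be rolled back
$ sudo dnf update microshift

# RPM OSTree path: review deployments and, if needed, roll back to the previous one
$ sudo rpm-ostree status
$ sudo rpm-ostree rollback
----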
- -[NOTE] -==== -Operators previously installed must be reinstalled using manifests. -==== - -[id="microshift-about-updates-rpm-updates"] -=== RPM updates -Using the RPM update method replaces your existing version. No rollback is possible with this update type. -//we can call a module here or xref out; not sure the best method for our use case until we have the content - -[id="microshift-about-updates-rpm-ostree-updates"] -=== RPM OSTree updates -Using the RPM OSTree update path allows for system rollback. -//we can call a module here or xref out; not sure the best method for our use case until we have the content - -[id="microshift-about-updates-checking-version-update-compatibility"] -== Checking version update compatibility -Before attempting an update, determine which version of {product-title} you have installed. Only the following update paths are supported: - -* Version 4.13 to 4.14 -//replace with matrix including RHEL versions? -//place xref here to version-check assembly - -[id="microshift-about-updates-update-disconnected-environment"] -== Updating a cluster in a disconnected environment -//sample topic only - -[id="microshift-about-updates-troubleshooting-updates"] -== Troubleshooting updates -//sample topic only \ No newline at end of file diff --git a/microshift_updating/modules b/microshift_updating/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_updating/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_updating/snippets b/microshift_updating/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_updating/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_welcome/_attributes b/microshift_welcome/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/microshift_welcome/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/microshift_welcome/images b/microshift_welcome/images deleted file mode 120000 index e4c5bd02a10a..000000000000 --- a/microshift_welcome/images +++ /dev/null @@ -1 +0,0 @@ -../images/ \ No newline at end of file diff --git a/microshift_welcome/index.adoc b/microshift_welcome/index.adoc deleted file mode 100644 index 1eb07ee57f38..000000000000 --- a/microshift_welcome/index.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-welcome-index"] -= {product-title} {product-version} Documentation -include::_attributes/attributes-microshift.adoc[] -:context: microshift-welcome-index - -[.lead] -Welcome to the official {product-title} {product-version} documentation, where you can learn about {product-title} and start exploring its features. - -To browse the {product-title} {product-version} documentation, use one of the following methods: - -* Use the navigation bars and links to browse. -* Select the task that interests you from the contents of this Welcome page. 
- -To get started with {product-title}, use the following links: - -* xref:../microshift_getting_started/microshift-understanding.adoc#microshift-understanding[Understanding {product-title}] -* xref:../microshift_install/microshift-install-rpm.adoc#microshift-install-rpm[Installing {product-title}] -* xref:../microshift_release_notes/microshift-4-14-release-notes.adoc#microshift-4-14-release-notes[{product-title} release notes] - -For related information, use the following links: - -* link:https://access.redhat.com/documentation/en-us/red_hat_device_edge/4/html/overview/device-edge-overview[Red Hat Device Edge overview] -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/composing_installing_and_managing_rhel_for_edge_images/index[RHEL for Edge documentation] -* link:https://docs.openshift.com/container-platform/latest/welcome/index.html[OpenShift Container Platform documentation] \ No newline at end of file diff --git a/microshift_welcome/modules b/microshift_welcome/modules deleted file mode 120000 index 43aab75b53c9..000000000000 --- a/microshift_welcome/modules +++ /dev/null @@ -1 +0,0 @@ -../modules/ \ No newline at end of file diff --git a/microshift_welcome/snippets b/microshift_welcome/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/microshift_welcome/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/migrating_from_ocp_3_to_4/_attributes b/migrating_from_ocp_3_to_4/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/migrating_from_ocp_3_to_4/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/migrating_from_ocp_3_to_4/about-migrating-from-3-to-4.adoc b/migrating_from_ocp_3_to_4/about-migrating-from-3-to-4.adoc deleted file mode 100644 index 3114c50dd278..000000000000 --- a/migrating_from_ocp_3_to_4/about-migrating-from-3-to-4.adoc +++ /dev/null @@ -1,38 +0,0 @@ -:_content-type: ASSEMBLY -[id="about-migrating-from-3-to-4"] -= About migrating from {product-title} 3 to 4 -include::_attributes/common-attributes.adoc[] -:context: about-migrating-from-3-to-4 - -toc::[] - -{product-title} 4 contains new technologies and functionality that result in a cluster that is self-managing, flexible, and automated. {product-title} 4 clusters are deployed and managed very differently from {product-title} 3. - -The most effective way to migrate from {product-title} 3 to 4 is by using a CI/CD pipeline to automate deployments in an link:https://www.redhat.com/en/topics/devops/what-is-application-lifecycle-management-alm[application lifecycle management] framework. - -If you do not have a CI/CD pipeline or if you are migrating stateful applications, you can use the {mtc-full} ({mtc-short}) to migrate your application workloads. - -You can use Red Hat Advanced Cluster Management for Kubernetes to help you import and manage your {product-title} 3 clusters easily, enforce policies, and redeploy your applications. Take advantage of the link:https://www.redhat.com/en/engage/free-access-redhat-e-202202170127[free subscription] to use Red Hat Advanced Cluster Management to simplify your migration process. 
- -To successfully transition to {product-title} 4, review the following information: - -xref:../migrating_from_ocp_3_to_4/planning-migration-3-4.adoc#planning-migration-3-4[Differences between {product-title} 3 and 4]:: -* Architecture -* Installation and upgrade -* Storage, network, logging, security, and monitoring considerations - -xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#about-mtc-3-4[About the {mtc-full}]:: -* Workflow -* File system and snapshot copy methods for persistent volumes (PVs) -* Direct volume migration -* Direct image migration - -xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#advanced-migration-options-3-4[Advanced migration options]:: -* Automating your migration with migration hooks -* Using the {mtc-short} API -* Excluding resources from a migration plan -* Configuring the `MigrationController` custom resource for large-scale migrations -* Enabling automatic PV resizing for direct volume migration -* Enabling cached Kubernetes clients for improved performance - -For new features and enhancements, technical changes, and known issues, see the xref:../migration_toolkit_for_containers/mtc-release-notes.adoc#mtc-release-notes[{mtc-short} release notes]. diff --git a/migrating_from_ocp_3_to_4/about-mtc-3-4.adoc b/migrating_from_ocp_3_to_4/about-mtc-3-4.adoc deleted file mode 100644 index 26bce76859f7..000000000000 --- a/migrating_from_ocp_3_to_4/about-mtc-3-4.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -[id="about-mtc-3-4"] -= About the Migration Toolkit for Containers -include::_attributes/common-attributes.adoc[] -:context: about-mtc-3-4 - -toc::[] - -The {mtc-full} ({mtc-short}) enables you to migrate stateful application workloads from {product-title} 3 to {product-version} at the granularity of a namespace. - -[IMPORTANT] -==== -Before you begin your migration, be sure to review the xref:../migrating_from_ocp_3_to_4/planning-migration-3-4.adoc#planning-migration-3-4[differences between {product-title} 3 and 4]. -==== - -{mtc-short} provides a web console and an API, based on Kubernetes custom resources, to help you control the migration and minimize application downtime. - -The {mtc-short} console is installed on the target cluster by default. You can configure the {mtc-full} Operator to install the console on an link:https://access.redhat.com/articles/5064151[{product-title} 3 source cluster or on a remote cluster]. - -{mtc-short} supports the file system and snapshot data copy methods for migrating data from the source cluster to the target cluster. You can select a method that is suited for your environment and is supported by your storage provider. - -The service catalog is deprecated in {product-title} 4. You can migrate workload resources provisioned with the service catalog from {product-title} 3 to 4 but you cannot perform service catalog actions such as `provision`, `deprovision`, or `update` on these workloads after migration. The {mtc-short} console displays a message if the service catalog resources cannot be migrated. 
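Because the {mtc-short} API is built on Kubernetes custom resources, you can inspect the state of a migration directly from the CLI. The following is a minimal sketch that assumes {mtc-short} is installed in its default `openshift-migration` namespace.

[source,terminal]
----
# List the main MTC custom resources that drive a migration
$ oc get migcluster,migstorage,migplan,migmigration -n openshift-migration
----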
- -include::modules/migration-terminology.adoc[leveloffset=+1] -include::modules/migration-mtc-workflow.adoc[leveloffset=+1] -include::modules/migration-understanding-data-copy-methods.adoc[leveloffset=+1] -include::modules/migration-direct-volume-migration-and-direct-image-migration.adoc[leveloffset=+1] diff --git a/migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc b/migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc deleted file mode 100644 index e7962ab751c8..000000000000 --- a/migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc +++ /dev/null @@ -1,76 +0,0 @@ -:_content-type: ASSEMBLY -[id="advanced-migration-options-3-4"] -= Advanced migration options -include::_attributes/common-attributes.adoc[] -:context: advanced-migration-options-3-4 -:advanced-migration-options-3-4: - -toc::[] - -You can automate your migrations and modify the `MigPlan` and `MigrationController` custom resources in order to perform large-scale migrations and to improve performance. - -include::modules/migration-terminology.adoc[leveloffset=+1] - -include::modules/migration-migrating-on-prem-to-cloud.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* For information about creating a MigCluster CR manifest for each remote cluster, see xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-migrating-applications-api_advanced-migration-options-3-4[Migrating an application by using the {mtc-short} API]. -* For information about adding a cluster using the web console, see xref:../migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc#migrating-applications-mtc-web-console_migrating-applications-3-4[Migrating your applications by using the {mtc-short} web console] - -[id="migrating-applications-cli_{context}"] -== Migrating applications by using the command line - -You can migrate applications with the {mtc-short} API by using the command line interface (CLI) in order to automate the migration. - -include::modules/migration-prerequisites.adoc[leveloffset=+2] -include::modules/migration-creating-registry-route-for-dim.adoc[leveloffset=+2] -include::modules/migration-about-configuring-proxies.adoc[leveloffset=+2] -include::modules/migration-configuring-proxies.adoc[leveloffset=+3] -include::modules/migration-migrating-applications-api.adoc[leveloffset=+2] -include::modules/migration-state-migration-cli.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources-for-state-migration_{context}"] -[discrete] -=== Additional resources - -* See xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-excluding-pvcs_advanced-migration-options-3-4[Excluding PVCs from migration] to select PVCs for state migration. -* See xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-mapping-pvcs_advanced-migration-options-3-4[Mapping PVCs] to migrate source PV data to provisioned PVCs on the destination cluster. -* See xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-kubernetes-objects_advanced-migration-options-3-4[Migrating Kubernetes objects] to migrate the Kubernetes objects that constitute an application's state. - -include::modules/migration-hooks.adoc[leveloffset=+1] -include::modules/migration-writing-ansible-playbook-hook.adoc[leveloffset=+2] - -[id="migration-plan-options_{context}"] -== Migration plan options - -You can exclude, edit, and map components in the `MigPlan` custom resource (CR). 
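The modules that follow describe each option. As a starting point, you can review and edit a plan directly; the plan name below is a placeholder, and the exact field paths in the `MigPlan` spec depend on your {mtc-short} version.

[source,terminal]
----
# Review the current migration plan
$ oc get migplan <plan_name> -n openshift-migration -o yaml

# Edit the plan to exclude, remap, or otherwise adjust components
$ oc edit migplan <plan_name> -n openshift-migration
----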
- -include::modules/migration-excluding-resources.adoc[leveloffset=+2] -include::modules/migration-mapping-destination-namespaces-in-the-migplan-cr.adoc[leveloffset=+2] -include::modules/migration-excluding-pvcs.adoc[leveloffset=+2] -include::modules/migration-mapping-pvcs.adoc[leveloffset=+2] -include::modules/migration-editing-pvs-in-migplan.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources-for-editing-pv-attributes_{context}"] -[discrete] -==== Additional resources - -* For details about the `move` and `copy` actions, see xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#migration-mtc-workflow_about-mtc-3-4[MTC workflow]. -* For details about the `skip` action, see xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-excluding-pvcs_advanced-migration-options-3-4[Excluding PVCs from migration]. -* For details about the file system and snapshot copy methods, see xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#migration-understanding-data-copy-methods_about-mtc-3-4[About data copy methods]. - -include::modules/migration-kubernetes-objects.adoc[leveloffset=+2] - -[id="migration-controller-options_{context}"] -== Migration controller options - -You can edit migration plan limits, enable persistent volume resizing, or enable cached Kubernetes clients in the `MigrationController` custom resource (CR) for large migrations and improved performance. - -include::modules/migration-changing-migration-plan-limits.adoc[leveloffset=+2] -include::modules/migration-enabling-pv-resizing-dvm.adoc[leveloffset=+2] -include::modules/migration-enabling-cached-kubernetes-clients.adoc[leveloffset=+2] - -:advanced-migration-options-3-4!: diff --git a/migrating_from_ocp_3_to_4/images b/migrating_from_ocp_3_to_4/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/migrating_from_ocp_3_to_4/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/migrating_from_ocp_3_to_4/index.adoc b/migrating_from_ocp_3_to_4/index.adoc deleted file mode 100644 index 349b4d9779eb..000000000000 --- a/migrating_from_ocp_3_to_4/index.adoc +++ /dev/null @@ -1,82 +0,0 @@ -:_content-type: ASSEMBLY -[id="migration-from-version-3-to-4-overview"] -= Migration from OpenShift Container Platform 3 to 4 overview -include::_attributes/common-attributes.adoc[] -:context: migration-from-version-3-to-4-overview - -toc::[] - -{product-title} 4 clusters are different from {product-title} 3 clusters. {product-title} 4 clusters contain new technologies and functionality that result in a cluster that is self-managing, flexible, and automated. To learn more about migrating from {product-title} 3 to 4 see xref:../migrating_from_ocp_3_to_4/about-migrating-from-3-to-4.adoc#about-migrating-from-3-to-4[About migrating from OpenShift Container Platform 3 to 4]. - -[id="mtc-3-to-4-overview-differences-mtc"] -== Differences between {product-title} 3 and 4 -Before migrating from {product-title} 3 to 4, you can check xref:../migrating_from_ocp_3_to_4/planning-migration-3-4.adoc#planning-migration-3-4[differences between {product-title} 3 and 4]. 
Review the following information: - -* xref:../architecture/architecture.adoc#architecture[Architecture] -* xref:../architecture/architecture-installation.adoc#architecture-installation[Installation and update] -* xref:../storage/index.adoc#index[Storage], xref:../networking/understanding-networking.adoc#understanding-networking[network], xref:../logging/cluster-logging.adoc#cluster-logging[logging], xref:../security/index.adoc#index[security], and xref:../monitoring/monitoring-overview.adoc#monitoring-overview[monitoring considerations] - -[id="mtc-3-to-4-overview-planning-network-considerations-mtc"] -== Planning network considerations -Before migrating from {product-title} 3 to 4, review the xref:../migrating_from_ocp_3_to_4/planning-migration-3-4.adoc#planning-migration-3-4[differences between {product-title} 3 and 4] for information about the following areas: - -* xref:../migrating_from_ocp_3_to_4/planning-considerations-3-4.adoc#dns-considerations_planning-considerations-3-4[DNS considerations] -** xref:../migrating_from_ocp_3_to_4/planning-considerations-3-4.adoc#migration-isolating-dns-domain-of-target-cluster-from-clients_planning-considerations-3-4[Isolating the DNS domain of the target cluster from the clients]. -** xref:../migrating_from_ocp_3_to_4/planning-considerations-3-4.adoc#migration-setting-up-target-cluster-to-accept-source-dns-domain_planning-considerations-3-4[Setting up the target cluster to accept the source DNS domain]. - -You can migrate stateful application workloads from {product-title} 3 to 4 at the granularity of a namespace. To learn more about MTC see xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#about-mtc-3-4[Understanding MTC]. - -[NOTE] -==== -If you are migrating from {product-title} 3, see xref:../migrating_from_ocp_3_to_4/about-migrating-from-3-to-4.adoc#about-migrating-from-3-to-4[About migrating from {product-title} 3 to 4] and xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-installing-legacy-operator_installing-3-4[Installing the legacy {mtc-full} Operator on {product-title} 3]. -==== - -[id="mtc-overview-install-mtc"] -== Installing MTC -Review the following tasks to install the MTC: - -. xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-installing-mtc-on-ocp-4_installing-3-4[Install the {mtc-full} Operator on target cluster by using Operator Lifecycle Manager (OLM)]. -. xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-installing-legacy-operator_installing-3-4[Install the legacy {mtc-full} Operator on the source cluster manually]. -. xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#configuring-replication-repository_installing-3-4[Configure object storage to use as a replication repository]. - -[id="mtc-overview-upgrade-mtc"] -== Upgrading MTC -You xref:../migrating_from_ocp_3_to_4/upgrading-3-4.adoc#upgrading-3-4[upgrade the {mtc-full} ({mtc-short})] on {product-title} {product-version} by using OLM. You upgrade {mtc-short} on {product-title} 3 by reinstalling the legacy {mtc-full} Operator. - -[id="mtc-overview-mtc-checklists"] -== Reviewing premigration checklists -Before you migrate your application workloads with the Migration Toolkit for Containers (MTC), review the xref:../migrating_from_ocp_3_to_4/premigration-checklists-3-4.adoc#premigration-checklists-3-4[premigration checklists]. 
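Before working through the checklists, you can confirm that the {mtc-short} installation described above is healthy. This is a quick sketch that assumes the default `openshift-migration` namespace.

[source,terminal]
----
# Verify that the MTC Operator ClusterServiceVersion reports the Succeeded phase
$ oc get csv -n openshift-migration

# Verify that the migration controller and UI pods are running
$ oc get pods -n openshift-migration
----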
- -[id="mtc-overview-migrate-mtc-applications"] -== Migrating applications -You can migrate your applications by using the MTC xref:../migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc#migrating-applications-mtc-web-console_migrating-applications-3-4[web console] or xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migrating-applications-cli_advanced-migration-options-3-4[the command line]. - -[id="mtc-overview-advanced-migration-options"] -== Advanced migration options -You can automate your migrations and modify MTC custom resources to improve the performance of large-scale migrations by using the following options: - -* xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-state-migration-cli_advanced-migration-options-3-4[Running a state migration] -* xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-hooks_advanced-migration-options-3-4[Creating migration hooks] -* xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-plan-options_advanced-migration-options-3-4[Editing, excluding, and mapping migrated resources] -* xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-controller-options_advanced-migration-options-3-4[Configuring the migration controller for large migrations] - -[id="mtc-overview-troubleshooting-mtc"] -== Troubleshooting migrations -You can perform the following troubleshooting tasks: - -* xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#migration-viewing-migration-plan-resources_troubleshooting-3-4[Viewing migration plan resources by using the MTC web console] -* xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#migration-viewing-migration-plan-log_troubleshooting-3-4[Viewing the migration plan aggregated log file] -* xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#migration-using-mig-log-reader_troubleshooting-3-4[Using the migration log reader] -* xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#migration-accessing-performance-metrics_troubleshooting-3-4[Accessing performance metrics] -* xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#migration-using-must-gather_troubleshooting-3-4[Using the `must-gather` tool] -* xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#migration-debugging-velero-resources_troubleshooting-3-4[Using the Velero CLI to debug `Backup` and `Restore` CRs] -* xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#migration-using-mtc-crs-for-troubleshooting_troubleshooting-3-4[Using MTC custom resources for troubleshooting] -* xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#common-issues-and-concerns_troubleshooting-3-4[Checking common issues and concerns] - -[id="mtc-overview-roll-back-mtc"] -== Rolling back a migration -You can xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#rolling-back-migration_troubleshooting-3-4[roll back a migration] by using the MTC web console, by using the CLI, or manually. - -[id="mtc-overview-uninstall-mtc"] -== Uninstalling MTC and deleting resources -You can xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-uninstalling-mtc-clean-up_installing-3-4[uninstall the MTC and delete its resources] to clean up the cluster. 
diff --git a/migrating_from_ocp_3_to_4/installing-3-4.adoc b/migrating_from_ocp_3_to_4/installing-3-4.adoc deleted file mode 100644 index f22b2e7ce4d7..000000000000 --- a/migrating_from_ocp_3_to_4/installing-3-4.adoc +++ /dev/null @@ -1,63 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-3-4"] -= Installing the Migration Toolkit for Containers -include::_attributes/common-attributes.adoc[] -:context: installing-3-4 -:installing-3-4: - -toc::[] - -You can install the {mtc-full} ({mtc-short}) on {product-title} 3 and 4. - -After you install the {mtc-full} Operator on {product-title} {product-version} by using the Operator Lifecycle Manager, you manually install the legacy {mtc-full} Operator on {product-title} 3. - -By default, the {mtc-short} web console and the `Migration Controller` pod run on the target cluster. You can configure the `Migration Controller` custom resource manifest to run the {mtc-short} web console and the `Migration Controller` pod on a link:https://access.redhat.com/articles/5064151[source cluster or on a remote cluster]. - -After you have installed {mtc-short}, you must configure an object storage to use as a replication repository. - -To uninstall {mtc-short}, see xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-uninstalling-mtc-clean-up_installing-3-4[Uninstalling {mtc-short} and deleting resources]. - -include::modules/migration-compatibility-guidelines.adoc[leveloffset=+1] -include::modules/migration-installing-legacy-operator.adoc[leveloffset=+1] -include::modules/migration-installing-mtc-on-ocp-4.adoc[leveloffset=+1] -include::modules/migration-about-configuring-proxies.adoc[leveloffset=+1] -include::modules/migration-configuring-proxies.adoc[leveloffset=+2] - -For more information, see xref:../networking/enable-cluster-wide-proxy.adoc#nw-proxy-configure-object_config-cluster-wide-proxy[Configuring the cluster-wide proxy]. - -[id="configuring-replication-repository_{context}"] -== Configuring a replication repository - -You must configure an object storage to use as a replication repository. The {mtc-full} ({mtc-short}) copies data from the source cluster to the replication repository, and then from the replication repository to the target cluster. - -{mtc-short} supports the xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#migration-understanding-data-copy-methods_about-mtc-3-4[file system and snapshot data copy methods] for migrating data from the source cluster to the target cluster. You can select a method that is suited for your environment and is supported by your storage provider. - -The following storage providers are supported: - -* xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-configuring-mcg_installing-3-4[Multicloud Object Gateway] -* xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-configuring-aws-s3_installing-3-4[Amazon Web Services S3] -* xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-configuring-gcp_installing-3-4[Google Cloud Platform] -* xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-configuring-azure_installing-3-4[Microsoft Azure Blob] -* Generic S3 object storage, for example, Minio or Ceph S3 - -[id="replication-repository-prerequisites_{context}"] -=== Prerequisites - -* All clusters must have uninterrupted network access to the replication repository. -* If you use a proxy server with an internally hosted replication repository, you must ensure that the proxy allows access to the replication repository. 
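One way to sanity-check the first prerequisite above is to confirm that the replication repository endpoint is reachable from each cluster. The endpoint URL below is a placeholder for your object storage service.

[source,terminal]
----
# Run from a host or debug pod on each cluster; a TLS or HTTP response confirms basic reachability
$ curl -kIv https://s3.example.com
----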
- -include::modules/migration-configuring-mcg.adoc[leveloffset=+2] -include::modules/migration-configuring-aws-s3.adoc[leveloffset=+2] -include::modules/migration-configuring-gcp.adoc[leveloffset=+2] -include::modules/migration-configuring-azure.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="{context}_configuring-replication-repository-additional-resources"] -=== Additional resources - -* xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#migration-mtc-workflow_about-mtc-3-4[{mtc-short} workflow] -* xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#migration-understanding-data-copy-methods_about-mtc-3-4[About data copy methods] -* xref:../migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc#migration-adding-replication-repository-to-cam_migrating-applications-3-4[Adding a replication repository to the {mtc-short} web console] - -include::modules/migration-uninstalling-mtc-clean-up.adoc[leveloffset=+1] -:installing-3-4!: diff --git a/migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc b/migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc deleted file mode 100644 index 2f26cd542668..000000000000 --- a/migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc +++ /dev/null @@ -1,57 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-3-4"] -= Installing the Migration Toolkit for Containers in a restricted network environment -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-3-4 -:installing-restricted-3-4: - -toc::[] - -You can install the {mtc-full} ({mtc-short}) on {product-title} 3 and 4 in a restricted network environment by performing the following procedures: - -. Create a xref:../operators/admin/olm-restricted-networks.adoc#olm-mirror-catalog_olm-restricted-networks[mirrored Operator catalog]. -+ -This process creates a `mapping.txt` file, which contains the mapping between the `registry.redhat.io` image and your mirror registry image. The `mapping.txt` file is required for installing the Operator on the source cluster. -. Install the {mtc-full} Operator on the {product-title} {product-version} target cluster by using Operator Lifecycle Manager. -+ -By default, the {mtc-short} web console and the `Migration Controller` pod run on the target cluster. You can configure the `Migration Controller` custom resource manifest to run the {mtc-short} web console and the `Migration Controller` pod on a link:https://access.redhat.com/articles/5064151[source cluster or on a remote cluster]. - -. Install the _legacy_ {mtc-full} Operator on the {product-title} 3 source cluster from the command line interface. -. Configure object storage to use as a replication repository. - -To uninstall {mtc-short}, see xref:../migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc#migration-uninstalling-mtc-clean-up_installing-restricted-3-4[Uninstalling {mtc-short} and deleting resources]. - -include::modules/migration-compatibility-guidelines.adoc[leveloffset=+1] -include::modules/migration-installing-mtc-on-ocp-4.adoc[leveloffset=+1] -include::modules/migration-installing-legacy-operator.adoc[leveloffset=+1] -include::modules/migration-about-configuring-proxies.adoc[leveloffset=+1] -include::modules/migration-configuring-proxies.adoc[leveloffset=+2] - -For more information, see xref:../networking/enable-cluster-wide-proxy.adoc#nw-proxy-configure-object_config-cluster-wide-proxy[Configuring the cluster-wide proxy]. 
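When in doubt about which proxy settings migration traffic will inherit, you can review the cluster-wide proxy object on the {product-title} 4 cluster directly:

[source,terminal]
----
# Inspect the cluster-wide proxy configuration
$ oc get proxy cluster -o yaml
----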
- -[id="configuring-replication-repository_{context}"] -== Configuring a replication repository - -The Multicloud Object Gateway is the only supported option for a restricted network environment. - -{mtc-short} supports the xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#migration-understanding-data-copy-methods_about-mtc-3-4[file system and snapshot data copy methods] for migrating data from the source cluster to the target cluster. You can select a method that is suited for your environment and is supported by your storage provider. - -[id="replication-repository-prerequisites_{context}"] -=== Prerequisites - -* All clusters must have uninterrupted network access to the replication repository. -* If you use a proxy server with an internally hosted replication repository, you must ensure that the proxy allows access to the replication repository. - -include::modules/migration-configuring-mcg.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="{context}_configuring-replication-repository-additional-resources"] -=== Additional resources - -* link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.9/html/planning_your_deployment/disconnected-environment_rhodf[Disconnected environment] in the {rh-storage-first} documentation. -* xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#migration-mtc-workflow_about-mtc-3-4[{mtc-short} workflow] -* xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#migration-understanding-data-copy-methods_about-mtc-3-4[About data copy methods] -* xref:../migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc#migration-adding-replication-repository-to-cam_migrating-applications-3-4[Adding a replication repository to the {mtc-short} web console] - -include::modules/migration-uninstalling-mtc-clean-up.adoc[leveloffset=+1] -:installing-restricted-3-4!: diff --git a/migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc b/migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc deleted file mode 100644 index 613fe5bb3b6f..000000000000 --- a/migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc +++ /dev/null @@ -1,62 +0,0 @@ -:_content-type: ASSEMBLY -[id="migrating-applications-3-4"] -= Migrating your applications -include::_attributes/common-attributes.adoc[] -:context: migrating-applications-3-4 -:migrating-applications-3-4: - -toc::[] - -You can migrate your applications by using the {mtc-full} ({mtc-short}) web console or from the xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migrating-applications-cli_advanced-migration-options-3-4[command line]. - -You can use stage migration and cutover migration to migrate an application between clusters: - -* Stage migration copies data from the source cluster to the target cluster without stopping the application. You can run a stage migration multiple times to reduce the duration of the cutover migration. -* Cutover migration stops the transactions on the source cluster and moves the resources to the target cluster. - -You can use state migration to migrate an application's state: - -* State migration copies selected persistent volume claims (PVCs). -* You can use state migration to migrate a namespace within the same cluster. - -Most cluster-scoped resources are not yet handled by {mtc-short}. If your applications require cluster-scoped resources, you might have to create them manually on the target cluster. 
- -During migration, {mtc-short} preserves the following namespace annotations: - -* `openshift.io/sa.scc.mcs` -* `openshift.io/sa.scc.supplemental-groups` -* `openshift.io/sa.scc.uid-range` - -These annotations preserve the UID range, ensuring that the containers retain their file system permissions on the target cluster. There is a risk that the migrated UIDs could duplicate UIDs within an existing or future namespace on the target cluster. - -include::modules/migration-prerequisites.adoc[leveloffset=+1] - -[role="_additional-resources"] -[discrete] -[id="additional-resources-for-migration-prerequisites_{context}"] -=== Additional resources for migration prerequisites - -* link:https://docs.openshift.com/container-platform/3.11/install_config/registry/securing_and_exposing_registry.html#exposing-the-registry[Manually exposing a secure registry for {product-title} 3] -* xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#migration-updating-deprecated-internal-images_troubleshooting-3-4[Updating deprecated internal images] - -[id="migrating-applications-mtc-web-console_{context}"] -== Migrating your applications by using the {mtc-short} web console - -You can configure clusters and a replication repository by using the {mtc-short} web console. Then, you can create and run a migration plan. - -include::modules/migration-launching-cam.adoc[leveloffset=+2] -include::modules/migration-adding-cluster-to-cam.adoc[leveloffset=+2] -include::modules/migration-adding-replication-repository-to-cam.adoc[leveloffset=+2] -include::modules/migration-creating-migration-plan-cam.adoc[leveloffset=+2] - -[role="_additional-resources"] -[discrete] -[id="additional-resources-for-persistent-volume-copy-methods_{context}"] -=== Additional resources - -* xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#file-system-copy-method_about-mtc-3-4[{mtc-short} file system copy method] -* xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#snapshot-copy-method_about-mtc-3-4[{mtc-short} snapshot copy method] - -include::modules/migration-running-migration-plan-cam.adoc[leveloffset=+2] - -:migrating-applications-3-4!: diff --git a/migrating_from_ocp_3_to_4/modules b/migrating_from_ocp_3_to_4/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/migrating_from_ocp_3_to_4/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/migrating_from_ocp_3_to_4/planning-considerations-3-4.adoc b/migrating_from_ocp_3_to_4/planning-considerations-3-4.adoc deleted file mode 100644 index 869067e5371f..000000000000 --- a/migrating_from_ocp_3_to_4/planning-considerations-3-4.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_content-type: ASSEMBLY -[id="planning-considerations-3-4"] -= Network considerations -include::_attributes/common-attributes.adoc[] -:context: planning-considerations-3-4 - -toc::[] - -Review the strategies for redirecting your application network traffic after migration. - -[id="dns-considerations_{context}"] -== DNS considerations - -The DNS domain of the target cluster is different from the domain of the source cluster. By default, applications get FQDNs of the target cluster after migration. - -To preserve the source DNS domain of migrated applications, select one of the two options described below. 
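Before choosing one of the options, it can help to confirm the default application domain of the target cluster, because migrated routes receive FQDNs under this domain:

[source,terminal]
----
# Print the default routing domain of the target cluster
$ oc get ingresses.config.openshift.io cluster -o jsonpath='{.spec.domain}{"\n"}'
----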
- -include::modules/migration-isolating-dns-domain-of-target-cluster-from-clients.adoc[leveloffset=+2] -include::modules/migration-setting-up-target-cluster-to-accept-source-dns-domain.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../security/certificates/replacing-default-ingress-certificate.adoc#replacing-default-ingress[Replacing the default ingress certificate] for more information. - -include::modules/migration-network-traffic-redirection-strategies.adoc[leveloffset=+1] diff --git a/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc b/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc deleted file mode 100644 index eb8636c56569..000000000000 --- a/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc +++ /dev/null @@ -1,239 +0,0 @@ -:_content-type: ASSEMBLY -[id="planning-migration-3-4"] -= Differences between {product-title} 3 and 4 -include::_attributes/common-attributes.adoc[] -:context: planning-migration-3-4 - -toc::[] - -{product-title} {product-version} introduces architectural changes and enhancements. The procedures that you used to manage your {product-title} 3 cluster might not apply to {product-title} 4. - -ifndef::openshift-origin[] -For information about configuring your {product-title} 4 cluster, review the appropriate sections of the {product-title} documentation. For information about new features and other notable technical changes, review the xref:../release_notes/ocp-4-14-release-notes.adoc#ocp-4-14-release-notes[OpenShift Container Platform 4.14 release notes]. -endif::[] - -It is not possible to upgrade your existing {product-title} 3 cluster to {product-title} 4. You must start with a new {product-title} 4 installation. Tools are available to assist in migrating your control plane settings and application workloads. - -[id="migration-differences-architecture"] -== Architecture - -With {product-title} 3, administrators individually deployed {op-system-base-full} hosts, and then installed {product-title} on top of these hosts to form a cluster. Administrators were responsible for properly configuring these hosts and performing updates. - -{product-title} 4 represents a significant change in the way that {product-title} clusters are deployed and managed. {product-title} 4 includes new technologies and functionality, such as Operators, machine sets, and {op-system-first}, which are core to the operation of the cluster. This technology shift enables clusters to self-manage some functions previously performed by administrators. This also ensures platform stability and consistency, and simplifies installation and scaling. - -Beginning with {product-title} 4.13, {op-system} now uses {op-system-base-full} 9.2 packages. This enhancement enables the latest fixes and features as well as the latest hardware support and driver updates. For more information about how this upgrade to RHEL 9.2 might affect your options configuration and services as well as driver and container support, see the link:https://docs.openshift.com/container-platform/4.13/release_notes/ocp-4-13-release-notes.html#ocp-4-13-rhel-9-considerations[RHCOS now uses RHEL 9.2] in the _OpenShift Container Platform 4.13 release notes_. - -For more information, see xref:../architecture/architecture.adoc#architecture[OpenShift Container Platform architecture]. 
- -[discrete] -=== Immutable infrastructure - -{product-title} 4 uses {op-system-first}, which is designed to run containerized applications, and provides efficient installation, Operator-based management, and simplified upgrades. {op-system} is an immutable container host, rather than a customizable operating system like {op-system-base}. {op-system} enables {product-title} 4 to manage and automate the deployment of the underlying container host. {op-system} is a part of {product-title}, which means that everything runs inside a container and is deployed using {product-title}. - -In {product-title} 4, control plane nodes must run {op-system}, ensuring that full-stack automation is maintained for the control plane. This makes rolling out updates and upgrades a much easier process than in {product-title} 3. - -For more information, see xref:../architecture/architecture-rhcos.adoc#architecture-rhcos[{op-system-first}]. - -[discrete] -=== Operators - -Operators are a method of packaging, deploying, and managing a Kubernetes application. Operators ease the operational complexity of running another piece of software. They watch over your environment and use the current state to make decisions in real time. Advanced Operators are designed to upgrade and react to failures automatically. - -For more information, see xref:../operators/understanding/olm-what-operators-are.adoc#olm-what-operators-are[Understanding Operators]. - -[id="migration-differences-install"] -== Installation and upgrade - -[discrete] -=== Installation process - -To install {product-title} 3.11, you prepared your {op-system-base-full} hosts, set all of the configuration values your cluster needed, and then ran an Ansible playbook to install and set up your cluster. - -In {product-title} {product-version}, you use the OpenShift installation program to create a minimum set of resources required for a cluster. After the cluster is running, you use Operators to further configure your cluster and to install new services. After first boot, {op-system-first} systems are managed by the Machine Config Operator (MCO) that runs in the {product-title} cluster. - -For more information, see xref:../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process]. - -ifndef::openshift-origin[] -If you want to add {op-system-base-full} worker machines to your {product-title} {product-version} cluster, you use an Ansible playbook to join the {op-system-base} worker machines after the cluster is running. For more information, see xref:../machine_management/adding-rhel-compute.adoc#adding-rhel-compute[Adding {op-system-base} compute machines to an {product-title} cluster]. -endif::[] - -[discrete] -=== Infrastructure options - -In {product-title} 3.11, you installed your cluster on infrastructure that you prepared and maintained. In addition to providing your own infrastructure, {product-title} 4 offers an option to deploy a cluster on infrastructure that the {product-title} installation program provisions and the cluster maintains. - -For more information, see xref:../architecture/architecture-installation.adoc#installation-overview_architecture-installation[OpenShift Container Platform installation overview]. - -[discrete] -=== Upgrading your cluster - -In {product-title} 3.11, you upgraded your cluster by running Ansible playbooks. In {product-title} {product-version}, the cluster manages its own updates, including updates to {op-system-first} on cluster nodes. 
You can easily upgrade your cluster by using the web console or by using the `oc adm upgrade` command from the OpenShift CLI and the Operators will automatically upgrade themselves. If your {product-title} {product-version} cluster has {op-system-base} worker machines, then you will still need to run an Ansible playbook to upgrade those worker machines. - -For more information, see xref:../updating/updating_a_cluster/updating-cluster-web-console.adoc#updating-cluster-web-console[Updating clusters]. - -[id="migration-considerations"] -== Migration considerations - -Review the changes and other considerations that might affect your transition from {product-title} 3.11 to {product-title} 4. - -[id="migration-preparing-storage"] -=== Storage considerations - -Review the following storage changes to consider when transitioning from {product-title} 3.11 to {product-title} {product-version}. - -[discrete] -==== Local volume persistent storage - -Local storage is only supported by using the Local Storage Operator in {product-title} {product-version}. It is not supported to use the local provisioner method from {product-title} 3.11. - -For more information, see xref:../storage/persistent_storage/persistent_storage_local/persistent-storage-local.adoc#persistent-storage-using-local-volume[Persistent storage using local volumes]. - -[discrete] -==== FlexVolume persistent storage - -The FlexVolume plugin location changed from {product-title} 3.11. The new location in {product-title} {product-version} is `/etc/kubernetes/kubelet-plugins/volume/exec`. Attachable FlexVolume plugins are no longer supported. - -For more information, see xref:../storage/persistent_storage/persistent-storage-flexvolume.adoc#persistent-storage-using-flexvolume[Persistent storage using FlexVolume]. - -[discrete] -==== Container Storage Interface (CSI) persistent storage - -Persistent storage using the Container Storage Interface (CSI) was link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] in {product-title} 3.11. {product-title} {product-version} ships with xref:../storage/container_storage_interface/persistent-storage-csi.adoc#csi-drivers-supported_persistent-storage-csi[several CSI drivers]. You can also install your own driver. - -For more information, see xref:../storage/container_storage_interface/persistent-storage-csi.adoc#persistent-storage-using-csi[Persistent storage using the Container Storage Interface (CSI)]. - -[discrete] -==== Red Hat OpenShift Data Foundation - -OpenShift Container Storage 3, which is available for use with {product-title} 3.11, uses Red Hat Gluster Storage as the backing storage. - -{rh-storage-first} 4, which is available for use with {product-title} 4, uses Red Hat Ceph Storage as the backing storage. - -For more information, see xref:../storage/persistent_storage/persistent-storage-ocs.adoc#red-hat-openshift-data-foundation[Persistent storage using Red Hat OpenShift Data Foundation] and the link:https://access.redhat.com/articles/4731161[interoperability matrix] article. - -[discrete] -==== Unsupported persistent storage options - -Support for the following persistent storage options from {product-title} 3.11 has changed in {product-title} {product-version}: - -* GlusterFS is no longer supported. -* CephFS as a standalone product is no longer supported. -* Ceph RBD as a standalone product is no longer supported. 
- -If you used one of these storage options in {product-title} 3.11, you must choose a different persistent storage option for full support in {product-title} {product-version}. - -For more information, see xref:../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Understanding persistent storage]. - -[discrete] -==== Migration of in-tree volumes to CSI drivers - -{product-title} 4 is migrating in-tree volume plugins to their Container Storage Interface (CSI) counterparts. In {product-title} {product-version}, CSI drivers are the new default for the following in-tree volume types: - -* Amazon Web Services (AWS) Elastic Block Storage (EBS) -* Azure Disk -* Azure File -* Google Cloud Platform Persistent Disk (GCP PD) -* OpenStack Cinder -* VMware vSphere -+ -[NOTE] -==== -As of {product-title} 4.13, VMware vSphere is not available by default. However, you can opt into VMware vSphere. -==== - -All aspects of the volume lifecycle, such as creation, deletion, mounting, and unmounting, are handled by the CSI driver. - -For more information, see xref:../storage/container_storage_interface/persistent-storage-csi-migration.adoc#persistent-storage-csi-migration[CSI automatic migration]. - -[id="migration-preparing-networking"] -=== Networking considerations - -Review the following networking changes to consider when transitioning from {product-title} 3.11 to {product-title} {product-version}. - -[discrete] -==== Network isolation mode - -The default network isolation mode for {product-title} 3.11 was `ovs-subnet`, though users frequently switched to use `ovs-multitenant`. The default network isolation mode for {product-title} {product-version} is controlled by a network policy. - -If your {product-title} 3.11 cluster used the `ovs-subnet` or `ovs-multitenant` mode, it is recommended to switch to a network policy for your {product-title} {product-version} cluster. Network policies are supported upstream, are more flexible, and they provide the functionality that `ovs-multitenant` does. If you want to maintain the `ovs-multitenant` behavior while using a network policy in {product-title} {product-version}, follow the steps to xref:../networking/network_policy/multitenant-network-policy.adoc#multitenant-network-policy[configure multitenant isolation using network policy]. - -For more information, see xref:../networking/network_policy/about-network-policy.adoc#about-network-policy[About network policy]. - -[discrete] -==== OVN-Kubernetes as the default networking plugin in Red Hat OpenShift Networking - -In {product-title} 3.11, OpenShift SDN was the default networking plugin in Red Hat OpenShift Networking. In {product-title} {product-version}, OVN-Kubernetes is now the default networking plugin. - -For information on migrating to OVN-Kubernetes from OpenShift SDN, see xref:../networking/ovn_kubernetes_network_provider/migrate-from-openshift-sdn.adoc#migrate-from-openshift-sdn[Migrating from the OpenShift SDN network plugin]. - -[id="migration-preparing-logging"] -=== Logging considerations - -Review the following logging changes to consider when transitioning from {product-title} 3.11 to {product-title} {product-version}. - -[discrete] -==== Deploying OpenShift Logging - -{product-title} 4 provides a simple deployment mechanism for OpenShift Logging by using a Cluster Logging custom resource. - -For more information, see xref:../logging/cluster-logging-deploying.adoc#cluster-logging-deploying_cluster-logging-deploying[Installing OpenShift Logging].
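After the logging Operators are installed, the deployment is driven by that custom resource. The following commands are a sketch for verifying it; the conventional instance name `instance` and the `openshift-logging` namespace are assumptions based on a default installation.

[source,terminal]
----
# Confirm that the Cluster Logging custom resource exists
$ oc get clusterlogging instance -n openshift-logging -o yaml

# Confirm that the collector and log store pods are running
$ oc get pods -n openshift-logging
----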
- -[discrete] -==== Aggregated logging data - -You cannot transition your aggregate logging data from {product-title} 3.11 into your new {product-title} 4 cluster. - -For more information, see xref:../logging/cluster-logging.adoc#cluster-logging-about_cluster-logging[About OpenShift Logging]. - -[discrete] -==== Unsupported logging configurations - -Some logging configurations that were available in {product-title} 3.11 are no longer supported in {product-title} {product-version}. - -For more information on the explicitly unsupported logging cases, see xref:../logging/config/cluster-logging-maintenance-support.adoc#cluster-logging-maintenance-and-support[Maintenance and support]. - -[id="migration-preparing-security"] -=== Security considerations - -Review the following security changes to consider when transitioning from {product-title} 3.11 to {product-title} {product-version}. - -[discrete] -==== Unauthenticated access to discovery endpoints - -In {product-title} 3.11, an unauthenticated user could access the discovery endpoints (for example, [x-]`/api/*` and [x-]`/apis/*`). For security reasons, unauthenticated access to the discovery endpoints is no longer allowed in {product-title} {product-version}. If you do need to allow unauthenticated access, you can configure the RBAC settings as necessary; however, be sure to consider the security implications as this can expose internal cluster components to the external network. - -// TODO: Anything to xref to, or additional details? - -[discrete] -==== Identity providers - -Configuration for identity providers has changed for {product-title} 4, including the following notable changes: - -* The request header identity provider in {product-title} {product-version} requires mutual TLS, where in {product-title} 3.11 it did not. -* The configuration of the OpenID Connect identity provider was simplified in {product-title} {product-version}. It now obtains data, which previously had to specified in {product-title} 3.11, from the provider's `/.well-known/openid-configuration` endpoint. - -For more information, see xref:../authentication/understanding-identity-provider.adoc#understanding-identity-provider[Understanding identity provider configuration]. - -[discrete] -==== OAuth token storage format - -Newly created OAuth HTTP bearer tokens no longer match the names of their OAuth access token objects. The object names are now a hash of the bearer token and are no longer sensitive. This reduces the risk of leaking sensitive information. - -[discrete] -==== Default security context constraints - -The `restricted` security context constraints (SCC) in {product-title} 4 can no longer be accessed by any authenticated user as the `restricted` SCC in {product-title} 3.11. The broad authenticated access is now granted to the `restricted-v2` SCC, which is more restrictive than the old `restricted` SCC. The `restricted` SCC still exists; users that want to use it must be specifically given permissions to do it. - -For more information, see xref:../authentication/managing-security-context-constraints.adoc#managing-pod-security-policies[Managing security context constraints]. - -[id="migration-preparing-monitoring"] -=== Monitoring considerations - -Review the following monitoring changes when transitioning from {product-title} 3.11 to {product-title} {product-version}. You cannot migrate Hawkular configurations and metrics to Prometheus. 
- -[discrete] -==== Alert for monitoring infrastructure availability - -The default alert that triggers to ensure the availability of the monitoring structure was called `DeadMansSwitch` in {product-title} 3.11. This was renamed to `Watchdog` in {product-title} 4. If you had PagerDuty integration set up with this alert in {product-title} 3.11, you must set up the PagerDuty integration for the `Watchdog` alert in {product-title} 4. - -For more information, see xref:../monitoring/managing-alerts.adoc#applying-custom-alertmanager-configuration_managing-alerts[Applying custom Alertmanager configuration]. diff --git a/migrating_from_ocp_3_to_4/premigration-checklists-3-4.adoc b/migrating_from_ocp_3_to_4/premigration-checklists-3-4.adoc deleted file mode 100644 index 8db3285d6cc1..000000000000 --- a/migrating_from_ocp_3_to_4/premigration-checklists-3-4.adoc +++ /dev/null @@ -1,113 +0,0 @@ -:_content-type: ASSEMBLY -[id="premigration-checklists-3-4"] -= Premigration checklists -include::_attributes/common-attributes.adoc[] -:context: premigration-checklists-3-4 - -toc::[] - -Before you migrate your application workloads with the {mtc-full} ({mtc-short}), review the following checklists. - -[id="resources_{context}"] -== Resources - -* [ ] If your application uses an internal service network or an external route for communicating with services, the relevant route exists. -* [ ] If your application uses cluster-level resources, you have re-created them on the target cluster. -* [ ] You have xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-excluding-resources_advanced-migration-options-3-4[excluded] persistent volumes (PVs), image streams, and other resources that you do not want to migrate. -* [ ] PV data has been backed up in case an application displays unexpected behavior after migration and corrupts the data. - -[id="source-cluster_{context}"] -== Source cluster - -* [ ] The cluster meets the link:https://docs.openshift.com/container-platform/3.11/install/prerequisites.html#hardware[minimum hardware requirements]. -* [ ] You have installed the correct legacy {mtc-full} Operator version: -** `operator-3.7.yml` on {product-title} version 3.7. -** `operator.yml` on {product-title} versions 3.9 to 4.5. -* [ ] All nodes have an active {product-title} subscription. -* [ ] You have performed all the link:https://docs.openshift.com/container-platform/3.11/day_two_guide/run_once_tasks.html#day-two-guide-default-storage-class[run-once tasks]. -* [ ] You have performed all the link:https://docs.openshift.com/container-platform/3.11/day_two_guide/environment_health_checks.html[environment health checks]. -* [ ] You have checked for PVs with abnormal configurations stuck in a *Terminating* state by running the following command: -+ -[source,terminal] ----- -$ oc get pv ----- - -* [ ] You have checked for pods whose status is other than *Running* or *Completed* by running the following command: -+ -[source,terminal] ----- -$ oc get pods --all-namespaces | egrep -v 'Running | Completed' ----- - -* [ ] You have checked for pods with a high restart count by running the following command: -+ -[source,terminal] ----- -$ oc get pods --all-namespaces --field-selector=status.phase=Running \ - -o json | jq '.items[]|select(any( .status.containerStatuses[]; \ - .restartCount > 3))|.metadata.name' ----- -+ -Even if the pods are in a *Running* state, a high restart count might indicate underlying problems. 
- -* [ ] You have removed old builds, deployments, and images from each namespace to be migrated by xref:../applications/pruning-objects.adoc#pruning-objects[pruning]. -* [ ] The {product-registry} uses a link:https://docs.openshift.com/container-platform/3.11/scaling_performance/optimizing_storage.html#registry[supported storage type]. -* [ ] Direct image migration only: The {product-registry} is link:https://docs.openshift.com/container-platform/3.11/install_config/registry/securing_and_exposing_registry.html#exposing-the-registry[exposed] to external traffic. -* [ ] You can read and write images to the registry. -* [ ] The link:https://access.redhat.com/articles/3093761[etcd cluster] is healthy. -* [ ] The link:https://docs.openshift.com/container-platform/3.11/install_config/master_node_configuration.html#master-node-configuration-node-qps-burst[average API server response time] on the source cluster is less than 50 ms. -* [ ] The cluster certificates are link:https://docs.openshift.com/container-platform/3.11/install_config/redeploying_certificates.html#install-config-cert-expiry[valid] for the duration of the migration process. -* [ ] You have checked for pending certificate-signing requests by running the following command: -+ -[source,terminal] ----- -$ oc get csr -A | grep pending -i ----- - -* [ ] The link:https://docs.openshift.com/container-platform/3.11/install_config/configuring_authentication.html#overview[identity provider] is working. -* [ ] You have set the value of the `openshift.io/host.generated` annotation parameter to `true` for each {product-title} route, which updates the host name of the route for the target cluster. Otherwise, the migrated routes retain the source cluster host name. - -[id="target-cluster_{context}"] -== Target cluster - -* [ ] You have installed {mtc-full} Operator version 1.5.1. -* [ ] All xref:../migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc#migration-prerequisites_migrating-applications-3-4[{mtc-short} prerequisites] are met. -* [ ] The cluster meets the minimum hardware requirements for the specific platform and installation method, for example, on xref:../installing/installing_bare_metal/installing-bare-metal.adoc#minimum-resource-requirements_installing-bare-metal[bare metal]. -* [ ] The cluster has xref:../storage/dynamic-provisioning.adoc#defining-storage-classes_dynamic-provisioning[storage classes] defined for the storage types used by the source cluster, for example, block volume, file system, or object storage. -+ -[NOTE] -==== -NFS does not require a defined storage class. -==== - -* [ ] The cluster has the correct network configuration and permissions to access external services, for example, databases, source code repositories, container image registries, and CI/CD tools. -* [ ] External applications and services that use services provided by the cluster have the correct network configuration and permissions to access the cluster. -* [ ] Internal container image dependencies are met. -+ -If an application uses an internal image in the `openshift` namespace that is not supported by {product-title} {product-version}, you can manually update the xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#migration-updating-deprecated-internal-images_troubleshooting-3-4[{product-title} 3 image stream tag] with `podman`. -* [ ] The target cluster and the replication repository have sufficient storage space. 
-* [ ] The xref:../authentication/understanding-identity-provider.adoc#supported-identity-providers[identity provider] is working. -* [ ] DNS records for your application exist on the target cluster. -* [ ] Certificates that your application uses exist on the target cluster. -* [ ] You have configured appropriate firewall rules on the target cluster. -* [ ] You have correctly configured load balancing on the target cluster. -* [ ] If you migrate objects to an existing namespace on the target cluster that has the same name as the namespace being migrated from the source, the target namespace contains no objects of the same name and type as the objects being migrated. -+ -[NOTE] -==== -Do not create namespaces for your application on the target cluster before migration because this might cause quotas to change. -==== - -[id="performance_{context}"] -== Performance - -* [ ] The migration network has a minimum throughput of 10 Gbps. -* [ ] The clusters have sufficient resources for migration. -+ -[NOTE] -==== -Clusters require additional memory, CPUs, and storage in order to run a migration on top of normal workloads. Actual resource requirements depend on the number of Kubernetes resources being migrated in a single migration plan. You must test migrations in a non-production environment in order to estimate the resource requirements. -==== -* [ ] The xref:../support/troubleshooting/verifying-node-health.adoc#reviewing-node-status-use-and-configuration_verifying-node-health[memory and CPU usage] of the nodes are healthy. -* [ ] The link:https://access.redhat.com/solutions/4885641[etcd disk performance] of the clusters has been checked with `fio`. diff --git a/migrating_from_ocp_3_to_4/snippets b/migrating_from_ocp_3_to_4/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/migrating_from_ocp_3_to_4/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc b/migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc deleted file mode 100644 index 951cf515fd72..000000000000 --- a/migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc +++ /dev/null @@ -1,67 +0,0 @@ -:_content-type: ASSEMBLY -[id="troubleshooting-3-4"] -= Troubleshooting -include::_attributes/common-attributes.adoc[] -:context: troubleshooting-3-4 -:troubleshooting-3-4: -:namespace: openshift-migration -:local-product: {mtc-short} -:must-gather: registry.redhat.io/rhmtc/openshift-migration-must-gather-rhel8:v{mtc-version} - -toc::[] - -This section describes resources for troubleshooting the {mtc-full} ({mtc-short}). - -For known issues, see the xref:../migration_toolkit_for_containers/mtc-release-notes.adoc#mtc-release-notes[{mtc-short} release notes]. - -include::modules/migration-mtc-workflow.adoc[leveloffset=+1] - -[discrete] -include::modules/migration-about-mtc-custom-resources.adoc[leveloffset=+2] - -include::modules/migration-mtc-cr-manifests.adoc[leveloffset=+1] - -[id="logs-and-debugging-tools_{context}"] -== Logs and debugging tools - -This section describes logs and debugging tools that you can use for troubleshooting. 
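-
-As a quick orientation before the modules that follow, support data for {mtc-short} is typically collected with the `must-gather` image referenced by the `{must-gather}` attribute defined above. The following invocation is a sketch; the exact image and options for your {mtc-short} version are described in the must-gather module included below.
-
-[source,terminal]
-----
-$ oc adm must-gather --image={must-gather}
-----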
- -include::modules/migration-viewing-migration-plan-resources.adoc[leveloffset=+2] -include::modules/migration-viewing-migration-plan-log.adoc[leveloffset=+2] -include::modules/migration-using-mig-log-reader.adoc[leveloffset=+2] -include::modules/migration-accessing-performance-metrics.adoc[leveloffset=+2] -include::modules/migration-provided-metrics.adoc[leveloffset=+3] -include::modules/migration-using-must-gather.adoc[leveloffset=+2] -include::modules/migration-debugging-velero-resources.adoc[leveloffset=+2] -include::modules/migration-partial-failure-velero.adoc[leveloffset=+2] -include::modules/migration-using-mtc-crs-for-troubleshooting.adoc[leveloffset=+2] - -[id="common-issues-and-concerns_{context}"] -== Common issues and concerns - -This section describes common issues and concerns that can cause issues during migration. - -include::modules/migration-updating-deprecated-internal-images.adoc[leveloffset=+2] -include::modules/migration-dvm-error-node-selectors.adoc[leveloffset=+2] -include::modules/migration-error-messages.adoc[leveloffset=+2] -include::modules/migration-known-issues.adoc[leveloffset=+2] - -[id="rolling-back-migration_{context}"] -== Rolling back a migration - -You can roll back a migration by using the {mtc-short} web console or the CLI. - -You can also xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#migration-rolling-back-migration-manually_troubleshooting-3-4[roll back a migration manually]. - -include::modules/migration-rolling-back-migration-web-console.adoc[leveloffset=+2] -include::modules/migration-rolling-back-migration-cli.adoc[leveloffset=+2] -include::modules/migration-rolling-back-migration-manually.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources-uninstalling_{context}"] -[discrete] -=== Additional resources - -* xref:../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster-using-web-console_olm-deleting-operators-from-cluster[Deleting Operators from a cluster using the web console] - -:troubleshooting-3-4!: diff --git a/migrating_from_ocp_3_to_4/upgrading-3-4.adoc b/migrating_from_ocp_3_to_4/upgrading-3-4.adoc deleted file mode 100644 index 2b5567c7de14..000000000000 --- a/migrating_from_ocp_3_to_4/upgrading-3-4.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="upgrading-3-4"] -= Upgrading the Migration Toolkit for Containers -include::_attributes/common-attributes.adoc[] -:context: upgrading-3-4 -:upgrading-3-4: - -toc::[] - -You can upgrade the {mtc-full} ({mtc-short}) on {product-title} {product-version} by using Operator Lifecycle Manager. - -You can upgrade {mtc-short} on {product-title} 3 by reinstalling the legacy {mtc-full} Operator. - -[IMPORTANT] -==== -If you are upgrading from {mtc-short} version 1.3, you must perform an additional procedure to update the `MigPlan` custom resource (CR). 
-==== - -include::modules/migration-upgrading-mtc-on-ocp-4.adoc[leveloffset=+1] -include::modules/migration-upgrading-mtc-with-legacy-operator.adoc[leveloffset=+1] -include::modules/migration-upgrading-from-mtc-1-3.adoc[leveloffset=+1] -:upgrading-3-4!: diff --git a/migration_toolkit_for_containers/_attributes b/migration_toolkit_for_containers/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/migration_toolkit_for_containers/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/migration_toolkit_for_containers/about-mtc.adoc b/migration_toolkit_for_containers/about-mtc.adoc deleted file mode 100644 index ada85b70b63e..000000000000 --- a/migration_toolkit_for_containers/about-mtc.adoc +++ /dev/null @@ -1,30 +0,0 @@ -:_content-type: ASSEMBLY -[id="about-mtc"] -= About the Migration Toolkit for Containers -include::_attributes/common-attributes.adoc[] -:context: about-mtc - -toc::[] - -The {mtc-full} ({mtc-short}) enables you to migrate stateful application workloads between {product-title} 4 clusters at the granularity of a namespace. - -[NOTE] -==== -If you are migrating from {product-title} 3, see xref:../migrating_from_ocp_3_to_4/about-migrating-from-3-to-4.adoc#about-migrating-from-3-to-4[About migrating from {product-title} 3 to 4] and xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-installing-legacy-operator_installing-3-4[Installing the legacy {mtc-full} Operator on {product-title} 3]. -==== - -You can migrate applications within the same cluster or between clusters by using state migration. - -{mtc-short} provides a web console and an API, based on Kubernetes custom resources, to help you control the migration and minimize application downtime. - -The {mtc-short} console is installed on the target cluster by default. You can configure the {mtc-full} Operator to install the console on a link:https://access.redhat.com/articles/5064151[remote cluster]. - -See xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#advanced-migration-options-mtc[Advanced migration options] for information about the following topics: - -* Automating your migration with migration hooks and the {mtc-short} API. -* Configuring your migration plan to exclude resources, support large-scale migrations, and enable automatic PV resizing for direct volume migration. - -include::modules/migration-terminology.adoc[leveloffset=+1] -include::modules/migration-mtc-workflow.adoc[leveloffset=+1] -include::modules/migration-understanding-data-copy-methods.adoc[leveloffset=+1] -include::modules/migration-direct-volume-migration-and-direct-image-migration.adoc[leveloffset=+1] diff --git a/migration_toolkit_for_containers/advanced-migration-options-mtc.adoc b/migration_toolkit_for_containers/advanced-migration-options-mtc.adoc deleted file mode 100644 index 65b150a366ba..000000000000 --- a/migration_toolkit_for_containers/advanced-migration-options-mtc.adoc +++ /dev/null @@ -1,69 +0,0 @@ -:_content-type: ASSEMBLY -[id="advanced-migration-options-mtc"] -= Advanced migration options -include::_attributes/common-attributes.adoc[] -:context: advanced-migration-options-mtc - -toc::[] - -You can automate your migrations and modify the `MigPlan` and `MigrationController` custom resources in order to perform large-scale migrations and to improve performance. 
- -include::modules/migration-terminology.adoc[leveloffset=+1] - -[id="migrating-applications-cli_{context}"] -== Migrating applications by using the command line - -You can migrate applications with the {mtc-short} API by using the command line interface (CLI) in order to automate the migration. - -include::modules/migration-prerequisites.adoc[leveloffset=+2] -include::modules/migration-creating-registry-route-for-dim.adoc[leveloffset=+2] -include::modules/migration-about-configuring-proxies.adoc[leveloffset=+2] -include::modules/migration-configuring-proxies.adoc[leveloffset=+3] -include::modules/migration-migrating-applications-api.adoc[leveloffset=+2] -include::modules/migration-state-migration-cli.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources-for-state-migration_{context}"] -[discrete] -=== Additional resources - -* See xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-excluding-pvcs_advanced-migration-options-mtc[Excluding PVCs from migration] to select PVCs for state migration. -* See xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-mapping-pvcs_advanced-migration-options-mtc[Mapping PVCs] to migrate source PV data to provisioned PVCs on the destination cluster. -* See xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-kubernetes-objects_advanced-migration-options-mtc[Migrating Kubernetes objects] to migrate the Kubernetes objects that constitute an application's state. - -include::modules/migration-hooks.adoc[leveloffset=+1] -include::modules/migration-writing-ansible-playbook-hook.adoc[leveloffset=+2] - -[id="migration-plan-options_{context}"] -== Migration plan options - -You can exclude, edit, and map components in the `MigPlan` custom resource (CR). - -include::modules/migration-excluding-resources.adoc[leveloffset=+2] -include::modules/migration-mapping-destination-namespaces-in-the-migplan-cr.adoc[leveloffset=+2] -include::modules/migration-excluding-pvcs.adoc[leveloffset=+2] -include::modules/migration-mapping-pvcs.adoc[leveloffset=+2] -include::modules/migration-editing-pvs-in-migplan.adoc[leveloffset=+2] -include::modules/migration-converting-storage-classes.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources-for-editing-pv-attributes_{context}"] -[discrete] -==== Additional resources - -* For details about the `move` and `copy` actions, see xref:../migration_toolkit_for_containers/about-mtc.adoc#migration-mtc-workflow_about-mtc[MTC workflow]. -* For details about the `skip` action, see xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-excluding-pvcs_advanced-migration-options-mtc[Excluding PVCs from migration]. -* For details about the file system and snapshot copy methods, see xref:../migration_toolkit_for_containers/about-mtc.adoc#migration-understanding-data-copy-methods_about-mtc[About data copy methods]. - -include::modules/migration-kubernetes-objects.adoc[leveloffset=+2] - -[id="migration-controller-options_{context}"] -== Migration controller options - -You can edit migration plan limits, enable persistent volume resizing, or enable cached Kubernetes clients in the `MigrationController` custom resource (CR) for large migrations and improved performance. 
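-
-For orientation, the modules included below all edit parameters of the `MigrationController` CR in the `openshift-migration` namespace. The following is a hedged sketch only; the resource name and the parameter names reflect recent {mtc-short} releases and should be verified against the procedures in those modules before use.
-
-[source,terminal]
-----
-$ oc edit migrationcontroller migration-controller -n openshift-migration
-----
-
-[source,yaml]
-----
-spec:
-  mig_namespace_limit: 10        # illustrative migration plan limits
-  mig_pod_limit: 100
-  mig_pv_limit: 100
-  enable_dvm_pv_resizing: true   # assumed parameter for PV resizing; confirm for your MTC version
-----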
-
-include::modules/migration-changing-migration-plan-limits.adoc[leveloffset=+2]
-include::modules/migration-enabling-pv-resizing-dvm.adoc[leveloffset=+2]
-include::modules/migration-enabling-cached-kubernetes-clients.adoc[leveloffset=+2]
-
-:advanced-migration-options-mtc!:
diff --git a/migration_toolkit_for_containers/images b/migration_toolkit_for_containers/images
deleted file mode 120000
index 5e67573196d8..000000000000
--- a/migration_toolkit_for_containers/images
+++ /dev/null
@@ -1 +0,0 @@
-../images
\ No newline at end of file
diff --git a/migration_toolkit_for_containers/index.adoc b/migration_toolkit_for_containers/index.adoc
deleted file mode 100644
index 3b1383756e43..000000000000
--- a/migration_toolkit_for_containers/index.adoc
+++ /dev/null
@@ -1,68 +0,0 @@
-[id="migration-toolkit-for-containers-overview"]
-= Migration toolkit for containers overview
-include::_attributes/common-attributes.adoc[]
-:context: migration-toolkit-for-containers-overview
-
-toc::[]
-
-You can migrate stateful application workloads between {product-title} 4 clusters at the granularity of a namespace by using the Migration Toolkit for Containers (MTC). To learn more about MTC, see xref:../migration_toolkit_for_containers/about-mtc.adoc#about-mtc[understanding MTC].
-
-[NOTE]
-====
-If you are migrating from {product-title} 3, see xref:../migrating_from_ocp_3_to_4/about-migrating-from-3-to-4.adoc#about-migrating-from-3-to-4[about migrating from {product-title} 3 to 4] and xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-installing-legacy-operator_installing-3-4[installing the legacy {mtc-full} Operator on {product-title} 3].
-====
-
-[id="mtc-overview-install-mtc"]
-== Installing MTC
-You must install the Migration Toolkit for Containers Operator that is compatible with your {product-title} version:
-
-* {product-title} 4.6 and later versions: xref:../migration_toolkit_for_containers/installing-mtc.adoc#installing-mtc[Install the Migration Toolkit for Containers Operator by using Operator Lifecycle Manager (OLM)].
-* {product-title} 4.5 and earlier versions: xref:../migration_toolkit_for_containers/installing-mtc.adoc#configuring-replication-repository_installing-mtc[Manually install the legacy Migration Toolkit for Containers Operator].
-
-Then you xref:../migration_toolkit_for_containers/installing-mtc.adoc#configuring-replication-repository_installing-mtc[configure object storage to use as a replication repository].
-
-[id="mtc-overview-upgrade-mtc"]
-== Upgrading MTC
-You can xref:../migration_toolkit_for_containers/upgrading-mtc.adoc#upgrading-mtc[upgrade the MTC] by using OLM.
-
-[id="mtc-overview-mtc-checklists"]
-== Reviewing MTC checklists
-Before you migrate your application workloads with the Migration Toolkit for Containers (MTC), review the xref:../migration_toolkit_for_containers/premigration-checklists-mtc.adoc#premigration-checklists-mtc[premigration checklists].
-
-[id="mtc-overview-migrate-mtc-applications"]
-== Migrating applications
-You can migrate your applications by using the MTC xref:../migration_toolkit_for_containers/migrating-applications-with-mtc.adoc#migrating-applications-mtc-web-console_migrating-applications-with-mtc[web console] or xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migrating-applications-cli_advanced-migration-options-mtc[the command line].
- -[id="mtc-overview-advanced-migration-options"] -== Advanced migration options -You can automate your migrations and modify the `MigPlan` and `MigrationController` custom resources in order to perform large-scale migrations and to improve performance. You can check the following items: - -* xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-creating-registry-route-for-dim_advanced-migration-options-mtc[Create a registry route for direct image migration] -* xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-configuring-proxies_advanced-migration-options-mtc[Configuring proxies] -* xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-migrating-applications-api_advanced-migration-options-mtc[Migrating an application by using the MTC API] -* xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-state-migration-cli_advanced-migration-options-mtc[Running a state migration] -* xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-hooks_advanced-migration-options-mtc[Creating migration hooks] -* xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-plan-options_advanced-migration-options-mtc[Editing, excluding, and mapping migrated resources] -* xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-controller-options_advanced-migration-options-mtc[Configuring the migration controller for large migrations] - -[id="mtc-overview-troubleshooting-mtc"] -== Troubleshooting migrations -You can perform the following troubleshooting tasks: - -* xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#migration-viewing-migration-plan-resources_troubleshooting-mtc[Viewing plan resources] -* xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#migration-viewing-migration-plan-log_troubleshooting-mtc[Viewing the migration plan aggregated log file] -* xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#migration-using-mig-log-reader_troubleshooting-mtc[Using the migration log reader] -* xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#migration-accessing-performance-metrics_troubleshooting-mtc[Accessing performance metrics] -* xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#migration-using-must-gather_troubleshooting-mtc[Using the `must-gather` tool] -* xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#migration-debugging-velero-resources_troubleshooting-mtc[Using the Velero CLI to debug Backup and Restore CRs] -* xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#migration-partial-failure-velero_troubleshooting-mtc[Debugging a partial migration failure] -* xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#migration-using-mtc-crs-for-troubleshooting_troubleshooting-mtc[Using MTC custom resources for troubleshooting] -* xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#common-issues-and-concerns_troubleshooting-mtc[Checking common issues and concerns] - -[id="mtc-overview-roll-back-mtc"] -== Rolling back a migration -You can xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#rolling-back-migration_troubleshooting-mtc[roll back a migration] by using the MTC web console, the CLI or manually. 
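-
-If you use the CLI, a rollback is requested through a `MigMigration` resource that references the original migration plan. The manifest below is a sketch under the assumption that the `MigMigration` API (`migration.openshift.io/v1alpha1`) of recent {mtc-short} releases is in use; `<migplan_name>` is a placeholder, and the linked rollback procedures remain authoritative.
-
-[source,yaml]
-----
-apiVersion: migration.openshift.io/v1alpha1
-kind: MigMigration
-metadata:
-  name: rollback-migration-example   # illustrative name
-  namespace: openshift-migration
-spec:
-  rollback: true                     # roll back the migration performed for the referenced plan
-  stage: false
-  migPlanRef:
-    name: <migplan_name>             # placeholder for your MigPlan name
-    namespace: openshift-migration
-----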
- -[id="mtc-overview-uninstall-mtc"] -== Uninstalling MTC and deleting resources -You can xref:../migration_toolkit_for_containers/installing-mtc.adoc#migration-uninstalling-mtc-clean-up_installing-mtc[uninstall the MTC and delete its resources] to clean up the cluster. diff --git a/migration_toolkit_for_containers/installing-mtc-restricted.adoc b/migration_toolkit_for_containers/installing-mtc-restricted.adoc deleted file mode 100644 index 219633728254..000000000000 --- a/migration_toolkit_for_containers/installing-mtc-restricted.adoc +++ /dev/null @@ -1,95 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-mtc-restricted"] -= Installing the Migration Toolkit for Containers in a restricted network environment -include::_attributes/common-attributes.adoc[] -:context: installing-mtc-restricted -:installing-mtc-restricted: - -toc::[] - -You can install the {mtc-full} ({mtc-short}) on {product-title} 4 in a restricted network environment by performing the following procedures: - -. Create a xref:../operators/admin/olm-restricted-networks.adoc#olm-mirror-catalog_olm-restricted-networks[mirrored Operator catalog]. -+ -This process creates a `mapping.txt` file, which contains the mapping between the `registry.redhat.io` image and your mirror registry image. The `mapping.txt` file is required for installing the _legacy_ {mtc-full} Operator on an {product-title} 4.2 to 4.5 source cluster. -. Install the {mtc-full} Operator on the {product-title} {product-version} target cluster by using Operator Lifecycle Manager. -+ -By default, the {mtc-short} web console and the `Migration Controller` pod run on the target cluster. You can configure the `Migration Controller` custom resource manifest to run the {mtc-short} web console and the `Migration Controller` pod on a link:https://access.redhat.com/articles/5064151[remote cluster]. - -. Install the {mtc-full} Operator on the source cluster: - -* {product-title} 4.6 or later: Install the {mtc-full} Operator by using Operator Lifecycle Manager. -* {product-title} 4.2 to 4.5: Install the legacy {mtc-full} Operator from the command line interface. - -. Configure object storage to use as a replication repository. - -[NOTE] -==== -To install {mtc-short} on {product-title} 3, see xref:../migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc#migration-installing-legacy-operator_installing-restricted-3-4[Installing the legacy {mtc-full} Operator on {product-title} 3]. -==== -To uninstall {mtc-short}, see xref:../migration_toolkit_for_containers/installing-mtc-restricted.adoc#migration-uninstalling-mtc-clean-up_installing-mtc-restricted[Uninstalling {mtc-short} and deleting resources]. - -include::modules/migration-compatibility-guidelines.adoc[leveloffset=+1] -include::modules/migration-installing-mtc-on-ocp-4.adoc[leveloffset=+1] -include::modules/migration-installing-legacy-operator.adoc[leveloffset=+1] -include::modules/migration-about-configuring-proxies.adoc[leveloffset=+1] -include::modules/migration-configuring-proxies.adoc[leveloffset=+2] - -For more information, see xref:../networking/enable-cluster-wide-proxy.adoc#nw-proxy-configure-object_config-cluster-wide-proxy[Configuring the cluster-wide proxy]. - -[id="migration-rsync-root-non-root_{context}"] -== Running Rsync as either root or non-root - -[IMPORTANT] -==== -This section applies only when you are working with the OpenShift API, not the web console. -==== - -OpenShift environments have the `PodSecurityAdmission` controller enabled by default. 
This controller requires cluster administrators to enforce Pod Security Standards by means of namespace labels. All workloads in the cluster are expected to run one of the following Pod Security Standard levels: `Privileged`, `Baseline` or `Restricted`. Every cluster has its own default policy set. - -To guarantee successful data transfer in all environments, {mtc-full} ({mtc-short}) 1.7.5 introduced changes in Rsync pods, including running Rsync pods as non-root user by default. This ensures that data transfer is possible even for workloads that do not necessarily require higher privileges. This change was made because it is best to run workloads with the lowest level of privileges possible. - -[discrete] -[id="migration-rsync-override-data-transfer_{context}"] -=== Manually overriding default non-root operation for data transfer - -Although running Rsync pods as non-root user works in most cases, data transfer might fail when you run workloads as root user on the source side. {mtc-short} provides two ways to manually override default non-root operation for data transfer: - -* Configure all migrations to run an Rsync pod as root on the destination cluster for all migrations. -* Run an Rsync pod as root on the destination cluster per migration. - -In both cases, you must set the following labels on the source side of any namespaces that are running workloads with higher privileges prior to migration: `enforce`, `audit`, and `warn.` - -To learn more about Pod Security Admission and setting values for labels, see xref:../authentication/understanding-and-managing-pod-security-admission.adoc#security-context-constraints-psa-opting_understanding-and-managing-pod-security-admission[Controlling pod security admission synchronization]. - -include::modules/migration-rsync-migration-controller-root-non-root.adoc[leveloffset=+1] - -include::modules/migration-rsync-mig-migration-root-non-root.adoc[leveloffset=+1] - -[id="configuring-replication-repository_{context}"] -== Configuring a replication repository - -The Multicloud Object Gateway is the only supported option for a restricted network environment. - -{mtc-short} supports the xref:../migration_toolkit_for_containers/about-mtc.adoc#migration-understanding-data-copy-methods_about-mtc[file system and snapshot data copy methods] for migrating data from the source cluster to the target cluster. You can select a method that is suited for your environment and is supported by your storage provider. - -[id="replication-repository-prerequisites_{context}"] -=== Prerequisites - -* All clusters must have uninterrupted network access to the replication repository. -* If you use a proxy server with an internally hosted replication repository, you must ensure that the proxy allows access to the replication repository. - -include::modules/migration-configuring-mcg.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="{context}_configuring-replication-repository-additional-resources"] -=== Additional resources - -* link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.9/html/planning_your_deployment/disconnected-environment_rhodf[Disconnected environment] in the {rh-storage-first} documentation. 
-* xref:../migration_toolkit_for_containers/about-mtc.adoc#migration-mtc-workflow_about-mtc[{mtc-short} workflow] -* xref:../migration_toolkit_for_containers/about-mtc.adoc#migration-understanding-data-copy-methods_about-mtc[About data copy methods] -* xref:../migration_toolkit_for_containers/migrating-applications-with-mtc.adoc#migration-adding-replication-repository-to-cam_migrating-applications-with-mtc[Adding a replication repository to the {mtc-short} web console] - -include::modules/migration-uninstalling-mtc-clean-up.adoc[leveloffset=+1] - -:installing-mtc-restricted!: diff --git a/migration_toolkit_for_containers/installing-mtc.adoc b/migration_toolkit_for_containers/installing-mtc.adoc deleted file mode 100644 index b9d233b2a2b2..000000000000 --- a/migration_toolkit_for_containers/installing-mtc.adoc +++ /dev/null @@ -1,94 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-mtc"] -= Installing the Migration Toolkit for Containers -include::_attributes/common-attributes.adoc[] -:context: installing-mtc -:installing-mtc: - -toc::[] - -You can install the {mtc-full} ({mtc-short}) on {product-title} 4. - -[NOTE] -==== -To install {mtc-short} on {product-title} 3, see xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-installing-legacy-operator_installing-3-4[Installing the legacy {mtc-full} Operator on {product-title} 3]. -==== - -By default, the {mtc-short} web console and the `Migration Controller` pod run on the target cluster. You can configure the `Migration Controller` custom resource manifest to run the {mtc-short} web console and the `Migration Controller` pod on a link:https://access.redhat.com/articles/5064151[remote cluster]. - -After you have installed {mtc-short}, you must configure an object storage to use as a replication repository. - -To uninstall {mtc-short}, see xref:../migration_toolkit_for_containers/installing-mtc.adoc#migration-uninstalling-mtc-clean-up_installing-mtc[Uninstalling {mtc-short} and deleting resources]. - -include::modules/migration-compatibility-guidelines.adoc[leveloffset=+1] -include::modules/migration-installing-legacy-operator.adoc[leveloffset=+1] -include::modules/migration-installing-mtc-on-ocp-4.adoc[leveloffset=+1] -include::modules/migration-about-configuring-proxies.adoc[leveloffset=+1] -include::modules/migration-configuring-proxies.adoc[leveloffset=+2] - -For more information, see xref:../networking/enable-cluster-wide-proxy.adoc#nw-proxy-configure-object_config-cluster-wide-proxy[Configuring the cluster-wide proxy]. - -[id="migration-rsync-root-non-root_{context}"] -=== Running Rsync as either root or non-root - -[IMPORTANT] -==== -This section applies only when you are working with the OpenShift API, not the web console. -==== - -OpenShift environments have the `PodSecurityAdmission` controller enabled by default. This controller requires cluster administrators to enforce Pod Security Standards by means of namespace labels. All workloads in the cluster are expected to run one of the following Pod Security Standard levels: `Privileged`, `Baseline` or `Restricted`. Every cluster has its own default policy set. - -To guarantee successful data transfer in all environments, {mtc-full} ({mtc-short}) 1.7.5 introduced changes in Rsync pods, including running Rsync pods as non-root user by default. This ensures that data transfer is possible even for workloads that do not necessarily require higher privileges. This change was made because it is best to run workloads with the lowest level of privileges possible. 
-
-==== Manually overriding default non-root operation for data transfer
-
-Although running Rsync pods as non-root user works in most cases, data transfer might fail when you run workloads as root user on the source side. {mtc-short} provides two ways to manually override default non-root operation for data transfer:
-
-* Configure all migrations to run an Rsync pod as root on the destination cluster for all migrations.
-* Run an Rsync pod as root on the destination cluster per migration.
-
-In both cases, you must set the following labels on the source side of any namespaces that are running workloads with higher privileges prior to migration: `enforce`, `audit`, and `warn`.
-
-To learn more about Pod Security Admission and setting values for labels, see xref:../authentication/understanding-and-managing-pod-security-admission.adoc#security-context-constraints-psa-opting_understanding-and-managing-pod-security-admission[Controlling pod security admission synchronization].
-
-include::modules/migration-rsync-migration-controller-root-non-root.adoc[leveloffset=+2]
-
-include::modules/migration-rsync-mig-migration-root-non-root.adoc[leveloffset=+2]
-
-[id="configuring-replication-repository_{context}"]
-== Configuring a replication repository
-
-You must configure an object storage to use as a replication repository. The {mtc-full} ({mtc-short}) copies data from the source cluster to the replication repository, and then from the replication repository to the target cluster.
-
-{mtc-short} supports the xref:../migration_toolkit_for_containers/about-mtc.adoc#migration-understanding-data-copy-methods_about-mtc[file system and snapshot data copy methods] for migrating data from the source cluster to the target cluster. Select a method that is suited for your environment and is supported by your storage provider.
-
-{mtc-short} supports the following storage providers:
-
-* xref:../migration_toolkit_for_containers/installing-mtc.adoc#migration-configuring-mcg_installing-mtc[Multicloud Object Gateway]
-* xref:../migration_toolkit_for_containers/installing-mtc.adoc#migration-configuring-aws-s3_installing-mtc[Amazon Web Services S3]
-* xref:../migration_toolkit_for_containers/installing-mtc.adoc#migration-configuring-gcp_installing-mtc[Google Cloud Platform]
-* xref:../migration_toolkit_for_containers/installing-mtc.adoc#migration-configuring-azure_installing-mtc[Microsoft Azure Blob]
-* Generic S3 object storage, for example, Minio or Ceph S3
-
-[id="replication-repository-prerequisites_{context}"]
-=== Prerequisites
-
-* All clusters must have uninterrupted network access to the replication repository.
-* If you use a proxy server with an internally hosted replication repository, you must ensure that the proxy allows access to the replication repository.
- -include::modules/migration-configuring-mcg.adoc[leveloffset=+2] -include::modules/migration-configuring-aws-s3.adoc[leveloffset=+2] -include::modules/migration-configuring-gcp.adoc[leveloffset=+2] -include::modules/migration-configuring-azure.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="{context}_configuring-replication-repository-additional-resources"] -=== Additional resources - -* xref:../migration_toolkit_for_containers/about-mtc.adoc#migration-mtc-workflow_about-mtc[{mtc-short} workflow] -* xref:../migration_toolkit_for_containers/about-mtc.adoc#migration-understanding-data-copy-methods_about-mtc[About data copy methods] -* xref:../migration_toolkit_for_containers/migrating-applications-with-mtc.adoc#migration-adding-replication-repository-to-cam_migrating-applications-with-mtc[Adding a replication repository to the {mtc-short} web console] - -include::modules/migration-uninstalling-mtc-clean-up.adoc[leveloffset=+1] - -:installing-mtc!: diff --git a/migration_toolkit_for_containers/migrating-applications-with-mtc.adoc b/migration_toolkit_for_containers/migrating-applications-with-mtc.adoc deleted file mode 100644 index 27bb37cfbe5d..000000000000 --- a/migration_toolkit_for_containers/migrating-applications-with-mtc.adoc +++ /dev/null @@ -1,51 +0,0 @@ -:_content-type: ASSEMBLY -[id="migrating-applications-with-mtc"] -= Migrating your applications -include::_attributes/common-attributes.adoc[] -:context: migrating-applications-with-mtc - -toc::[] - -You can migrate your applications by using the {mtc-full} ({mtc-short}) web console or the xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migrating-applications-cli_advanced-migration-options-mtc[command line]. - -Most cluster-scoped resources are not yet handled by {mtc-short}. If your applications require cluster-scoped resources, you might have to create them manually on the target cluster. - -You can use stage migration and cutover migration to migrate an application between clusters: - -* Stage migration copies data from the source cluster to the target cluster without stopping the application. You can run a stage migration multiple times to reduce the duration of the cutover migration. -* Cutover migration stops the transactions on the source cluster and moves the resources to the target cluster. - -You can use state migration to migrate an application's state: - -* State migration copies selected persistent volume claims (PVCs). -* You can use state migration to migrate a namespace within the same cluster. - -During migration, the {mtc-full} ({mtc-short}) preserves the following namespace annotations: - -* `openshift.io/sa.scc.mcs` -* `openshift.io/sa.scc.supplemental-groups` -* `openshift.io/sa.scc.uid-range` -+ -These annotations preserve the UID range, ensuring that the containers retain their file system permissions on the target cluster. There is a risk that the migrated UIDs could duplicate UIDs within an existing or future namespace on the target cluster. - -include::modules/migration-prerequisites.adoc[leveloffset=+1] - -[id="migrating-applications-mtc-web-console_{context}"] -== Migrating your applications by using the {mtc-short} web console - -You can configure clusters and a replication repository by using the {mtc-short} web console. Then, you can create and run a migration plan. 
- -include::modules/migration-launching-cam.adoc[leveloffset=+2] -include::modules/migration-adding-cluster-to-cam.adoc[leveloffset=+2] -include::modules/migration-adding-replication-repository-to-cam.adoc[leveloffset=+2] -include::modules/migration-creating-migration-plan-cam.adoc[leveloffset=+2] - -[discrete] -[id="additional-resources-for-persistent-volume-copy-methods_{context}"] -[role="_additional-resources"] -=== Additional resources for persistent volume copy methods - -* xref:../migration_toolkit_for_containers/about-mtc.adoc#file-system-copy-method_about-mtc[{mtc-short} file system copy method] -* xref:../migration_toolkit_for_containers/about-mtc.adoc#snapshot-copy-method_about-mtc[{mtc-short} snapshot copy method] - -include::modules/migration-running-migration-plan-cam.adoc[leveloffset=+2] diff --git a/migration_toolkit_for_containers/modules b/migration_toolkit_for_containers/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/migration_toolkit_for_containers/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/migration_toolkit_for_containers/mtc-release-notes.adoc b/migration_toolkit_for_containers/mtc-release-notes.adoc deleted file mode 100644 index 27843cd26105..000000000000 --- a/migration_toolkit_for_containers/mtc-release-notes.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="mtc-release-notes"] -= Migration Toolkit for Containers release notes -include::_attributes/common-attributes.adoc[] -:context: mtc-release-notes - -toc::[] - -The release notes for {mtc-full} ({mtc-short}) describe new features and enhancements, deprecated features, and known issues. - -The {mtc-short} enables you to migrate application workloads between {product-title} clusters at the granularity of a namespace. - -You can migrate from xref:../migrating_from_ocp_3_to_4/about-migrating-from-3-to-4.adoc[{product-title} 3 to {product-version}] and between {product-title} 4 clusters. - -{mtc-short} provides a web console and an API, based on Kubernetes custom resources, to help you control the migration and minimize application downtime. - -For information on the support policy for {mtc-short}, see link:https://access.redhat.com/support/policy/updates/openshift#app_migration[OpenShift Application and Cluster Migration Solutions], part of the _Red Hat {product-title} Life Cycle Policy_. - -include::modules/migration-mtc-release-notes-1-7-10.adoc[leveloffset=+1] -include::modules/migration-mtc-release-notes-1-7.adoc[leveloffset=+1] -include::modules/migration-mtc-release-notes-1-6.adoc[leveloffset=+1] -include::modules/migration-mtc-release-notes-1-5.adoc[leveloffset=+1] diff --git a/migration_toolkit_for_containers/network-considerations-mtc.adoc b/migration_toolkit_for_containers/network-considerations-mtc.adoc deleted file mode 100644 index 14dc958da62b..000000000000 --- a/migration_toolkit_for_containers/network-considerations-mtc.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_content-type: ASSEMBLY -[id="network-considerations-mtc"] -= Network considerations -include::_attributes/common-attributes.adoc[] -:context: network-considerations-mtc - -toc::[] - -Review the strategies for redirecting your application network traffic after migration. - -[id="dns-considerations_{context}"] -== DNS considerations - -The DNS domain of the target cluster is different from the domain of the source cluster. By default, applications get FQDNs of the target cluster after migration. 
- -To preserve the source DNS domain of migrated applications, select one of the two options described below. - -include::modules/migration-isolating-dns-domain-of-target-cluster-from-clients.adoc[leveloffset=+2] -include::modules/migration-setting-up-target-cluster-to-accept-source-dns-domain.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../security/certificates/replacing-default-ingress-certificate.adoc#replacing-default-ingress[Replacing the default ingress certificate] for more information. - -include::modules/migration-network-traffic-redirection-strategies.adoc[leveloffset=+1] diff --git a/migration_toolkit_for_containers/premigration-checklists-mtc.adoc b/migration_toolkit_for_containers/premigration-checklists-mtc.adoc deleted file mode 100644 index f56524d9775c..000000000000 --- a/migration_toolkit_for_containers/premigration-checklists-mtc.adoc +++ /dev/null @@ -1,76 +0,0 @@ -:_content-type: ASSEMBLY -[id="premigration-checklists-mtc"] -= Premigration checklists -include::_attributes/common-attributes.adoc[] -:context: premigration-checklists-mtc - -toc::[] - -Before you migrate your application workloads with the {mtc-full} ({mtc-short}), review the following checklists. - -[id="cluster-health-checklist_{context}"] -== Cluster health checklist - -* [ ] The clusters meet the minimum hardware requirements for the specific platform and installation method, for example, on xref:../installing/installing_bare_metal/installing-bare-metal.adoc#minimum-resource-requirements_installing-bare-metal[bare metal]. -* [ ] All xref:../migration_toolkit_for_containers/migrating-applications-with-mtc.adoc#migration-prerequisites_migrating-applications-with-mtc[{mtc-short} prerequisites] are met. -* [ ] All nodes have an active {product-title} subscription. -* [ ] You have xref:../support/troubleshooting/verifying-node-health.adoc#verifying-node-health[verified node health]. -* [ ] The xref:../authentication/understanding-identity-provider.adoc#supported-identity-providers[identity provider] is working. -* [ ] The migration network has a minimum throughput of 10 Gbps. -* [ ] The clusters have sufficient resources for migration. -+ -[NOTE] -==== -Clusters require additional memory, CPUs, and storage in order to run a migration on top of normal workloads. Actual resource requirements depend on the number of Kubernetes resources being migrated in a single migration plan. You must test migrations in a non-production environment in order to estimate the resource requirements. -==== - -* [ ] The link:https://access.redhat.com/solutions/4885641[etcd disk performance] of the clusters has been checked with `fio`. 
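-
-One commonly cited way to perform that check is to run `fio` against a directory on the disk that backs etcd and review the reported fsync latency percentiles. The parameters below are illustrative values taken from general etcd benchmarking guidance, and `<test_directory>` is a placeholder; the linked solution remains the authoritative procedure.
-
-[source,terminal]
-----
-$ fio --rw=write --ioengine=sync --fdatasync=1 \
-    --directory=<test_directory> --size=22m --bs=2300 --name=etcd-perf-check
-----
-
-A 99th percentile `fdatasync` duration on the order of 10 ms or less is commonly suggested as adequate for etcd; treat this threshold as guidance rather than a product requirement.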
- -[id="source-cluster-checklist_{context}"] -== Source cluster checklist - -* [ ] You have checked for persistent volumes (PVs) with abnormal configurations stuck in a *Terminating* state by running the following command: -+ -[source,terminal] ----- -$ oc get pv ----- - -* [ ] You have checked for pods whose status is other than *Running* or *Completed* by running the following command: -+ -[source,terminal] ----- -$ oc get pods --all-namespaces | egrep -v 'Running | Completed' ----- - -* [ ] You have checked for pods with a high restart count by running the following command: -+ -[source,terminal] ----- -$ oc get pods --all-namespaces --field-selector=status.phase=Running \ - -o json | jq '.items[]|select(any( .status.containerStatuses[]; \ - .restartCount > 3))|.metadata.name' ----- -+ -Even if the pods are in a *Running* state, a high restart count might indicate underlying problems. - -* [ ] The cluster certificates are valid for the duration of the migration process. -* [ ] You have checked for pending certificate-signing requests by running the following command: -+ -[source,terminal] ----- -$ oc get csr -A | grep pending -i ----- - -* [ ] The registry uses a xref:../scalability_and_performance/optimization/optimizing-storage.adoc#optimizing-storage[recommended storage type]. -* [ ] You can read and write images to the registry. -* [ ] The link:https://access.redhat.com/articles/3093761[etcd cluster] is healthy. -* [ ] The xref:../post_installation_configuration/node-tasks.adoc#create-a-kubeletconfig-crd-to-edit-kubelet-parameters_post-install-node-tasks[average API server response time] on the source cluster is less than 50 ms. - -[id="target-cluster-checklist_{context}"] -== Target cluster checklist - -* [ ] The cluster has the correct network configuration and permissions to access external services, for example, databases, source code repositories, container image registries, and CI/CD tools. -* [ ] External applications and services that use services provided by the cluster have the correct network configuration and permissions to access the cluster. -* [ ] Internal container image dependencies are met. -* [ ] The target cluster and the replication repository have sufficient storage space. diff --git a/migration_toolkit_for_containers/snippets b/migration_toolkit_for_containers/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/migration_toolkit_for_containers/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/migration_toolkit_for_containers/troubleshooting-mtc.adoc b/migration_toolkit_for_containers/troubleshooting-mtc.adoc deleted file mode 100644 index 611c367a20f1..000000000000 --- a/migration_toolkit_for_containers/troubleshooting-mtc.adoc +++ /dev/null @@ -1,65 +0,0 @@ -:_content-type: ASSEMBLY -[id="troubleshooting-mtc"] -= Troubleshooting -include::_attributes/common-attributes.adoc[] -:context: troubleshooting-mtc -:troubleshooting-mtc: -:namespace: openshift-migration -:local-product: {mtc-short} -:must-gather: registry.redhat.io/rhmtc/openshift-migration-must-gather-rhel8:v{mtc-version} - -toc::[] - -This section describes resources for troubleshooting the {mtc-full} ({mtc-short}). - -For known issues, see the xref:../migration_toolkit_for_containers/mtc-release-notes.adoc#mtc-release-notes[{mtc-short} release notes]. 
- -include::modules/migration-mtc-workflow.adoc[leveloffset=+1] - -[discrete] -include::modules/migration-about-mtc-custom-resources.adoc[leveloffset=+2] - -include::modules/migration-mtc-cr-manifests.adoc[leveloffset=+1] - -[id="logs-and-debugging-tools_{context}"] -== Logs and debugging tools - -This section describes logs and debugging tools that you can use for troubleshooting. - -include::modules/migration-viewing-migration-plan-resources.adoc[leveloffset=+2] -include::modules/migration-viewing-migration-plan-log.adoc[leveloffset=+2] -include::modules/migration-using-mig-log-reader.adoc[leveloffset=+2] -include::modules/migration-accessing-performance-metrics.adoc[leveloffset=+2] -include::modules/migration-provided-metrics.adoc[leveloffset=+3] -include::modules/migration-using-must-gather.adoc[leveloffset=+2] -include::modules/migration-debugging-velero-resources.adoc[leveloffset=+2] -include::modules/migration-partial-failure-velero.adoc[leveloffset=+2] -include::modules/migration-using-mtc-crs-for-troubleshooting.adoc[leveloffset=+2] - -[id="common-issues-and-concerns_{context}"] -== Common issues and concerns - -This section describes common issues and concerns that can cause issues during migration. - -include::modules/migration-dvm-error-node-selectors.adoc[leveloffset=+2] -include::modules/migration-error-messages.adoc[leveloffset=+2] - -[id="rolling-back-migration_{context}"] -== Rolling back a migration - -You can roll back a migration by using the {mtc-short} web console or the CLI. - -You can also xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#migration-rolling-back-migration-manually_troubleshooting-mtc[roll back a migration manually]. - -include::modules/migration-rolling-back-migration-web-console.adoc[leveloffset=+2] -include::modules/migration-rolling-back-migration-cli.adoc[leveloffset=+2] -include::modules/migration-rolling-back-migration-manually.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources-uninstalling_{context}"] -[discrete] -=== Additional resources - -* xref:../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster-using-web-console_olm-deleting-operators-from-cluster[Deleting Operators from a cluster using the web console] - -:troubleshooting-mtc!: diff --git a/migration_toolkit_for_containers/upgrading-mtc.adoc b/migration_toolkit_for_containers/upgrading-mtc.adoc deleted file mode 100644 index 3d95d8f4981b..000000000000 --- a/migration_toolkit_for_containers/upgrading-mtc.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="upgrading-mtc"] -= Upgrading the Migration Toolkit for Containers -include::_attributes/common-attributes.adoc[] -:context: upgrading-mtc -:upgrading-mtc: - -toc::[] - -You can upgrade the {mtc-full} ({mtc-short}) on {product-title} {product-version} by using Operator Lifecycle Manager. - -You can upgrade {mtc-short} on {product-title} 4.5, and earlier versions, by reinstalling the legacy {mtc-full} Operator. - -[IMPORTANT] -==== -If you are upgrading from {mtc-short} version 1.3, you must perform an additional procedure to update the `MigPlan` custom resource (CR). 
-==== - -include::modules/migration-upgrading-mtc-on-ocp-4.adoc[leveloffset=+1] -include::modules/migration-upgrading-mtc-with-legacy-operator.adoc[leveloffset=+1] -include::modules/migration-upgrading-from-mtc-1-3.adoc[leveloffset=+1] -:upgrading-mtc!: diff --git a/mod_docs_guide/_attributes b/mod_docs_guide/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/mod_docs_guide/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/mod_docs_guide/getting-started-modular-docs-ocp.adoc b/mod_docs_guide/getting-started-modular-docs-ocp.adoc deleted file mode 100644 index e68883c24bac..000000000000 --- a/mod_docs_guide/getting-started-modular-docs-ocp.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Base the file name and the ID on the assembly title. For example: -// * file name: my-assembly-a.adoc -// * ID: [id="my-assembly-a"] -// * Title: = My assembly A - -// Choose a context that is not too long and encapsulates what this assembly or -// module is about. Context MUST be unique. - -// Not sure if this guide is correct with the current guidelines anymore. Need to validate. - -[id="getting-started-modular-docs-ocp"] -= Getting started with modular docs on OpenShift -include::_attributes/common-attributes.adoc[] -:context: assembly-gsg - -toc::[] - -This is the modular docs getting started guide for the OpenShift documentation -team and anyone who might be contributing content to it. - -This guide has been written using the format of the modular docs -initiative. - -== Prerequisites - -* You have read through and familiarized yourself with the -link:https://redhat-documentation.github.io/modular-docs[Red Hat CCS modular docs guide]. -* You have reviewed -xref:../mod_docs_guide/mod-docs-conventions-ocp.adoc#mod-docs-ocp-references[the Modular Docs OpenShift Conventions]. -* [Optional] You have received the modular docs training. -* You know how to use Git. - -include::modules/creating-your-first-content.adoc[leveloffset=+1] - -// include::modules/mod-docs-ocp-conventions.adoc[leveloffset+=1] diff --git a/mod_docs_guide/images b/mod_docs_guide/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/mod_docs_guide/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/mod_docs_guide/mod-docs-conventions-ocp.adoc b/mod_docs_guide/mod-docs-conventions-ocp.adoc deleted file mode 100644 index 6fb7008ef877..000000000000 --- a/mod_docs_guide/mod-docs-conventions-ocp.adoc +++ /dev/null @@ -1,19 +0,0 @@ - -// Base the file name and the ID on the assembly title. For example: -// * file name: my-assembly-a.adoc -// * ID: [id="my-assembly-a"] -// * Title: = My assembly A - -// Choose a context that is not too long and encapsulates what this assembly or -// module is about. Context MUST be unique across the docs set. - -[id="mod-docs-ocp-references"] -= Modular docs OpenShift conventions -include::_attributes/common-attributes.adoc[] -:context: assembly-ocp-conventions - -toc::[] - -Before you contribute to the OpenShift docs repo, review the following modular docs conventions. 
- -include::modules/mod-docs-ocp-conventions.adoc[leveloffset=+1] diff --git a/mod_docs_guide/modules b/mod_docs_guide/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/mod_docs_guide/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/mod_docs_guide/snippets b/mod_docs_guide/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/mod_docs_guide/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/modules/.vale.ini b/modules/.vale.ini deleted file mode 100644 index 92a97cdf5340..000000000000 --- a/modules/.vale.ini +++ /dev/null @@ -1,12 +0,0 @@ -StylesPath = ../.vale/styles - -MinAlertLevel = suggestion - -Vocab = OpenShiftDocs - -[[!.]*.adoc] -BasedOnStyles = OpenShiftAsciiDoc, AsciiDoc, RedHat - -# Use local OpenShiftDocs Vocab terms -Vale.Terms = YES -Vale.Avoid = YES diff --git a/modules/about-administrator-perspective.adoc b/modules/about-administrator-perspective.adoc deleted file mode 100644 index f2a4c30e6501..000000000000 --- a/modules/about-administrator-perspective.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// web_console/web-console-overview.adoc - -:_content-type: CONCEPT -[id="about-administrator-perspective_{context}"] -= About the Administrator perspective in the web console - -The *Administrator* perspective enables you to view the cluster inventory, capacity, general and specific utilization information, and the stream of important events, all of which help you to simplify planning and troubleshooting tasks. Both project administrators and cluster administrators can view the *Administrator* perspective. - -Cluster administrators can also open an embedded command line terminal instance with the web terminal Operator in {product-title} 4.7 and later. - -[NOTE] -==== -The default web console perspective that is shown depends on the role of the user. The *Administrator* perspective is displayed by default if the user is recognized as an administrator. -==== - -The *Administrator* perspective provides workflows specific to administrator use cases, such as the ability to: - -* Manage workload, storage, networking, and cluster settings. -* Install and manage Operators using the Operator Hub. -* Add identity providers that allow users to log in and manage user access through roles and role bindings. -* View and manage a variety of advanced settings such as cluster updates, partial cluster updates, cluster Operators, custom resource definitions (CRDs), role bindings, and resource quotas. -* Access and manage monitoring features such as metrics, alerts, and monitoring dashboards. -* View and manage logging, metrics, and high-status information about the cluster. -* Visually interact with applications, components, and services associated with the *Administrator* perspective in {product-title}. 
diff --git a/modules/about-bare-metal-hosts-and-nodes.adoc b/modules/about-bare-metal-hosts-and-nodes.adoc deleted file mode 100644 index abfa0610db47..000000000000 --- a/modules/about-bare-metal-hosts-and-nodes.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/managing-bare-metal-hosts.adoc - -:_content-type: CONCEPT -[id="about-bare-metal-hosts-and-nodes_{context}"] -= About bare metal hosts and nodes - -To provision a {op-system-first} bare metal host as a node in your cluster, first create a `MachineSet` custom resource (CR) object that corresponds to the bare metal host hardware. Bare metal host compute machine sets describe infrastructure components specific to your configuration. You apply specific Kubernetes labels to these compute machine sets and then update the infrastructure components to run on only those machines. - -`Machine` CRs are created automatically when you scale up the relevant `MachineSet` containing a `metal3.io/autoscale-to-hosts` annotation. {product-title} uses `Machine` CRs to provision the bare metal node that corresponds to the host as specified in the `MachineSet` CR. diff --git a/modules/about-cli-profiles-switch.adoc b/modules/about-cli-profiles-switch.adoc deleted file mode 100644 index 9dad337a6cf1..000000000000 --- a/modules/about-cli-profiles-switch.adoc +++ /dev/null @@ -1,114 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/managing-cli-profiles.adoc - -:_content-type: CONCEPT -[id="about-switches-between-cli-profiles_{context}"] -= About switches between CLI profiles - -Contexts allow you to easily switch between multiple users across multiple {product-title} servers, or clusters, when using CLI operations. Nicknames make managing CLI configurations easier by providing shorthand references to contexts, user credentials, and cluster details. -After you log in with the CLI for the first time, {product-title} creates a `~/.kube/config` file if one does not already exist. As more authentication and connection details are provided to the CLI, either automatically during an `oc login` operation or by manually configuring CLI profiles, the updated information is stored in the configuration file: - -.CLI config file - -[source,yaml] ----- -apiVersion: v1 -clusters: <1> -- cluster: - insecure-skip-tls-verify: true - server: https://openshift1.example.com:8443 - name: openshift1.example.com:8443 -- cluster: - insecure-skip-tls-verify: true - server: https://openshift2.example.com:8443 - name: openshift2.example.com:8443 -contexts: <2> -- context: - cluster: openshift1.example.com:8443 - namespace: alice-project - user: alice/openshift1.example.com:8443 - name: alice-project/openshift1.example.com:8443/alice -- context: - cluster: openshift1.example.com:8443 - namespace: joe-project - user: alice/openshift1.example.com:8443 - name: joe-project/openshift1.example.com:8443/alice -current-context: joe-project/openshift1.example.com:8443/alice <3> -kind: Config -preferences: {} -users: <4> -- name: alice/openshift1.example.com:8443 - user: - token: xZHd2piv5_9vQrg-SKXRJ2Dsl9SceNJdhNTljEKTb8k ----- - -<1> The `clusters` section defines connection details for {product-title} clusters, including the address for their master server. In this example, one cluster is nicknamed `openshift1.example.com:8443` and another is nicknamed `openshift2.example.com:8443`.
-<2> This `contexts` section defines two contexts: one nicknamed `alice-project/openshift1.example.com:8443/alice`, using the `alice-project` project, `openshift1.example.com:8443` cluster, and `alice` user, and another nicknamed `joe-project/openshift1.example.com:8443/alice`, using the `joe-project` project, `openshift1.example.com:8443` cluster and `alice` user. -<3> The `current-context` parameter shows that the `joe-project/openshift1.example.com:8443/alice` context is currently in use, allowing the `alice` user to work in the `joe-project` project on the `openshift1.example.com:8443` cluster. -<4> The `users` section defines user credentials. In this example, the user nickname `alice/openshift1.example.com:8443` uses an access token. - -The CLI can support multiple configuration files which are loaded at runtime and merged together along with any override options specified from the command line. After you are logged in, you can use the `oc status` or `oc project` command to verify your current working environment: - -.Verify the current working environment - -[source,terminal,options="nowrap"] ----- -$ oc status ----- - -.Example output -[source,terminal] ----- -oc status -In project Joe's Project (joe-project) - -service database (172.30.43.12:5434 -> 3306) - database deploys docker.io/openshift/mysql-55-centos7:latest - #1 deployed 25 minutes ago - 1 pod - -service frontend (172.30.159.137:5432 -> 8080) - frontend deploys origin-ruby-sample:latest <- - builds https://github.com/openshift/ruby-hello-world with joe-project/ruby-20-centos7:latest - #1 deployed 22 minutes ago - 2 pods - -To see more information about a service or deployment, use 'oc describe service ' or 'oc describe dc '. -You can use 'oc get all' to see lists of each of the types described in this example. ----- - -.List the current project -[source,terminal,options="nowrap"] ----- -$ oc project ----- - -.Example output -[source,terminal] ----- -Using project "joe-project" from context named "joe-project/openshift1.example.com:8443/alice" on server "https://openshift1.example.com:8443". ----- - -You can run the `oc login` command again and supply the required information during the interactive process, to log in using any other combination of user credentials and cluster details. A context is constructed based on the supplied information if one does not already exist. If you are already logged in and want to switch to another project the current user already has access to, use the `oc project` command and enter the name of the project: - -[source,terminal,options="nowrap"] ----- -$ oc project alice-project ----- - -.Example output -[source,terminal] ----- -Now using project "alice-project" on server "https://openshift1.example.com:8443". ----- - -At any time, you can use the `oc config view` command to view your current CLI configuration, as seen in the output. Additional CLI configuration commands are also available for more advanced usage. - -[NOTE] -==== -If you have access to administrator credentials but are no longer logged in as the default system user `system:admin`, you can log back in as this user at any time as long as the credentials are still present in your CLI config file. 
The following command logs in and switches to the default project: - -[source,terminal] ----- -$ oc login -u system:admin -n default ----- -==== diff --git a/modules/about-crio.adoc b/modules/about-crio.adoc deleted file mode 100644 index 221569317b1c..000000000000 --- a/modules/about-crio.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-crio-issues.adoc - -:_content-type: CONCEPT -[id="about-crio_{context}"] -= About CRI-O container runtime engine - -include::snippets/about-crio-snippet.adoc[] - -When container runtime issues occur, verify the status of the `crio` systemd service on each node. Gather CRI-O journald unit logs from nodes that have container runtime issues. diff --git a/modules/about-developer-perspective.adoc b/modules/about-developer-perspective.adoc deleted file mode 100644 index 041fe893e496..000000000000 --- a/modules/about-developer-perspective.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// web_console/web-console-overview.adoc - -:_content-type: CONCEPT -[id="about-developer-perspective_{context}"] -= About the Developer perspective in the web console - -The *Developer* perspective offers several built-in ways to deploy applications, services, and databases. In the *Developer* perspective, you can: - -* View real-time visualization of rolling and recreating rollouts on the component. -* View the application status, resource utilization, project event streaming, and quota consumption. -* Share your project with others. -* Troubleshoot problems with your applications by running Prometheus Query Language (PromQL) queries on your project and examining the metrics visualized on a plot. The metrics provide information about the state of a cluster and any user-defined workloads that you are monitoring. - -Cluster administrators can also open an embedded command line terminal instance in the web console in {product-title} 4.7 and later. - -[NOTE] -==== -The default web console perspective that is shown depends on the role of the user. The *Developer* perspective is displayed by default if the user is recognized as a developer. -==== - -The *Developer* perspective provides workflows specific to developer use cases, such as the ability to: - -* Create and deploy applications on {product-title} by importing existing codebases, images, and container files. -* Visually interact with applications, components, and services associated with them within a project and monitor their deployment and build status. -* Group components within an application and connect the components within and across applications. -* Integrate serverless capabilities (Technology Preview). -* Create workspaces to edit your application code using Eclipse Che. - -You can use the *Topology* view to display applications, components, and workloads of your project. If you have no workloads in the project, the *Topology* view shows links to create or import them. You can also use the *Quick Search* to import components directly. - -.Additional resources -See link:https://docs.openshift.com/container-platform/4.14/applications/odc-viewing-application-composition-using-topology-view.html[Viewing application composition using the Topology view] for more information on using the *Topology* view in the *Developer* perspective.
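The CRI-O module above says to verify the status of the `crio` systemd service and to gather its journald unit logs, but it does not show any commands. The following is a minimal sketch of one way to do this, assuming `cluster-admin` access and a hypothetical `<node_name>` placeholder:

[source,terminal]
----
# Check whether the crio service is active on a node by chrooting into the host from a debug pod.
$ oc debug node/<node_name> -- chroot /host systemctl is-active crio

# Gather the CRI-O journald unit logs from the same node.
$ oc adm node-logs <node_name> -u crio
----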
diff --git a/modules/about-etcd-encryption.adoc b/modules/about-etcd-encryption.adoc deleted file mode 100644 index bd7cd292fb4e..000000000000 --- a/modules/about-etcd-encryption.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * security/encrypting-etcd.adoc -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: CONCEPT -[id="about-etcd_{context}"] -= About etcd encryption - -By default, etcd data is not encrypted in {product-title}. You can enable etcd encryption for your cluster to provide an additional layer of data security. For example, it can help protect the loss of sensitive data if an etcd backup is exposed to the incorrect parties. - -When you enable etcd encryption, the following OpenShift API server and Kubernetes API server resources are encrypted: - -* Secrets -* Config maps -* Routes -* OAuth access tokens -* OAuth authorize tokens - -When you enable etcd encryption, encryption keys are created. You must have these keys to restore from an etcd backup. - -[NOTE] -==== -Etcd encryption only encrypts values, not keys. Resource types, namespaces, and object names are unencrypted. - -If etcd encryption is enabled during a backup, the `__static_kuberesources_.tar.gz__` file contains the encryption keys for the etcd snapshot. For security reasons, store this file separately from the etcd snapshot. However, this file is required to restore a previous state of etcd from the respective etcd snapshot. -==== diff --git a/modules/about-gitops.adoc b/modules/about-gitops.adoc deleted file mode 100644 index 2eadd8ce7d4f..000000000000 --- a/modules/about-gitops.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module is included in the following assemblies: -// -// * openshift-docs/cicd/gitops/understanding-openshift-gitops.adoc - -:_content-type: CONCEPT -[id="about-gitops_{context}"] -= About GitOps - -GitOps is a declarative way to implement continuous deployment for cloud native applications. You can use GitOps to create repeatable processes for managing {product-title} clusters and applications across multi-cluster Kubernetes environments. GitOps handles and automates complex deployments at a fast pace, saving time during deployment and release cycles. - -The GitOps workflow pushes an application through development, testing, staging, and production. GitOps either deploys a new application or updates an existing one, so you only need to update the repository; GitOps automates everything else. - -GitOps is a set of practices that use Git pull requests to manage infrastructure and application configurations. In GitOps, the Git repository is the only source of truth for system and application configuration. This Git repository contains a declarative description of the infrastructure you need in your specified environment and contains an automated process to make your environment match the described state. Also, it contains the entire state of the system so that the trail of changes to the system state are visible and auditable. By using GitOps, you resolve the issues of infrastructure and application configuration sprawl. - -GitOps defines infrastructure and application definitions as code. Then, it uses this code to manage multiple workspaces and clusters to simplify the creation of infrastructure and application configurations. 
By following the principles of the code, you can store the configuration of clusters and applications in Git repositories, and then follow the Git workflow to apply these repositories to your chosen clusters. You can apply the core principles of developing and maintaining software in a Git repository to the creation and management of your cluster and application configuration files. diff --git a/modules/about-installing-oadp-on-multiple-namespaces.adoc b/modules/about-installing-oadp-on-multiple-namespaces.adoc deleted file mode 100644 index a45f74871bd9..000000000000 --- a/modules/about-installing-oadp-on-multiple-namespaces.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/installing/about-installing-oadp.adoc - - -:_content-type: CONCEPT -[id="about-installing-oadp-on-multiple-namespaces_{context}"] -= Installation of OADP on multiple namespaces - -You can install OADP into multiple namespaces on the same cluster so that multiple project owners can manage their own OADP instance. This use case has been validated with Restic and CSI. - -You install each instance of OADP as specified by the per-platform procedures contained in this document with the following additional requirements: - -* All deployments of OADP on the same cluster must be the same version, for example, 1.1.4. Installing different versions of OADP on the same cluster is *not* supported. -* Each individual deployment of OADP must have a unique set of credentials and a unique `BackupStorageLocation` configuration. -* By default, each OADP deployment has cluster-level access across namespaces. {product-title} administrators need to review security and RBAC settings carefully and make any necessary changes to them to ensure that each OADP instance has the correct permissions. - - - - diff --git a/modules/about-manually-maintained-credentials-upgrade.adoc b/modules/about-manually-maintained-credentials-upgrade.adoc deleted file mode 100644 index e13c4ff9432e..000000000000 --- a/modules/about-manually-maintained-credentials-upgrade.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/preparing_for_updates/preparing-manual-creds-update.adoc -// * authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc - -:_content-type: CONCEPT - -[id="about-manually-maintained-credentials-upgrade_{context}"] -= Update requirements for clusters with manually maintained credentials - -Before you update a cluster that uses manually maintained credentials with the Cloud Credential Operator (CCO), you must update the cloud provider resources for the new release. - -If the cloud credential management for your cluster was configured using the CCO utility (`ccoctl`), use the `ccoctl` utility to update the resources. Clusters that were configured to use manual mode without the `ccoctl` utility require manual updates for the resources. - -After updating the cloud provider resources, you must update the `upgradeable-to` annotation for the cluster to indicate that it is ready to update. - -[NOTE] -==== -The process to update the cloud provider resources and the `upgradeable-to` annotation can only be completed by using command line tools. -==== - -[id="cco-platform-options_{context}"] -== Cloud credential configuration options and update requirements by platform type - -Some platforms only support using the CCO in one mode. 
For clusters that are installed on those platforms, the platform type determines the credentials update requirements. - -For platforms that support using the CCO in multiple modes, you must determine which mode the cluster is configured to use and take the required actions for that configuration. - -.Credentials update requirements by platform type -image::334_OpenShift_cluster_updating_and_CCO_workflows_0523_4.11_B.png[Decision tree showing the possible update paths for your cluster depending on the configured CCO credentials mode.] - -{rh-openstack-first} and VMware vSphere:: -These platforms do not support using the CCO in manual mode. Clusters on these platforms handle changes in cloud provider resources automatically and do not require an update to the `upgradeable-to` annotation. -+ -Administrators of clusters on these platforms should skip the manually maintained credentials section of the update process. - -{alibaba}, IBM Cloud, and Nutanix:: -Clusters installed on these platforms are configured using the `ccoctl` utility. -+ -Administrators of clusters on these platforms must take the following actions: -+ -. Configure the `ccoctl` utility for the new release. -. Use the `ccoctl` utility to update the cloud provider resources. -. Indicate that the cluster is ready to update with the `upgradeable-to` annotation. - -Microsoft Azure Stack Hub:: -These clusters use manual mode with long-lived credentials and do not use the `ccoctl` utility. -+ -Administrators of clusters on these platforms must take the following actions: -+ -. Manually update the cloud provider resources for the new release. -. Indicate that the cluster is ready to update with the `upgradeable-to` annotation. - -Amazon Web Services (AWS), global Microsoft Azure, and Google Cloud Platform (GCP):: -Clusters installed on these platforms support multiple CCO modes. -+ -The required update process depends on the mode that the cluster is configured to use. If you are not sure what mode the CCO is configured to use on your cluster, you can use the web console or the CLI to determine this information. diff --git a/modules/about-must-gather.adoc b/modules/about-must-gather.adoc deleted file mode 100644 index c6e31b347f20..000000000000 --- a/modules/about-must-gather.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// * sandboxed_containers/troubleshooting-sandboxed-containers.adoc -// * virt/support/virt-collecting-virt-data.adoc -// * support/gathering-cluster-data.adoc -// * service_mesh/v2x/ossm-support.adoc -// * service_mesh/v1x/servicemesh-release-notes.adoc -// * serverless/serverless-support.adoc - -:_content-type: CONCEPT -[id="about-must-gather_{context}"] -= About the must-gather tool - -The `oc adm must-gather` CLI command collects the information from your cluster that is most likely needed for debugging issues, including: - -* Resource definitions -* Service logs - -By default, the `oc adm must-gather` command uses the default plugin image and writes into `./must-gather.local`. - -Alternatively, you can collect specific information by running the command with the appropriate arguments as described in the following sections: - -* To collect data related to one or more specific features, use the `--image` argument with an image, as listed in a following section. 
-+ -For example: -+ -[source,terminal,subs="attributes+"] ----- -$ oc adm must-gather \ - --image=registry.redhat.io/container-native-virtualization/cnv-must-gather-rhel9:v{HCOVersion} ----- - -* To collect the audit logs, use the `-- /usr/bin/gather_audit_logs` argument, as described in a following section. -+ -For example: -+ -[source,terminal] ----- -$ oc adm must-gather -- /usr/bin/gather_audit_logs ----- -+ -[NOTE] -==== -Audit logs are not collected as part of the default set of information to reduce the size of the files. -==== - -When you run `oc adm must-gather`, a new pod with a random name is created in a new project on the cluster. The data is collected on that pod and saved in a new directory that starts with `must-gather.local`. This directory is created in the current working directory. - -For example: - -[source,terminal] ----- -NAMESPACE NAME READY STATUS RESTARTS AGE -... -openshift-must-gather-5drcj must-gather-bklx4 2/2 Running 0 72s -openshift-must-gather-5drcj must-gather-s8sdh 2/2 Running 0 72s -... ----- -// todo: table or ref module listing available images? -Optionally, you can run the `oc adm must-gather` command in a specific namespace by using the `--run-namespace` option. - -For example: - -[source,terminal,subs="attributes+"] ----- -$ oc adm must-gather --run-namespace \ - --image=registry.redhat.io/container-native-virtualization/cnv-must-gather-rhel9:v{HCOVersion} ----- \ No newline at end of file diff --git a/modules/about-oadp-update-channels.adoc b/modules/about-oadp-update-channels.adoc deleted file mode 100644 index 1a46ba12cfe3..000000000000 --- a/modules/about-oadp-update-channels.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/installing/about-installing-oadp.adoc - - -:_content-type: CONCEPT -[id="about-oadp-update-channels_{context}"] -= About OADP update channels - -When you install an OADP Operator, you choose an _update channel_. This channel determines which upgrades to the OADP Operator and to Velero you receive. You can switch channels at any time. - -The following update channels are available: - -* The *stable* channel is now deprecated. The *stable* channel contains the patches (z-stream updates) of OADP `ClusterServiceVersion` for `oadp.v1.1.z` and older versions from `oadp.v1.0.z`. - -* The *stable-1.0* channel contains `oadp.v1.0._z_`, the most recent OADP 1.0 `ClusterServiceVersion`. - -* The *stable-1.1* channel contains `oadp.v1.1._z_`, the most recent OADP 1.1 `ClusterServiceVersion`. - -* The *stable-1.2* channel contains `oadp.v1.2._z_`, the most recent OADP 1.2 `ClusterServiceVersion`. - -*Which update channel is right for you?* - -* The *stable* channel is now deprecated. If you are already using the stable channel, you will continue to get updates from `oadp.v1.1._z_`. - -* Choose the *stable-1._y_* update channel to install OADP 1._y_ and to continue receiving patches for it. If you choose this channel, you will receive all z-stream patches for version 1._y_._z_. - -*When must you switch update channels?* - -* If you have OADP 1._y_ installed, and you want to receive patches only for that y-stream, you must switch from the *stable* update channel to the *stable-1._y_* update channel. You will then receive all z-stream patches for version 1._y_._z_. - -* If you have OADP 1.0 installed, want to upgrade to OADP 1.1, and then receive patches only for OADP 1.1, you must switch from the *stable-1.0* update channel to the *stable-1.1* update channel. 
You will then receive all z-stream patches for version 1.1._z_. - -* If you have OADP 1._y_ installed, with _y_ greater than 0, and want to switch to OADP 1.0, you must _uninstall_ your OADP Operator and then reinstall it using the *stable-1.0* update channel. You will then receive all z-stream patches for version 1.0._z_. - -[NOTE] -==== -You cannot switch from OADP 1._y_ to OADP 1.0 by switching update channels. You must uninstall the Operator and then reinstall it. -==== diff --git a/modules/about-project-creation.adoc b/modules/about-project-creation.adoc deleted file mode 100644 index f8fcbae54cc7..000000000000 --- a/modules/about-project-creation.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/projects/configuring-project-creation.adoc - -:_content-type: CONCEPT -[id="about-project-creation_{context}"] -= About project creation - -The {product-title} API server automatically provisions new projects based on -the project template that is identified by the `projectRequestTemplate` -parameter in the cluster's project configuration resource. If the parameter is -not defined, the API server creates a default template that creates a project -with the requested name, and assigns the requesting user to the `admin` role for -that project. - -When a project request is submitted, the API substitutes the following -parameters into the template: - -.Default project template parameters -[cols="4,8",options="header"] -|=== -|Parameter |Description - -|`PROJECT_NAME` -|The name of the project. Required. - -|`PROJECT_DISPLAYNAME` -|The display name of the project. May be empty. - -|`PROJECT_DESCRIPTION` -|The description of the project. May be empty. - -|`PROJECT_ADMIN_USER` -|The user name of the administrating user. - -|`PROJECT_REQUESTING_USER` -|The user name of the requesting user. -|=== - -Access to the API is granted to developers with the `self-provisioner` role and -the `self-provisioners` cluster role binding. This role is available to all -authenticated developers by default. diff --git a/modules/about-redhat-openshift-gitops.adoc b/modules/about-redhat-openshift-gitops.adoc deleted file mode 100644 index 0aff1b216ab5..000000000000 --- a/modules/about-redhat-openshift-gitops.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module is included in the following assemblies: -// -// * openshift-docs/cicd/gitops/understanding-openshift-gitops.adoc - -:_content-type: CONCEPT -[id="about-redhat-openshift-gitops_{context}"] -= About {gitops-title} - -{gitops-title} ensures consistency in applications when you deploy them to different clusters in different environments, such as: development, staging, and production. {gitops-title} organizes the deployment process around the configuration repositories and makes them the central element. It always has at least two repositories: - - . Application repository with the source code - . Environment configuration repository that defines the desired state of the application - -These repositories contain a declarative description of the infrastructure you need in your specified environment. They also contain an automated process to make your environment match the described state. - -{gitops-title} uses Argo CD to maintain cluster resources. Argo CD is an open-source declarative tool for the continuous integration and continuous deployment (CI/CD) of applications. 
{gitops-title} implements Argo CD as a controller so that it continuously monitors application definitions and configurations defined in a Git repository. Then, Argo CD compares the specified state of these configurations with their live state on the cluster. - -Argo CD reports any configurations that deviate from their specified state. These reports allow administrators to automatically or manually resync configurations to the defined state. Therefore, Argo CD enables you to deliver global custom resources, like the resources that are used to configure {product-title} clusters. - -[id="key-features_{context}"] -== Key features - -{gitops-title} helps you automate the following tasks: - -* Ensure that the clusters have similar states for configuration, monitoring, and storage -* Apply or revert configuration changes to multiple {product-title} clusters -* Associate templated configuration with different environments -* Promote applications across clusters, from staging to production diff --git a/modules/about-scaling-a-user-provisioned-installation-with-the-bare-metal-operator.adoc b/modules/about-scaling-a-user-provisioned-installation-with-the-bare-metal-operator.adoc deleted file mode 100644 index b69932c9f83c..000000000000 --- a/modules/about-scaling-a-user-provisioned-installation-with-the-bare-metal-operator.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/scaling-a-user-provisioned-cluster-with-the-bare-metal-operator.adoc - -:_content-type: CONCEPT - -[id="about-scaling-a-user-provisioned-cluster-with-the-bare-metal-operator_{context}"] -= About scaling a user-provisioned cluster with the Bare Metal Operator - -You can scale user-provisioned infrastructure clusters by using the Bare Metal Operator (BMO) and other metal3 components. User-provisioned infrastructure installations do not feature the Machine API Operator. The Machine API Operator typically manages the lifecycle of bare-metal hosts in a cluster. However, it is possible to use the BMO and other metal3 components to scale nodes in user-provisioned clusters without requiring the Machine API Operator. diff --git a/modules/about-sosreport.adoc b/modules/about-sosreport.adoc deleted file mode 100644 index 6d819a84a840..000000000000 --- a/modules/about-sosreport.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * support/gathering-cluster-data.adoc - -:_content-type: CONCEPT -[id="about-sosreport_{context}"] -= About sosreport - -`sosreport` is a tool that collects configuration details, system information, and diagnostic data from {op-system-base-full} and {op-system-first} systems. `sosreport` provides a standardized way to collect diagnostic information relating to a node, which can then be provided to Red Hat Support for issue diagnosis. - -In some support interactions, Red Hat Support may ask you to collect a `sosreport` archive for a specific {product-title} node. For example, it might sometimes be necessary to review system logs or other node-specific data that is not included within the output of `oc adm must-gather`. 
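The sosreport module above describes what the tool collects but not how to run it on a cluster node. The following is a minimal sketch of one common flow, assuming a hypothetical `<node_name>` placeholder and that the `toolbox` container is available on the node:

[source,terminal]
----
# Open a debug shell on the node and switch to the host root filesystem.
$ oc debug node/<node_name>
sh-4.4# chroot /host

# Start the support tools container, then collect an archive that also includes CRI-O data.
sh-4.4# toolbox
sh-4.4# sosreport -k crio.all=on -k crio.logs=on
----

The command prints the location of the generated archive, which you can then attach to the relevant Red Hat Support case.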
diff --git a/modules/about-toolbox.adoc b/modules/about-toolbox.adoc deleted file mode 100644 index 486ab0e6950f..000000000000 --- a/modules/about-toolbox.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * support/gathering-cluster-data.adoc - -:_content-type: CONCEPT -[id="about-toolbox_{context}"] -= About `toolbox` - -ifndef::openshift-origin[] -`toolbox` is a tool that starts a container on a {op-system-first} system. The tool is primarily used to start a container that includes the required binaries and plugins that are needed to run commands such as `sosreport` and `redhat-support-tool`. - -The primary purpose for a `toolbox` container is to gather diagnostic information and to provide it to Red Hat Support. However, if additional diagnostic tools are required, you can add RPM packages or run an image that is an alternative to the standard support tools image. -endif::openshift-origin[] - -ifdef::openshift-origin[] -`toolbox` is a tool that starts a container on a {op-system-first} system. The tool is primarily used to start a container that includes the required binaries and plugins that are needed to run your favorite debugging or admin tools. -endif::openshift-origin[] diff --git a/modules/about-using-gpu-operator.adoc b/modules/about-using-gpu-operator.adoc deleted file mode 100644 index e9879d71049a..000000000000 --- a/modules/about-using-gpu-operator.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * virt/virtual_machines/advanced_vm_management/virt-configuring-mediated-devices.adoc - -:_content-type: CONCEPT -[id="about-using-nvidia-gpu_{context}"] -= About using the NVIDIA GPU Operator - -The NVIDIA GPU Operator manages NVIDIA GPU resources in an {product-title} cluster and automates tasks related to bootstrapping GPU nodes. -Because the GPU is a special resource in the cluster, you must install some components before deploying application workloads onto the GPU. -These components include the NVIDIA drivers that enable Compute Unified Device Architecture (CUDA), the Kubernetes device plugin, the container runtime, and other features such as automatic node labeling and monitoring. -[NOTE] -==== -The NVIDIA GPU Operator is supported only by NVIDIA. For more information about obtaining support from NVIDIA, see link:https://access.redhat.com/solutions/5174941[Obtaining Support from NVIDIA]. -==== - -There are two ways to enable GPUs with {product-title} {VirtProductName}: the {product-title}-native method described here, and the NVIDIA GPU Operator. - -The NVIDIA GPU Operator is a Kubernetes Operator that enables {product-title} {VirtProductName} to expose GPUs to virtualized workloads running on {product-title}. -It allows users to easily provision and manage GPU-enabled virtual machines, providing them with the ability to run complex artificial intelligence/machine learning (AI/ML) workloads on the same platform as their other workloads. -It also provides an easy way to scale the GPU capacity of their infrastructure, allowing for rapid growth of GPU-based workloads. - -For more information about using the NVIDIA GPU Operator to provision worker nodes for running GPU-accelerated VMs, see link:https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/openshift/openshift-virtualization.html[NVIDIA GPU Operator with OpenShift Virtualization].
diff --git a/modules/about-ztp.adoc b/modules/about-ztp.adoc deleted file mode 100644 index f618e11dd7b3..000000000000 --- a/modules/about-ztp.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ztp_far_edge/ztp-deploying-far-edge-clusters-at-scale.adoc - -:_content-type: CONCEPT -[id="about-ztp_{context}"] -= Using {ztp} to provision clusters at the network far edge - -{rh-rhacm-first} manages clusters in a hub-and-spoke architecture, where a single hub cluster manages many spoke clusters. Hub clusters running {rh-rhacm} provision and deploy the managed clusters by using {ztp-first} and the assisted service that is deployed when you install {rh-rhacm}. - -The assisted service handles provisioning of {product-title} on single node clusters, three-node clusters, or standard clusters running on bare metal. - -A high-level overview of using {ztp} to provision and maintain bare-metal hosts with {product-title} is as follows: - -* A hub cluster running {rh-rhacm} manages an {product-registry} that mirrors the {product-title} release images. {rh-rhacm} uses the {product-registry} to provision the managed clusters. - -* You manage the bare-metal hosts in a YAML format inventory file, versioned in a Git repository. - -* You make the hosts ready for provisioning as managed clusters, and use {rh-rhacm} and the assisted service to install the bare-metal hosts on site. - -Installing and deploying the clusters is a two-stage process, involving an initial installation phase, and a subsequent configuration phase. The following diagram illustrates this workflow: - -image::217_OpenShift_Zero_Touch_Provisioning_updates_1022_2.png[Using GitOps and {ztp} to install and deploy managed clusters] diff --git a/modules/access-cluster.adoc b/modules/access-cluster.adoc deleted file mode 100644 index 233466d4b87a..000000000000 --- a/modules/access-cluster.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_install_access_delete_cluster/config-identity-providers.adoc -// * osd_getting_started/osd-getting-started.adoc - -:_content-type: PROCEDURE -[id="access-cluster_{context}"] -= Accessing your cluster - -After you have configured your identity providers, users can access the cluster from {cluster-manager-first}. - -.Prerequisites - -* You logged in to {cluster-manager-url}. -* You created an {product-title} cluster. -* You configured an identity provider for your cluster. -* You added your user account to the configured identity provider. - -.Procedure - -. From {cluster-manager-url}, click on the cluster you want to access. - -. Click *Open Console*. - -. Click on your identity provider and provide your credentials to log into the cluster. - -. Click *Open console* to open the web console for your cluster. - -. Click on your identity provider and provide your credentials to log in to the cluster. Complete any authorization requests that are presented by your provider. 
diff --git a/modules/access-service.adoc b/modules/access-service.adoc deleted file mode 100644 index a48382e04b52..000000000000 --- a/modules/access-service.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * assemblies/adding-service.adoc - -:_content-type: PROCEDURE -[id="access-service_{context}"] - -= Accessing installed add-on services on your cluster - -After you successfully install an add-on service on your {product-title} -ifdef::openshift-rosa[] -(ROSA) -endif::openshift-rosa[] -cluster, you can access the service by using the OpenShift web console. - -.Prerequisites - -* You have successfully installed a service on your {product-title} cluster. - -.Procedure - -. Navigate to the *Clusters* page in {cluster-manager-url}. - -. Select the cluster with the installed service that you want to access. - -. Navigate to the *Add-ons* tab, and locate the installed service that you want to access. - -. Click *View on console* from the service option to open the OpenShift web console. - -. Enter your credentials to log in to the OpenShift web console. - -. Open the *Red Hat Applications* menu by clicking the three-by-three matrix icon in the upper right corner of the main screen. - -. Select the service you want to open from the drop-down menu. A new browser tab opens and you are required to authenticate through Red Hat Single Sign-On. - -You have now accessed your service and can begin using it. diff --git a/modules/accessing-an-example-cluster-node-tuning-operator-specification.adoc b/modules/accessing-an-example-cluster-node-tuning-operator-specification.adoc deleted file mode 100644 index 6952da48f917..000000000000 --- a/modules/accessing-an-example-cluster-node-tuning-operator-specification.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/using-node-tuning-operator.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="accessing-an-example-node-tuning-operator-specification_{context}"] -= Accessing an example Node Tuning Operator specification - -Use this process to access an example Node Tuning Operator specification. - -.Procedure - - * Run the following command to access an example Node Tuning Operator specification: -+ -[source,terminal] ----- -$ oc get Tuned/default -o yaml -n openshift-cluster-node-tuning-operator ----- - -The default CR is meant for delivering standard node-level tuning for the {product-title} platform, and it can only be modified to set the Operator Management state. Any other custom changes to the default CR will be overwritten by the Operator. For custom tuning, create your own Tuned CRs. Newly created CRs are combined with the default CR, and custom tuning is applied to {product-title} nodes based on node or pod labels and profile priorities. - -[WARNING] -==== -While pod label support can be a convenient way of automatically delivering required tuning in certain situations, this practice is strongly discouraged, especially in large-scale clusters. The default Tuned CR ships without pod label matching. If a custom profile is created with pod label matching, then the functionality will be enabled at that time. The pod label functionality will be deprecated in future versions of the Node Tuning Operator.
-==== diff --git a/modules/accessing-hosts-on-aws.adoc b/modules/accessing-hosts-on-aws.adoc deleted file mode 100644 index bf9a54c876f2..000000000000 --- a/modules/accessing-hosts-on-aws.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/accessing-hosts.adoc - -:_content-type: PROCEDURE -[id="accessing-hosts-on-aws_{context}"] -= Accessing hosts on Amazon Web Services in an installer-provisioned infrastructure cluster - -The {product-title} installer does not create any public IP addresses for any of -the Amazon Elastic Compute Cloud (Amazon EC2) instances that it provisions for -your {product-title} cluster. To be able to SSH to your {product-title} -hosts, you must follow this procedure. - -.Procedure - -. Create a security group that allows SSH access into the virtual private cloud -(VPC) created by the `openshift-install` command. - -. Create an Amazon EC2 instance on one of the public subnets the installer -created. - -. Associate a public IP address with the Amazon EC2 instance that you created. -+ -Unlike with the {product-title} installation, you should associate the Amazon -EC2 instance you created with an SSH keypair. It does not matter what operating -system you choose for this instance, as it will simply serve as an SSH bastion -to bridge the internet into your {product-title} cluster's VPC. The Amazon -Machine Image (AMI) you use does matter. With {op-system-first}, -for example, you can provide keys via Ignition, like the installer does. - -. After you provisioned your Amazon EC2 instance and can SSH into it, you must add -the SSH key that you associated with your {product-title} installation. This key -can be different from the key for the bastion instance, but does not have to be. -+ -[NOTE] -==== -Direct SSH access is only recommended for disaster recovery. When the Kubernetes -API is responsive, run privileged pods instead. -==== - -. Run `oc get nodes`, inspect the output, and choose one of the nodes that is a -master. The hostname looks similar to `ip-10-0-1-163.ec2.internal`. - -. From the bastion SSH host you manually deployed into Amazon EC2, SSH into that -control plane host. Ensure that you use the same SSH key you specified during the -installation: -+ -[source,terminal] ----- -$ ssh -i core@ ----- diff --git a/modules/accessing-metrics-outside-cluster.adoc b/modules/accessing-metrics-outside-cluster.adoc deleted file mode 100644 index ba88f496261b..000000000000 --- a/modules/accessing-metrics-outside-cluster.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/enabling-monitoring-for-user-defined-projects.adoc - -:_content-type: PROCEDURE -[id="accessing-metrics-from-outside-cluster_{context}"] -= Accessing metrics from outside the cluster for custom applications - -Learn how to query Prometheus statistics from the command line when monitoring your own services. You can access monitoring data from outside the cluster with the `thanos-querier` route. - -.Prerequisites - -* You deployed your own service, following the _Enabling monitoring for user-defined projects_ procedure. - -.Procedure - -. Extract a token to connect to Prometheus: -+ -[source,terminal] ----- -$ SECRET=`oc get secret -n openshift-user-workload-monitoring | grep prometheus-user-workload-token | head -n 1 | awk '{print $1 }'` ----- -+ -[source,terminal] ----- -$ TOKEN=`echo $(oc get secret $SECRET -n openshift-user-workload-monitoring -o json | jq -r '.data.token') | base64 -d` ----- - -. 
Extract your route host: -+ -[source,terminal] ----- -$ THANOS_QUERIER_HOST=`oc get route thanos-querier -n openshift-monitoring -o json | jq -r '.spec.host'` ----- - -. Query the metrics of your own services in the command line. For example: -+ -[source,terminal] ----- -$ NAMESPACE=ns1 ----- -+ -[source,terminal] ----- -$ curl -X GET -kG "https://$THANOS_QUERIER_HOST/api/v1/query?" --data-urlencode "query=up{namespace='$NAMESPACE'}" -H "Authorization: Bearer $TOKEN" ----- -+ -The output will show you the duration that your application pods have been up. -+ -.Example output -[source,terminal] ----- -{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"up","endpoint":"web","instance":"10.129.0.46:8080","job":"prometheus-example-app","namespace":"ns1","pod":"prometheus-example-app-68d47c4fb6-jztp2","service":"prometheus-example-app"},"value":[1591881154.748,"1"]}]}} ----- diff --git a/modules/accessing-running-pods.adoc b/modules/accessing-running-pods.adoc deleted file mode 100644 index 5513024f44b0..000000000000 --- a/modules/accessing-running-pods.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/investigating-pod-issues.adoc - -:_content-type: PROCEDURE -[id="accessing-running-pods_{context}"] -= Accessing running pods - -You can review running pods dynamically by opening a shell inside a pod or by gaining network access through port forwarding. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* Your API service is still functional. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Switch into the project that contains the pod you would like to access. This is necessary because the `oc rsh` command does not accept the `-n` namespace option: -+ -[source,terminal] ----- -$ oc project ----- - -. Start a remote shell into a pod: -+ -[source,terminal] ----- -$ oc rsh <1> ----- -<1> If a pod has multiple containers, `oc rsh` defaults to the first container unless `-c ` is specified. - -. Start a remote shell into a specific container within a pod: -+ -[source,terminal] ----- -$ oc rsh -c pod/ ----- - -. Create a port forwarding session to a port on a pod: -+ -[source,terminal] ----- -$ oc port-forward : <1> ----- -<1> Enter `Ctrl+C` to cancel the port forwarding session. diff --git a/modules/accessing-windows-node-using-rdp.adoc b/modules/accessing-windows-node-using-rdp.adoc deleted file mode 100644 index 136d9cb95ee3..000000000000 --- a/modules/accessing-windows-node-using-rdp.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-windows-container-workload-issues.adoc - -:_content-type: PROCEDURE -[id="accessing-windows-node-using-rdp_{context}"] -= Accessing a Windows node using RDP - -You can access a Windows node by using a Remote Desktop Protocol (RDP). - -.Prerequisites - -* You installed the Windows Machine Config Operator (WMCO) using Operator Lifecycle Manager (OLM). -* You have created a Windows compute machine set. -* You have added the key used in the `cloud-private-key` secret and the key used when creating the cluster to the ssh-agent. For security reasons, remember to remove the keys from the ssh-agent after use. -* You have connected to the Windows node link:https://access.redhat.com/solutions/4073041[using an `ssh-bastion` pod]. - -.Procedure - -. 
Run the following command to set up an SSH tunnel: -+ -[source,terminal] ----- -$ ssh -L 2020::3389 \ <1> - core@$(oc get service --all-namespaces -l run=ssh-bastion -o go-template="{{ with (index (index .items 0).status.loadBalancer.ingress 0) }}{{ or .hostname .ip }}{{end}}") ----- -<1> Specify the internal IP address of the node, which can be discovered by running the following command: -+ -[source,terminal] ----- -$ oc get nodes -o jsonpath={.status.addresses[?\(@.type==\"InternalIP\"\)].address} ----- - -. From within the resulting shell, SSH into the Windows node and run the following command to create a password for the user: -+ -[source,terminal] ----- -C:\> net user * <1> ----- -<1> Specify the cloud provider user name, such as `Administrator` for AWS or `capi` for Azure. - -You can now remotely access the Windows node at `localhost:2020` using an RDP client. diff --git a/modules/accessing-windows-node-using-ssh.adoc b/modules/accessing-windows-node-using-ssh.adoc deleted file mode 100644 index 234e4fac65ff..000000000000 --- a/modules/accessing-windows-node-using-ssh.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-windows-container-workload-issues.adoc - -:_content-type: PROCEDURE -[id="accessing-windows-node-using-ssh_{context}"] -= Accessing a Windows node using SSH - -You can access a Windows node by using a secure shell (SSH). - -.Prerequisites - -* You have installed the Windows Machine Config Operator (WMCO) using Operator Lifecycle Manager (OLM). -* You have created a Windows compute machine set. -* You have added the key used in the `cloud-private-key` secret and the key used when creating the cluster to the ssh-agent. For security reasons, remember to remove the keys from the ssh-agent after use. -* You have connected to the Windows node link:https://access.redhat.com/solutions/4073041[using an `ssh-bastion` pod]. - -.Procedure - -* Access the Windows node by running the following command: -+ -[source,terminal] ----- -$ ssh -t -o StrictHostKeyChecking=no -o ProxyCommand='ssh -A -o StrictHostKeyChecking=no \ - -o ServerAliveInterval=30 -W %h:%p core@$(oc get service --all-namespaces -l run=ssh-bastion \ - -o go-template="{{ with (index (index .items 0).status.loadBalancer.ingress 0) }}{{ or .hostname .ip }}{{end}}")' @ <1> <2> ----- -<1> Specify the cloud provider username, such as `Administrator` for Amazon Web Services (AWS) or `capi` for Microsoft Azure. -<2> Specify the internal IP address of the node, which can be discovered by running the following command: -+ -[source,terminal] ----- -$ oc get nodes -o jsonpath={.status.addresses[?\(@.type==\"InternalIP\"\)].address} ----- diff --git a/modules/add-user.adoc b/modules/add-user.adoc deleted file mode 100644 index ca7fba2406ed..000000000000 --- a/modules/add-user.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * assemblies/quickstart-osd.adoc - -:_content-type: PROCEDURE -[id="add-user_{context}"] -= Adding a user - - -Administrator roles are managed using a `dedicated-admins` group on the cluster. You can add and remove users from {cluster-manager-first}. - -.Procedure - -. Navigate to the *Clusters* page and select the cluster you want to add users to. - -. Click the *Access control* tab. - -. Under the *Cluster administrative users* heading, click *Add User*. - -. Enter the user ID you want to add. - -. Click *Add user*. 
- -.Verification - -* You now see the user listed under the *Cluster administrative users* heading. diff --git a/modules/adding-a-custom-logo.adoc b/modules/adding-a-custom-logo.adoc deleted file mode 100644 index 72b86e611336..000000000000 --- a/modules/adding-a-custom-logo.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: PROCEDURE -[id="adding-a-custom-logo_{context}"] -= Adding a custom logo and product name - -You can create custom branding by adding a custom logo or custom product name. You can set both or one without the other, as these settings are independent of each other. - -.Prerequisites - -* You must have administrator privileges. -* Create a file of the logo that you want to use. The logo can be a file in any common image format, including GIF, JPG, PNG, or SVG, and is constrained to a `max-height` of `60px`. - -.Procedure - -. Import your logo file into a config map in the `openshift-config` namespace: -+ -[source,terminal] ----- -$ oc create configmap console-custom-logo --from-file /path/to/console-custom-logo.png -n openshift-config ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to create the config map: - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: console-custom-logo - namespace: openshift-config -binaryData: - console-custom-logo.png: ... <1> ----- -<1> Provide a valid base64-encoded logo. -==== - -. Edit the web console's Operator configuration to include `customLogoFile` and `customProductName`: -+ -[source,terminal] ----- -$ oc edit consoles.operator.openshift.io cluster ----- -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Console -metadata: - name: cluster -spec: - customization: - customLogoFile: - key: console-custom-logo.png - name: console-custom-logo - customProductName: My Console ----- -+ -Once the Operator configuration is updated, it will sync the custom logo config map into the console namespace, mount it to the console pod, and redeploy. - -. Check for success. If there are any issues, the console cluster Operator will report a `Degraded` status, and the console Operator configuration will also report a `CustomLogoDegraded` status, but with reasons like `KeyOrFilenameInvalid` or `NoImageProvided`. -+ -To check the `clusteroperator`, run: -+ -[source,terminal] ----- -$ oc get clusteroperator console -o yaml ----- -+ -To check the console Operator configuration, run: -+ -[source,terminal] ----- -$ oc get consoles.operator.openshift.io -o yaml ----- diff --git a/modules/adding-bare-metal-host-to-cluster-using-web-console.adoc b/modules/adding-bare-metal-host-to-cluster-using-web-console.adoc deleted file mode 100644 index dea67e07e3e3..000000000000 --- a/modules/adding-bare-metal-host-to-cluster-using-web-console.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/managing-bare-metal-hosts.adoc - -:_content-type: PROCEDURE -[id="adding-bare-metal-host-to-cluster-using-web-console_{context}"] -= Adding a bare metal host to the cluster using the web console - -You can add bare metal hosts to the cluster in the web console. - -.Prerequisites - -* Install an {op-system} cluster on bare metal. -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. In the web console, navigate to *Compute* -> *Bare Metal Hosts*. -. Select *Add Host* -> *New with Dialog*. -. 
Specify a unique name for the new bare metal host. -. Set the *Boot MAC address*. -. Set the *Baseboard Management Console (BMC) Address*. -. Enter the user credentials for the host's baseboard management controller (BMC). -. Select to power on the host after creation, and select *Create*. -. Scale up the number of replicas to match the number of available bare metal hosts. Navigate to *Compute* -> *MachineSets*, and increase the number of machine replicas in the cluster by selecting *Edit Machine count* from the *Actions* drop-down menu. - -[NOTE] -==== -You can also manage the number of bare metal nodes using the `oc scale` command and the appropriate bare metal compute machine set. -==== diff --git a/modules/adding-bare-metal-host-to-cluster-using-yaml.adoc b/modules/adding-bare-metal-host-to-cluster-using-yaml.adoc deleted file mode 100644 index 50665bc96ce2..000000000000 --- a/modules/adding-bare-metal-host-to-cluster-using-yaml.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/managing-bare-metal-hosts.adoc - -:_content-type: PROCEDURE -[id="adding-bare-metal-host-to-cluster-using-yaml_{context}"] -= Adding a bare metal host to the cluster using YAML in the web console - -You can add bare metal hosts to the cluster in the web console using a YAML file that describes the bare metal host. - -.Prerequisites - -* Install a {op-system} compute machine on bare metal infrastructure for use in the cluster. -* Log in as a user with `cluster-admin` privileges. -* Create a `Secret` CR for the bare metal host. - -.Procedure - -. In the web console, navigate to *Compute* -> *Bare Metal Hosts*. -. Select *Add Host* -> *New from YAML*. -. Copy and paste the below YAML, modifying the relevant fields with the details of your host: -+ -[source,yaml] ----- -apiVersion: metal3.io/v1alpha1 -kind: BareMetalHost -metadata: - name: -spec: - online: true - bmc: - address: - credentialsName: <1> - disableCertificateVerification: True <2> - bootMACAddress: ----- -+ -<1> `credentialsName` must reference a valid `Secret` CR. The `baremetal-operator` cannot manage the bare metal host without a valid `Secret` referenced in the `credentialsName`. For more information about secrets and how to create them, see xref:../nodes/pods/nodes-pods-secrets.adoc#nodes-pods-secrets-about_nodes-pods-secrets[Understanding secrets]. -<2> Setting `disableCertificateVerification` to `true` disables TLS host validation between the cluster and the baseboard management controller (BMC). - -. Select *Create* to save the YAML and create the new bare metal host. -. Scale up the number of replicas to match the number of available bare metal hosts. Navigate to *Compute* -> *MachineSets*, and increase the number of machines in the cluster by selecting *Edit Machine count* from the *Actions* drop-down menu. -+ -[NOTE] -==== -You can also manage the number of bare metal nodes using the `oc scale` command and the appropriate bare metal compute machine set. 
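For example, to set the bare metal compute machine set to two replicas (a sketch; `<machine_set_name>` is a placeholder for your compute machine set):

[source,terminal]
----
$ oc scale machineset <machine_set_name> -n openshift-machine-api --replicas=2
----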
-==== diff --git a/modules/adding-cluster-notification-contacts.adoc b/modules/adding-cluster-notification-contacts.adoc deleted file mode 100644 index 655864015282..000000000000 --- a/modules/adding-cluster-notification-contacts.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/rosa_logging/rosa-accessing-the-service-logs.adoc -// * osd_cluster_admin/osd_logging/osd-accessing-the-service-logs.adoc -// * logging/sd-accessing-the-service-logs.adoc - -:_content-type: PROCEDURE -[id="adding-cluster-notification-contacts_{context}"] -= Adding cluster notification contacts - -You can add notification contacts for your -ifdef::openshift-dedicated[] -{product-title} -endif::openshift-dedicated[] -ifdef::openshift-rosa[] -{product-title} (ROSA) -endif::openshift-rosa[] -cluster. When an event occurs that triggers a cluster notification email, subscribed users are notified. - -.Procedure - -. Navigate to {cluster-manager-url} and select your cluster. - -. On the *Support* tab, under the *Notification contacts* heading, click *Add notification contact*. - -. Enter the Red Hat username or email of the contact you want to add. -+ -[NOTE] -==== -The username or email address must relate to a user account in the Red Hat organization where the cluster is deployed. -==== - -. Click *Add contact*. - -.Verification - -* You see a confirmation message when you have successfully added the contact. The user appears under the *Notification contacts* heading on the *Support* tab. diff --git a/modules/adding-custom-notification-banners.adoc b/modules/adding-custom-notification-banners.adoc deleted file mode 100644 index 15ddd52527d7..000000000000 --- a/modules/adding-custom-notification-banners.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: PROCEDURE -[id="creating-custom-notification-banners_{context}"] -= Creating custom notification banners - -.Prerequisites - -* You must have administrator privileges. - -.Procedure - -. From *Administration* -> *Custom Resource Definitions*, click on -*ConsoleNotification*. -. Select *Instances* tab -. Click *Create Console Notification* and edit the file: -+ -[source,yaml] ----- -apiVersion: console.openshift.io/v1 -kind: ConsoleNotification -metadata: - name: example -spec: - text: This is an example notification message with an optional link. - location: BannerTop <1> - link: - href: 'https://www.example.com' - text: Optional link text - color: '#fff' - backgroundColor: '#0088ce' ----- -<1> Valid location settings are `BannerTop`, `BannerBottom`, and `BannerTopBottom`. - -. Click *Create* to apply your changes. diff --git a/modules/adding-service-existing.adoc b/modules/adding-service-existing.adoc deleted file mode 100644 index 62e20985eca8..000000000000 --- a/modules/adding-service-existing.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * assemblies/adding-service.adoc - -:_content-type: PROCEDURE -[id="adding-service-existing_{context}"] - -= Adding an add-on service to a cluster - -You can add an add-on service to an existing {product-title} -ifdef::openshift-rosa[] -(ROSA) -endif::openshift-rosa[] -cluster by using {cluster-manager-first}. - -.Prerequisites - -* You have created and provisioned a cluster for {product-title}. -* Your cluster meets all of the prerequisites for the service that you want to add on to your cluster. 
-* For paid add-on services, note the following considerations: -** If the organization has sufficient quota, and if the service is compatible with the cluster, the service appears in {cluster-manager}. -** If the organization has never had quota, or if the cluster is not compatible, then the service does not display. -** If the organization had quota in the past, but the quota is currently `0`, the service is still visible but disabled in {cluster-manager} until you get more quota. - -// TODO: Could this just be one of the above prereqs instead of its own NOTE? -[NOTE] -==== -To add a service to a cluster, you must be the cluster owner. -==== - -.Procedure - -. Navigate to the *Clusters* page in {cluster-manager-url}. - -. Select the cluster you want to add a service to. - -. Click the *Add-ons* tab. - -. Click the service option you want to add, click *Install*. An installing icon appears, indicating that the service has begun installing. -+ -A green check mark appears in the service option when the installation is complete. You might have to refresh your browser to see the installation status. - -. When the service is *Installed*, click *View in console* to access the service. diff --git a/modules/adding-tab-pods-page.adoc b/modules/adding-tab-pods-page.adoc deleted file mode 100644 index a51172c1fd69..000000000000 --- a/modules/adding-tab-pods-page.adoc +++ /dev/null @@ -1,115 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/dynamic-plugin/dynamic-plugin-example.adoc - -:_content-type: PROCEDURE -[id="adding-tab-to-pods-page_{context}"] -= Adding a tab to the pods page - -There are different customizations you can make to the {product-title} web console. The following procedure adds a tab to the *Pod details* page as an example extension to your plugin. - -[NOTE] -==== -The {product-title} web console runs in a container connected to the cluster you have logged into. See "Dynamic plugin development" for information to test the plugin before creating your own. -==== - -.Procedure - -. Visit the link:https://github.com/openshift/console-plugin-template[`console-plugin-template`] repository containing a template for creating plugins in a new tab. -+ -[IMPORTANT] -==== -Custom plugin code is not supported by Red Hat. Only link:https://access.redhat.com/solutions/5893251[Cooperative community support] is available for your plugin. -==== - -. Create a GitHub repository for the template by clicking *Use this template* -> *_Create new repository_*. - -. Rename the new repository with the name of your plugin. - -. Clone the new repository to your local machine so you can edit the code. - -. Edit the `package.json` file, adding your plugin's metadata to the `consolePlugin` declaration. For example: -+ -[source,json] - ----- -"consolePlugin": { - "name": "my-plugin", <1> - "version": "0.0.1", <2> - "displayName": "My Plugin", <3> - "description": "Enjoy this shiny, new console plugin!", <4> - "exposedModules": { - "ExamplePage": "./components/ExamplePage" - }, - "dependencies": { - "@console/pluginAPI": "/*" - } -} ----- -<1> Update the name of your plugin. -<2> Update the version. -<3> Update the display name for your plugin. -<4> Update the description with a synopsis about your plugin. - -. 
Add the following to the `console-extensions.json` file: -+ -[source,json] - ----- -{ - "type": "console.tab/horizontalNav", - "properties": { - "page": { - "name": "Example Tab", - "href": "example" - }, - "model": { - "group": "core", - "version": "v1", - "kind": "Pod" - }, - "component": { "$codeRef": "ExampleTab" } - } -} ----- - -. Edit the `package.json` file to include the following changes: -+ -[source,json] - ----- - "exposedModules": { - "ExamplePage": "./components/ExamplePage", - "ExampleTab": "./components/ExampleTab" - } ----- - -. Write a message to display on a new custom tab on the *Pods* page by creating a new file `src/components/ExampleTab.tsx` and adding the following script: -+ -[source,tsx] - ----- -import * as React from 'react'; - -export default function ExampleTab() { - return ( -

      <p>This is a custom tab added to a resource using a dynamic plugin.</p>

- ); -} ----- - -. Install a Helm chart with the name of the plugin as the Helm release name into a new namespace or an existing namespace as specified by the `-n` command-line option to deploy your plugin on a cluster. Provide the location of the image within the `plugin.image` parameter by using the following command: - -+ -[source,terminal] ----- -$ helm upgrade -i my-plugin charts openshift-console-plugin -n my-plugin-namespace --create-namespace --set plugin image=my-plugin-image-location ----- -+ -[NOTE] -==== -For more information on deploying your plugin on a cluster, see "Deploy your plugin on a cluster". -==== - -.Verification -* Visit a *Pod* page to view the added tab. diff --git a/modules/adding-tls-termination.adoc b/modules/adding-tls-termination.adoc deleted file mode 100644 index 2192b5d6b3f9..000000000000 --- a/modules/adding-tls-termination.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/aws_load_balancer_operator/add-tls-termination.adoc - -:_content-type: PROCEDURE -[id="nw-adding-tls-termination_{context}"] -= Adding TLS termination on the AWS Load Balancer - -You can route the traffic for the domain to pods of a service and add TLS termination on the AWS Load Balancer. - -.Prerequisites - -* You have an access to the OpenShift CLI (`oc`). - -.Procedure - -. Install the Operator and create an instance of the `aws-load-balancer-controller` resource: -+ -[source,yaml] ----- -apiVersion: networking.olm.openshift.io/v1 -kind: AWSLoadBalancerController -metadata: - name: cluster <1> -spec: - subnetTagging: auto - ingressClass: tls-termination <2> ----- -<1> Defines the `aws-load-balancer-controller` instance. -<2> Defines the name of an `ingressClass` resource reconciled by the AWS Load Balancer Controller. This `ingressClass` resource gets created if it is not present. You can add additional `ingressClass` values. The controller reconciles the `ingressClass` values if the `spec.controller` is set to `ingress.k8s.aws/alb`. - -. Create an `Ingress` resource: -+ -[source,yaml] ----- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: <1> - annotations: - alb.ingress.kubernetes.io/scheme: internet-facing <2> - alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-west-2:xxxxx <3> -spec: - ingressClassName: tls-termination <4> - rules: - - host: <5> - http: - paths: - - path: / - pathType: Exact - backend: - service: - name: <6> - port: - number: 80 ----- -<1> Specifies the name of an ingress. -<2> The controller provisions the load balancer for this `Ingress` resource in a public subnet so that the load balancer is reachable over the internet. -<3> The Amazon Resource Name of the certificate that you attach to the load balancer. -<4> Defines the ingress class name. -<5> Defines the domain for traffic routing. -<6> Defines the service for traffic routing. diff --git a/modules/adding-to-a-project.adoc b/modules/adding-to-a-project.adoc deleted file mode 100644 index 5cc909680bb7..000000000000 --- a/modules/adding-to-a-project.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// applications/projects/working-with-projects.adoc - -:_content-type: PROCEDURE -[id="adding-to-a-project_{context}"] -= Adding to a project - -.Procedure - -. Select *Developer* from the context selector at the top of the web console -navigation menu. - -. Click *+Add* - -. At the top of the page, select the name of the project that you want to add to. - -. 
Click a method for adding to your project, and then follow the workflow. - -[NOTE] -==== -You can also add components to the topology using quick search. -==== diff --git a/modules/adding-yaml-examples-to-kube-resources.adoc b/modules/adding-yaml-examples-to-kube-resources.adoc deleted file mode 100644 index d179f415b02e..000000000000 --- a/modules/adding-yaml-examples-to-kube-resources.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: PROCEDURE -[id="adding-yaml-examples-to-kube-resources_{context}"] -= Adding YAML examples to Kubernetes resources - -You can dynamically add YAML examples to any Kubernetes resources at any time. - -.Prerequisites - -* You must have cluster administrator privileges. - -.Procedure - -. From *Administration* -> *Custom Resource Definitions*, click on *ConsoleYAMLSample*. - -. Click *YAML* and edit the file: -+ -[source,yaml] ----- -apiVersion: console.openshift.io/v1 -kind: ConsoleYAMLSample -metadata: - name: example -spec: - targetResource: - apiVersion: batch/v1 - kind: Job - title: Example Job - description: An example Job YAML sample - yaml: | - apiVersion: batch/v1 - kind: Job - metadata: - name: countdown - spec: - template: - metadata: - name: countdown - spec: - containers: - - name: counter - image: centos:7 - command: - - "bin/bash" - - "-c" - - "for i in 9 8 7 6 5 4 3 2 1 ; do echo $i ; done" - restartPolicy: Never ----- -Use `spec.snippet` to indicate that the YAML sample is not the full YAML resource -definition, but a fragment that can be inserted into the existing YAML document -at the user's cursor. - -. Click *Save*. diff --git a/modules/admin-credentials-root-secret-formats.adoc b/modules/admin-credentials-root-secret-formats.adoc deleted file mode 100644 index 7bd05415f3d4..000000000000 --- a/modules/admin-credentials-root-secret-formats.adoc +++ /dev/null @@ -1,132 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc -// * authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc - -ifeval::["{context}" == "cco-mode-mint"] -:mint: -endif::[] -ifeval::["{context}" == "cco-mode-passthrough"] -:passthrough: -endif::[] - -:_content-type: REFERENCE -[id="admin-credentials-root-secret-formats_{context}"] -= Admin credentials root secret format - -Each cloud provider uses a credentials root secret in the `kube-system` -namespace by convention, which is then used to satisfy all credentials requests -and create their respective secrets. -This is done either by minting new credentials with _mint mode_, or by copying the credentials root secret with _passthrough mode_. - -The format for the secret varies by cloud, and is also used for each -`CredentialsRequest` secret. 
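As a quick check of which format applies to an existing cluster, you can inspect the root secret in the `kube-system` namespace. The following sketch uses the AWS secret name shown in the next example; substitute the secret name for your cloud provider:

[source,terminal]
----
$ oc get secret aws-creds -n kube-system -o yaml
----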
- -.Amazon Web Services (AWS) secret format - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: kube-system - name: aws-creds -stringData: - aws_access_key_id: - aws_secret_access_key: ----- - -ifdef::passthrough[] - -.Microsoft Azure secret format - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: kube-system - name: azure-credentials -stringData: - azure_subscription_id: - azure_client_id: - azure_client_secret: - azure_tenant_id: - azure_resource_prefix: - azure_resourcegroup: - azure_region: ----- - -On Microsoft Azure, the credentials secret format includes two properties that must contain the cluster's infrastructure ID, generated randomly for each cluster installation. This value can be found after running create manifests: - -[source,terminal] ----- -$ cat .openshift_install_state.json | jq '."*installconfig.ClusterID".InfraID' -r ----- - -.Example output -[source,terminal] ----- -mycluster-2mpcn ----- - -This value would be used in the secret data as follows: - -[source,yaml] ----- -azure_resource_prefix: mycluster-2mpcn -azure_resourcegroup: mycluster-2mpcn-rg ----- -endif::passthrough[] - -.Google Cloud Platform (GCP) secret format - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: kube-system - name: gcp-credentials -stringData: - service_account.json: ----- - -ifdef::passthrough[] - -.{rh-openstack-first} secret format - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: kube-system - name: openstack-credentials -data: - clouds.yaml: - clouds.conf: ----- - -.VMware vSphere secret format - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: kube-system - name: vsphere-creds -data: - vsphere.openshift.example.com.username: - vsphere.openshift.example.com.password: ----- - -endif::passthrough[] - -ifeval::["{context}" == "cco-mode-mint"] -:!mint: -endif::[] -ifeval::["{context}" == "cco-mode-passthrough"] -:!passthrough: -endif::[] diff --git a/modules/admission-plug-ins-about.adoc b/modules/admission-plug-ins-about.adoc deleted file mode 100644 index 0616ddd904c3..000000000000 --- a/modules/admission-plug-ins-about.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/admission-plug-ins.adoc - -:_content-type: CONCEPT -[id="admission-plug-ins-about_{context}"] -= About admission plugins - -Admission plugins are used to help regulate how {product-title} {product-version} functions. Admission plugins intercept requests to the master API to validate resource requests and ensure policies are adhered to, after the request is authenticated and authorized. For example, they are commonly used to enforce security policy, resource limitations or configuration requirements. - -Admission plugins run in sequence as an admission chain. If any admission plugin in the sequence rejects a request, the whole chain is aborted and an error is returned. - -{product-title} has a default set of admission plugins enabled for each resource type. These are required for proper functioning of the cluster. Admission plugins ignore resources that they are not responsible for. - -In addition to the defaults, the admission chain can be extended dynamically through webhook admission plugins that call out to custom webhook servers. There are two types of webhook admission plugins: a mutating admission plugin and a validating admission plugin. The mutating admission plugin runs first and can both modify resources and validate requests. 
The validating admission plugin validates requests and runs after the mutating admission plugin so that modifications triggered by the mutating admission plugin can also be validated. - -Calling webhook servers through a mutating admission plugin can produce side effects on resources related to the target object. In such situations, you must take steps to validate that the end result is as expected. - -[WARNING] -==== -Dynamic admission should be used cautiously because it impacts cluster control plane operations. When calling webhook servers through webhook admission plugins in {product-title} {product-version}, ensure that you have read the documentation fully and tested for side effects of mutations. Include steps to restore resources back to their original state prior to mutation, in the event that a request does not pass through the entire admission chain. -==== diff --git a/modules/admission-plug-ins-default.adoc b/modules/admission-plug-ins-default.adoc deleted file mode 100644 index 6ef96178cbc8..000000000000 --- a/modules/admission-plug-ins-default.adoc +++ /dev/null @@ -1,77 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/admission-plug-ins.adoc - -[id="admission-plug-ins-default_{context}"] -= Default admission plugins - -//Future xref - A set of default admission plugins is enabled in {product-title} {product-version}. These default plugins contribute to fundamental control plane functionality, such as ingress policy, xref:../nodes/clusters/nodes-cluster-overcommit.adoc#nodes-cluster-resource-override_nodes-cluster-overcommit[cluster resource limit override] and quota policy. -Default validating and admission plugins are enabled in {product-title} {product-version}. These default plugins contribute to fundamental control plane functionality, such as ingress policy, cluster resource limit override and quota policy. 
The following lists contain the default admission plugins: - -.Validating admission plugins -[%collapsible] -==== -* `LimitRanger` -* `ServiceAccount` -* `PodNodeSelector` -* `Priority` -* `PodTolerationRestriction` -* `OwnerReferencesPermissionEnforcement` -* `PersistentVolumeClaimResize` -* `RuntimeClass` -* `CertificateApproval` -* `CertificateSigning` -* `CertificateSubjectRestriction` -* `autoscaling.openshift.io/ManagementCPUsOverride` -* `authorization.openshift.io/RestrictSubjectBindings` -* `scheduling.openshift.io/OriginPodNodeEnvironment` -* `network.openshift.io/ExternalIPRanger` -* `network.openshift.io/RestrictedEndpointsAdmission` -* `image.openshift.io/ImagePolicy` -* `security.openshift.io/SecurityContextConstraint` -* `security.openshift.io/SCCExecRestrictions` -* `route.openshift.io/IngressAdmission` -* `config.openshift.io/ValidateAPIServer` -* `config.openshift.io/ValidateAuthentication` -* `config.openshift.io/ValidateFeatureGate` -* `config.openshift.io/ValidateConsole` -* `operator.openshift.io/ValidateDNS` -* `config.openshift.io/ValidateImage` -* `config.openshift.io/ValidateOAuth` -* `config.openshift.io/ValidateProject` -* `config.openshift.io/DenyDeleteClusterConfiguration` -* `config.openshift.io/ValidateScheduler` -* `quota.openshift.io/ValidateClusterResourceQuota` -* `security.openshift.io/ValidateSecurityContextConstraints` -* `authorization.openshift.io/ValidateRoleBindingRestriction` -* `config.openshift.io/ValidateNetwork` -* `operator.openshift.io/ValidateKubeControllerManager` -* `ValidatingAdmissionWebhook` -* `ResourceQuota` -* `quota.openshift.io/ClusterResourceQuota` -==== - - -.Mutating admission plugins -[%collapsible] -==== -* `NamespaceLifecycle` -* `LimitRanger` -* `ServiceAccount` -* `NodeRestriction` -* `TaintNodesByCondition` -* `PodNodeSelector` -* `Priority` -* `DefaultTolerationSeconds` -* `PodTolerationRestriction` -* `DefaultStorageClass` -* `StorageObjectInUseProtection` -* `RuntimeClass` -* `DefaultIngressClass` -* `autoscaling.openshift.io/ManagementCPUsOverride` -* `scheduling.openshift.io/OriginPodNodeEnvironment` -* `image.openshift.io/ImagePolicy` -* `security.openshift.io/SecurityContextConstraint` -* `security.openshift.io/DefaultSecurityContextConstraints` -* `MutatingAdmissionWebhook` -==== diff --git a/modules/admission-webhook-types.adoc b/modules/admission-webhook-types.adoc deleted file mode 100644 index b00f98cb2794..000000000000 --- a/modules/admission-webhook-types.adoc +++ /dev/null @@ -1,110 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/admission-plug-ins.adoc - -[id="admission-webhook-types_{context}"] -= Types of webhook admission plugins - -Cluster administrators can call out to webhook servers through the mutating admission plugin or the validating admission plugin in the API server admission chain. - -[id="mutating-admission-plug-in_{context}"] -== Mutating admission plugin - -The mutating admission plugin is invoked during the mutation phase of the admission process, which allows modification of resource content before it is persisted. One example webhook that can be called through the mutating admission plugin is the Pod Node Selector feature, which uses an annotation on a namespace to find a label selector and add it to the pod specification. 
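As an illustration of the Pod Node Selector example above, the plugin reads a node selector annotation on the namespace and merges it into the specification of pods created in that namespace. The following is a minimal sketch that assumes the upstream annotation key and example label values:

[source,yaml]
----
apiVersion: v1
kind: Namespace
metadata:
  name: example-namespace
  annotations:
    # Read by the Pod Node Selector plugin; pods created in this namespace
    # are mutated to include the node selector "region=east".
    scheduler.alpha.kubernetes.io/node-selector: "region=east"
----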
- -[id="mutating-admission-plug-in-config_{context}"] -.Sample mutating admission plugin configuration - -[source,yaml] ----- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: MutatingWebhookConfiguration <1> -metadata: - name: <2> -webhooks: -- name: <3> - clientConfig: <4> - service: - namespace: default <5> - name: kubernetes <6> - path: <7> - caBundle: <8> - rules: <9> - - operations: <10> - - - apiGroups: - - "" - apiVersions: - - "*" - resources: - - - failurePolicy: <11> - sideEffects: None ----- - -<1> Specifies a mutating admission plugin configuration. -<2> The name for the `MutatingWebhookConfiguration` object. Replace `` with the appropriate value. -<3> The name of the webhook to call. Replace `` with the appropriate value. -<4> Information about how to connect to, trust, and send data to the webhook server. -<5> The namespace where the front-end service is created. -<6> The name of the front-end service. -<7> The webhook URL used for admission requests. Replace `` with the appropriate value. -<8> A PEM-encoded CA certificate that signs the server certificate that is used by the webhook server. Replace `` with the appropriate certificate in base64 format. -<9> Rules that define when the API server should use this webhook admission plugin. -<10> One or more operations that trigger the API server to call this webhook admission plugin. Possible values are `create`, `update`, `delete` or `connect`. Replace `` and `` with the appropriate values. -<11> Specifies how the policy should proceed if the webhook server is unavailable. -Replace `` with either `Ignore` (to unconditionally accept the request in the event of a failure) or `Fail` (to deny the failed request). Using `Ignore` can result in unpredictable behavior for all clients. - -[IMPORTANT] -==== -In {product-title} {product-version}, objects created by users or control loops through a mutating admission plugin might return unexpected results, especially if values set in an initial request are overwritten, which is not recommended. -==== - -[id="validating-admission-plug-in_{context}"] -== Validating admission plugin - -A validating admission plugin is invoked during the validation phase of the admission process. This phase allows the enforcement of invariants on particular API resources to ensure that the resource does not change again. The Pod Node Selector is also an example of a webhook which is called by the validating admission plugin, to ensure that all `nodeSelector` fields are constrained by the node selector restrictions on the namespace. - -[id="validating-admission-plug-in-config_{context}"] -//http://blog.kubernetes.io/2018/01/extensible-admission-is-beta.html -.Sample validating admission plugin configuration - -[source,yaml] ----- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration <1> -metadata: - name: <2> -webhooks: -- name: <3> - clientConfig: <4> - service: - namespace: default <5> - name: kubernetes <6> - path: <7> - caBundle: <8> - rules: <9> - - operations: <10> - - - apiGroups: - - "" - apiVersions: - - "*" - resources: - - - failurePolicy: <11> - sideEffects: Unknown ----- - -<1> Specifies a validating admission plugin configuration. -<2> The name for the `ValidatingWebhookConfiguration` object. Replace `` with the appropriate value. -<3> The name of the webhook to call. Replace `` with the appropriate value. -<4> Information about how to connect to, trust, and send data to the webhook server. -<5> The namespace where the front-end service is created. 
-<6> The name of the front-end service. -<7> The webhook URL used for admission requests. Replace `` with the appropriate value. -<8> A PEM-encoded CA certificate that signs the server certificate that is used by the webhook server. Replace `` with the appropriate certificate in base64 format. -<9> Rules that define when the API server should use this webhook admission plugin. -<10> One or more operations that trigger the API server to call this webhook admission plugin. Possible values are `create`, `update`, `delete` or `connect`. Replace `` and `` with the appropriate values. -<11> Specifies how the policy should proceed if the webhook server is unavailable. -Replace `` with either `Ignore` (to unconditionally accept the request in the event of a failure) or `Fail` (to deny the failed request). Using `Ignore` can result in unpredictable behavior for all clients. diff --git a/modules/admission-webhooks-about.adoc b/modules/admission-webhooks-about.adoc deleted file mode 100644 index 6a8a92b02180..000000000000 --- a/modules/admission-webhooks-about.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/admission-plug-ins.adoc - -[id="admission-webhooks-about_{context}"] -= Webhook admission plugins - -In addition to {product-title} default admission plugins, dynamic admission can be implemented through webhook admission plugins that call webhook servers, to extend the functionality of the admission chain. Webhook servers are called over HTTP at defined endpoints. - -There are two types of webhook admission plugins in {product-title}: - -//Future xref - * During the admission process, xref:../architecture/admission-plug-ins.adoc#mutating-admission-plug-in[the mutating admission plugin] can perform tasks, such as injecting affinity labels. -* During the admission process, the _mutating admission plugin_ can perform tasks, such as injecting affinity labels. - -//Future xref - * At the end of the admission process, xref:../architecture/admission-plug-ins.adoc#validating-admission-plug-in[the validating admission plugin] makes sure an object is configured properly, for example ensuring affinity labels are as expected. If the validation passes, {product-title} schedules the object as configured. -* At the end of the admission process, the _validating admission plugin_ can be used to make sure an object is configured properly, for example ensuring affinity labels are as expected. If the validation passes, {product-title} schedules the object as configured. - -When an API request comes in, mutating or validating admission plugins use the list of external webhooks in the configuration and call them in parallel: - -* If all of the webhooks approve the request, the admission chain continues. - -* If any of the webhooks deny the request, the admission request is denied and the reason for doing so is based on the first denial. - -* If more than one webhook denies the admission request, only the first denial reason is returned to the user. - -* If an error is encountered when calling a webhook, the request is either denied or the webhook is ignored depending on the error policy set. If the error policy is set to `Ignore`, the request is unconditionally accepted in the event of a failure. If the policy is set to `Fail`, failed requests are denied. Using `Ignore` can result in unpredictable behavior for all clients. - -//Future xrefs - Communication between the webhook admission plugin and the webhook server must use TLS. 
Generate a certificate authority (CA) certificate and use the certificate to sign the server certificate that is used by your webhook server. The PEM-encoded CA certificate is supplied to the webhook admission plugin using a mechanism, such as xref:../security/certificates/service-serving-certificate.adoc#service-serving-certificate[service serving certificate secrets]. -Communication between the webhook admission plugin and the webhook server must use TLS. Generate a CA certificate and use the certificate to sign the server certificate that is used by your webhook admission server. The PEM-encoded CA certificate is supplied to the webhook admission plugin using a mechanism, such as service serving certificate secrets. - -The following diagram illustrates the sequential admission chain process within which multiple webhook servers are called. - -.API admission chain with mutating and validating admission plugins -image::api-admission-chain.png["API admission stage", align="center"] - -An example webhook admission plugin use case is where all pods must have a common set of labels. In this example, the mutating admission plugin can inject labels and the validating admission plugin can check that labels are as expected. {product-title} would subsequently schedule pods that include required labels and reject those that do not. - -Some common webhook admission plugin use cases include: - -//Future xref - * Namespace reservation. -* Namespace reservation. -//Future xrefs - * :../networking/hardware_networks/configuring-sriov-operator.adoc#configuring-sriov-operator[Limiting custom network resources managed by the SR-IOV network device plugin]. -* Limiting custom network resources managed by the SR-IOV network device plugin. -//Future xref - * xref:../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations_dedicating_nodes-scheduler-taints-tolerations[Defining tolerations that enable taints to qualify which pods should be scheduled on a node]. -* Defining tolerations that enable taints to qualify which pods should be scheduled on a node. -//Future xref - * xref:../nodes/pods/nodes-pods-priority.adoc#admin-guide-priority-preemption-names_nodes-pods-priority[Pod priority class validation]. -* Pod priority class validation. - -[NOTE] -==== -The maximum default webhook timeout value in {product-title} is 13 seconds, and it cannot be changed. -==== diff --git a/modules/advanced-node-tuning-hosted-cluster.adoc b/modules/advanced-node-tuning-hosted-cluster.adoc deleted file mode 100644 index 750ff762b47d..000000000000 --- a/modules/advanced-node-tuning-hosted-cluster.adoc +++ /dev/null @@ -1,155 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/using-node-tuning-operator.adoc - -:_content-type: PROCEDURE -[id="advanced-node-tuning-hosted-cluster_{context}"] -= Advanced node tuning for hosted clusters by setting kernel boot parameters - -:FeatureName: Hosted control planes -include::snippets/technology-preview.adoc[] - -For more advanced tuning in hosted control planes, which requires setting kernel boot parameters, you can also use the Node Tuning Operator. The following example shows how you can create a node pool with huge pages reserved. - -.Procedure - -. Create a `ConfigMap` object that contains a `Tuned` object manifest for creating 10 huge pages that are 2 MB in size. 
Save this `ConfigMap` manifest in a file named `tuned-hugepages.yaml`: -+ -[source,yaml] ----- - apiVersion: v1 - kind: ConfigMap - metadata: - name: tuned-hugepages - namespace: clusters - data: - tuning: | - apiVersion: tuned.openshift.io/v1 - kind: Tuned - metadata: - name: hugepages - namespace: openshift-cluster-node-tuning-operator - spec: - profile: - - data: | - [main] - summary=Boot time configuration for hugepages - include=openshift-node - [bootloader] - cmdline_openshift_node_hugepages=hugepagesz=2M hugepages=50 - name: openshift-node-hugepages - recommend: - - priority: 20 - profile: openshift-node-hugepages ----- -+ -[NOTE] -==== -The `.spec.recommend.match` field is intentionally left blank. In this case, this `Tuned` object is applied to all nodes in the node pool where this `ConfigMap` object is referenced. Group nodes with the same hardware configuration into the same node pool. Otherwise, TuneD operands can calculate conflicting kernel parameters for two or more nodes that share the same node pool. -==== - -. Create the `ConfigMap` object in the management cluster: -+ -[source,terminal] ----- -$ oc --kubeconfig="$MGMT_KUBECONFIG" create -f tuned-hugepages.yaml ----- - -. Create a `NodePool` manifest YAML file, customize the upgrade type of the `NodePool`, and reference the `ConfigMap` object that you created in the `spec.tuningConfig` section. Create the `NodePool` manifest and save it in a file named `hugepages-nodepool.yaml` by using the `hypershift` CLI: -+ -[source,yaml] ----- - NODEPOOL_NAME=hugepages-example - INSTANCE_TYPE=m5.2xlarge - NODEPOOL_REPLICAS=2 - - hypershift create nodepool aws \ - --cluster-name $CLUSTER_NAME \ - --name $NODEPOOL_NAME \ - --node-count $NODEPOOL_REPLICAS \ - --instance-type $INSTANCE_TYPE \ - --render > hugepages-nodepool.yaml ----- - -. In the `hugepages-nodepool.yaml` file, set `.spec.management.upgradeType` to `InPlace`, and set `.spec.tuningConfig` to reference the `tuned-hugepages` `ConfigMap` object that you created. -+ -[source,yaml] ----- - apiVersion: hypershift.openshift.io/v1alpha1 - kind: NodePool - metadata: - name: hugepages-nodepool - namespace: clusters - ... - spec: - management: - ... - upgradeType: InPlace - ... - tuningConfig: - - name: tuned-hugepages ----- -+ -[NOTE] -==== -To avoid the unnecessary re-creation of nodes when you apply the new `MachineConfig` objects, set `.spec.management.upgradeType` to `InPlace`. If you use the `Replace` upgrade type, nodes are fully deleted and new nodes can replace them when you apply the new kernel boot parameters that the TuneD operand calculated. -==== - -. Create the `NodePool` in the management cluster: -+ -[source,terminal] ----- -$ oc --kubeconfig="$MGMT_KUBECONFIG" create -f hugepages-nodepool.yaml ----- - -.Verification - -After the nodes are available, the containerized TuneD daemon calculates the required kernel boot parameters based on the applied TuneD profile. After the nodes are ready and reboot once to apply the generated `MachineConfig` object, you can verify that the TuneD profile is applied and that the kernel boot parameters are set. - -. List the `Tuned` objects in the hosted cluster: -+ -[source,terminal] ----- -$ oc --kubeconfig="$HC_KUBECONFIG" get Tuneds -n openshift-cluster-node-tuning-operator ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -default 123m -hugepages-8dfb1fed 1m23s -rendered 123m ----- - -. 
List the `Profile` objects in the hosted cluster: -+ -[source,terminal] ----- -$ oc --kubeconfig="$HC_KUBECONFIG" get Profiles -n openshift-cluster-node-tuning-operator ----- -+ -.Example output -[source,terminal] ----- -NAME TUNED APPLIED DEGRADED AGE -nodepool-1-worker-1 openshift-node True False 132m -nodepool-1-worker-2 openshift-node True False 131m -hugepages-nodepool-worker-1 openshift-node-hugepages True False 4m8s -hugepages-nodepool-worker-2 openshift-node-hugepages True False 3m57s ----- -+ -Both of the worker nodes in the new `NodePool` have the `openshift-node-hugepages` profile applied. - -. To confirm that the tuning was applied correctly, start a debug shell on a node and check `/proc/cmdline`. -+ -[source,terminal] ----- -$ oc --kubeconfig="$HC_KUBECONFIG" debug node/nodepool-1-worker-1 -- chroot /host cat /proc/cmdline ----- -+ -.Example output -[source,terminal] ----- -BOOT_IMAGE=(hd0,gpt3)/ostree/rhcos-... hugepagesz=2M hugepages=50 ----- \ No newline at end of file diff --git a/modules/agent-install-about-mirroring-for-disconnected-registry.adoc b/modules/agent-install-about-mirroring-for-disconnected-registry.adoc deleted file mode 100644 index 2e041c2cbdc6..000000000000 --- a/modules/agent-install-about-mirroring-for-disconnected-registry.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * list of assemblies where this module is included -// * installing/installing_with_agent_based_installer/understanding-disconnected-installation-mirroring.adoc -// re-use of applicable content from disconnected install mirroring - -:_content-type: CONCEPT -[id="agent-install-about-mirroring-for-disconnected-registry_{context}"] -= About mirroring the {product-title} image repository for a disconnected registry - -To use mirror images for a disconnected installation with the Agent-based Installer, you must modify the `install-config.yaml` file. - -You can mirror the release image by using the output of either the `oc adm release mirror` or `oc mirror` command. -This is dependent on which command you used to set up the mirror registry. - -The following example shows the output of the `oc adm release mirror` command. - -[source,terminal] ----- -$ oc adm release mirror ----- - -.Example output - -[source,terminal] ----- -To use the new mirrored repository to install, add the following -section to the install-config.yaml: - -imageContentSources: - -mirrors: -virthost.ostest.test.metalkube.org:5000/localimages/local-release-image -source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -mirrors: -virthost.ostest.test.metalkube.org:5000/localimages/local-release-image -source: registry.ci.openshift.org/ocp/release ----- - -The following example shows part of the `imageContentSourcePolicy.yaml` file generated by the oc-mirror plugin. The file can be found in the results directory, for example `oc-mirror-workspace/results-1682697932/`. 
- -.Example `imageContentSourcePolicy.yaml` file - -[source,yaml] ----- -spec: - repositoryDigestMirrors: - - mirrors: - - virthost.ostest.test.metalkube.org:5000/openshift/release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev - - mirrors: - - virthost.ostest.test.metalkube.org:5000/openshift/release-images - source: quay.io/openshift-release-dev/ocp-release ----- - - diff --git a/modules/agent-install-configuring-for-disconnected-registry.adoc b/modules/agent-install-configuring-for-disconnected-registry.adoc deleted file mode 100644 index e1a8378c6594..000000000000 --- a/modules/agent-install-configuring-for-disconnected-registry.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_with_agent_based_installer/understanding-disconnected-installation-mirroring.adoc - -:_content-type: Procedure[id="agent-install-configuring-for-disconnected-registry_{context}"] -= Configuring the Agent-based Installer to use mirrored images - -You must use the output of either the `oc adm release mirror` command or the oc-mirror plugin to configure the Agent-based Installer to use mirrored images. - -.Procedure - -. If you used the oc-mirror plugin to mirror your release images: - -.. Open the `imageContentSourcePolicy.yaml` located in the results directory, for example `oc-mirror-workspace/results-1682697932/`. - -.. Copy the text in the `repositoryDigestMirrors` section of the yaml file. - -. If you used the `oc adm release mirror` command to mirror your release images: - -* Copy the text in the `imageContentSources` section of the command output. - -. Paste the copied text into the `imageContentSources` field of the `install-config.yaml` file. - -. Add the certificate file used for the mirror registry to the `additionalTrustBundle` field of the yaml file. -+ -[IMPORTANT] -==== -The value must be the contents of the certificate file that you used for your mirror registry. -The certificate file can be an existing, trusted certificate authority, or the self-signed certificate that you generated for the mirror registry. -==== -+ -.Example `install-config.yaml` file - -[source,yaml] ----- - additionalTrustBundle: | - -----BEGIN CERTIFICATE----- - ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ - -----END CERTIFICATE----- ----- - -. If you are using {ztp} manifests: add the `registries.conf` and `ca-bundle.crt` files to the `mirror` path to add the mirror configuration in the agent ISO image. -+ -[NOTE] -==== -You can create the `registries.conf` file from the output of either the `oc adm release mirror` command or the `oc mirror` plugin. The format of the `/etc/containers/registries.conf` file has changed. It is now version 2 and in TOML format. 
-==== -+ -.Example `registries.conf` file - -[source,toml] ----- -[[registry]] -location = "registry.ci.openshift.org/ocp/release" mirror-by-digest-only = true - -[[registry.mirror]] location = "virthost.ostest.test.metalkube.org:5000/localimages/local-release-image" - -[[registry]] -location = "quay.io/openshift-release-dev/ocp-v4.0-art-dev" mirror-by-digest-only = true - -[[registry.mirror]] location = "virthost.ostest.test.metalkube.org:5000/localimages/local-release-image" ----- \ No newline at end of file diff --git a/modules/agent-install-ipi-install-root-device-hints.adoc b/modules/agent-install-ipi-install-root-device-hints.adoc deleted file mode 100644 index 15133769910e..000000000000 --- a/modules/agent-install-ipi-install-root-device-hints.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// This is included in the following assemblies: -// -// preparing-to-install-with-agent-based-installer.adoc - -:_content-type: REFERENCE -[id='root-device-hints_{context}'] -= About root device hints - -The `rootDeviceHints` parameter enables the installer to provision the {op-system-first} image to a particular device. The installer examines the devices in the order it discovers them, and compares the discovered values with the hint values. The installer uses the first discovered device that matches the hint value. The configuration can combine multiple hints, but a device must match all hints for the installer to select it. - -.Subfields - -|=== -| Subfield | Description - -| `deviceName` | A string containing a Linux device name like `/dev/vda`. The hint must match the actual value exactly. - -| `hctl` | A string containing a SCSI bus address like `0:0:0:0`. The hint must match the actual value exactly. - -| `model` | A string containing a vendor-specific device identifier. The hint can be a substring of the actual value. - -| `vendor` | A string containing the name of the vendor or manufacturer of the device. The hint can be a sub-string of the actual value. - -| `serialNumber` | A string containing the device serial number. The hint must match the actual value exactly. - -| `minSizeGigabytes` | An integer representing the minimum size of the device in gigabytes. - -| `wwn` | A string containing the unique storage identifier. The hint must match the actual value exactly. - -| `rotational` | A boolean indicating whether the device should be a rotating disk (true) or not (false). - -|=== - -.Example usage - -[source,yaml] ----- - - name: master-0 - role: master - rootDeviceHints: - deviceName: "/dev/sda" ----- diff --git a/modules/agent-install-networking.adoc b/modules/agent-install-networking.adoc deleted file mode 100644 index 4797f35c5962..000000000000 --- a/modules/agent-install-networking.adoc +++ /dev/null @@ -1,132 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing-with-agent-based-installer/preparing-to-install-with-agent-based-installer.adoc - -:_content-type: CONCEPT -[id="agent-install-networking_{context}"] -= About networking - -The *rendezvous IP* must be known at the time of generating the agent ISO, so that during the initial boot all the hosts can check in to the assisted service. -If the IP addresses are assigned using a Dynamic Host Configuration Protocol (DHCP) server, then the `rendezvousIP` field must be set to an IP address of one of the hosts that will become part of the deployed control plane. -In an environment without a DHCP server, you can define IP addresses statically. 
- -In addition to static IP addresses, you can apply any network configuration that is in NMState format. This includes VLANs and NIC bonds. - -== DHCP - -.Preferred method: `install-config.yaml` and `agent.config.yaml` - -You must specify the value for the `rendezvousIP` field. The `networkConfig` fields can be left blank: - -.Sample agent-config.yaml.file - -[source,yaml] ----- -apiVersion: v1alpha1 -kind: AgentConfig -metadata: - name: sno-cluster -rendezvousIP: 192.168.111.80 <1> ----- -<1> The IP address for the rendezvous host. - -== Static networking - -.. Preferred method: `install-config.yaml` and `agent.config.yaml` - -+ -.Sample agent-config.yaml.file -+ -[source,yaml] ----- - cat > agent-config.yaml << EOF - apiVersion: v1alpha1 - kind: AgentConfig - metadata: - name: sno-cluster - rendezvousIP: 192.168.111.80 <1> - hosts: - - hostname: master-0 - interfaces: - - name: eno1 - macAddress: 00:ef:44:21:e6:a5 <2> - networkConfig: - interfaces: - - name: eno1 - type: ethernet - state: up - mac-address: 00:ef:44:21:e6:a5 - ipv4: - enabled: true - address: - - ip: 192.168.111.80 <3> - prefix-length: 23 <4> - dhcp: false - dns-resolver: - config: - server: - - 192.168.111.1 <5> - routes: - config: - - destination: 0.0.0.0/0 - next-hop-address: 192.168.111.1 <6> - next-hop-interface: eth0 - table-id: 254 ----- -<1> If a value is not specified for the `rendezvousIP` field, one address will be chosen from the static IP addresses specified in the `networkConfig` fields. -<2> The MAC address of an interface on the host, used to determine which host to apply the configuration to. -<3> The static IP address of the target bare metal host. -<4> The static IP address’s subnet prefix for the target bare metal host. -<5> The DNS server for the target bare metal host. -<6> Next hop address for the node traffic. This must be in the same subnet as the IP address set for the specified interface. - -+ -.. Optional method: {ztp} manifests - -+ -The optional method of the {ztp} custom resources comprises 6 custom resources; you can configure static IPs in the `nmstateconfig.yaml` file. - -+ -[source,yaml] ----- -apiVersion: agent-install.openshift.io/v1beta1 -kind: NMStateConfig -metadata: - name: master-0 - namespace: openshift-machine-api - labels: - cluster0-nmstate-label-name: cluster0-nmstate-label-value -spec: - config: - interfaces: - - name: eth0 - type: ethernet - state: up - mac-address: 52:54:01:aa:aa:a1 - ipv4: - enabled: true - address: - - ip: 192.168.122.2 <1> - prefix-length: 23 <2> - dhcp: false - dns-resolver: - config: - server: - - 192.168.122.1 <3> - routes: - config: - - destination: 0.0.0.0/0 - next-hop-address: 192.168.122.1 <4> - next-hop-interface: eth0 - table-id: 254 - interfaces: - - name: eth0 - macAddress: 52:54:01:aa:aa:a1 <5> ----- -<1> The static IP address of the target bare metal host. -<2> The static IP address’s subnet prefix for the target bare metal host. -<3> The DNS server for the target bare metal host. -<4> Next hop address for the node traffic. This must be in the same subnet as the IP address set for the specified interface. -<5> The MAC address of an interface on the host, used to determine which host to apply the configuration to. - -The rendezvous IP is chosen from the static IP addresses specified in the `config` fields. 
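Whichever method you use, the networking configuration is consumed when you generate the agent ISO, as noted at the start of this section. A sketch of that step, where `<assets_directory>` is a placeholder for the directory containing your configuration files:

[source,terminal]
----
$ openshift-install agent create image --dir <assets_directory>
----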
diff --git a/modules/agent-install-sample-config-bond-sriov.adoc b/modules/agent-install-sample-config-bond-sriov.adoc deleted file mode 100644 index 1cdbcb2cbc25..000000000000 --- a/modules/agent-install-sample-config-bond-sriov.adoc +++ /dev/null @@ -1,119 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing-with-agent-based-installer/preparing-to-install-with-agent-based-installer.adoc - -:_content-type: REFERENCE -[id="agent-install-sample-config-bond-sriov_{context}"] -= Example: Bonds and SR-IOV dual-nic node network configuration - -:FeatureName: Support for Day 1 operations associated with enabling NIC partitioning for SR-IOV devices -include::snippets/technology-preview.adoc[leveloffset=+1] - -The following `agent-config.yaml` file is an example of a manifest for dual port NIC with a bond and SR-IOV interfaces: - -[source,yaml] ----- -apiVersion: v1alpha1 -kind: AgentConfig -rendezvousIP: 10.10.10.14 -hosts: - - hostname: worker-1 - interfaces: - - name: eno1 - macAddress: 0c:42:a1:55:f3:06 - - name: eno2 - macAddress: 0c:42:a1:55:f3:07 - networkConfig: <1> - interfaces: <2> - - name: eno1 <3> - type: ethernet <4> - state: up - mac-address: 0c:42:a1:55:f3:06 - ipv4: - enabled: true - dhcp: false <5> - ethernet: - sr-iov: - total-vfs: 2 <6> - ipv6: - enabled: false - - name: sriov:eno1:0 - type: ethernet - state: up <7> - ipv4: - enabled: false <8> - ipv6: - enabled: false - dhcp: false - - name: sriov:eno1:1 - type: ethernet - state: down - - name: eno2 - type: ethernet - state: up - mac-address: 0c:42:a1:55:f3:07 - ipv4: - enabled: true - ethernet: - sr-iov: - total-vfs: 2 - ipv6: - enabled: false - - name: sriov:eno2:0 - type: ethernet - state: up - ipv4: - enabled: false - ipv6: - enabled: false - - name: sriov:eno2:1 - type: ethernet - state: down - - name: bond0 - type: bond - state: up - min-tx-rate: 100 <9> - max-tx-rate: 200 <10> - link-aggregation: - mode: active-backup <11> - options: - primary: sriov:eno1:0 <12> - port: - - sriov:eno1:0 - - sriov:eno2:0 - ipv4: - address: - - ip: 10.19.16.57 <13> - prefix-length: 23 - dhcp: false - enabled: true - ipv6: - enabled: false - dns-resolver: - config: - server: - - 10.11.5.160 - - 10.2.70.215 - routes: - config: - - destination: 0.0.0.0/0 - next-hop-address: 10.19.17.254 - next-hop-interface: bond0 <14> - table-id: 254 ----- -<1> The `networkConfig` field contains information about the network configuration of the host, with subfields including `interfaces`,`dns-resolver`, and `routes`. -<2> The `interfaces` field is an array of network interfaces defined for the host. -<3> The name of the interface. -<4> The type of interface. This example creates an ethernet interface. -<5> Set this to `false` to disable DHCP for the physical function (PF) if it is not strictly required. -<6> Set this to the number of SR-IOV virtual functions (VFs) to instantiate. -<7> Set this to `up`. -<8> Set this to `false` to disable IPv4 addressing for the VF attached to the bond. -<9> Sets a minimum transmission rate, in Mbps, for the VF. This sample value sets a rate of 100 Mbps. - * This value must be less than or equal to the maximum transmission rate. - * Intel NICs do not support the `min-tx-rate` parameter. For more information, see link:https://bugzilla.redhat.com/show_bug.cgi?id=1772847[*BZ#1772847*]. -<10> Sets a maximum transmission rate, in Mbps, for the VF. This sample value sets a rate of 200 Mbps. -<11> Sets the desired bond mode. -<12> Sets the preferred port of the bonding interface. 
The primary device is the first of the bonding interfaces to be used and is not abandoned unless it fails. This setting is particularly useful when one NIC in the bonding interface is faster and, therefore, able to handle a bigger load. This setting is only valid when the bonding interface is in `active-backup` mode (mode 1) and `balance-tlb` (mode 5). -<13> Sets a static IP address for the bond interface. This is the node IP address. -<14> Sets `bond0` as the gateway for the default route. \ No newline at end of file diff --git a/modules/agent-install-sample-config-bonds-vlans.adoc b/modules/agent-install-sample-config-bonds-vlans.adoc deleted file mode 100644 index 8c1a13ea404e..000000000000 --- a/modules/agent-install-sample-config-bonds-vlans.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing-with-agent-based-installer/preparing-to-install-with-agent-based-installer.adoc - -:_content-type: REFERENCE -[id="agent-install-sample-config-bonds-vlans_{context}"] -= Example: Bonds and VLAN interface node network configuration - -The following `agent-config.yaml` file is an example of a manifest for bond and VLAN interfaces. - -[source,yaml] ----- - apiVersion: v1alpha1 - kind: AgentConfig - rendezvousIP: 10.10.10.14 - hosts: - - hostname: master0 - role: master - interfaces: - - name: enp0s4 - macAddress: 00:21:50:90:c0:10 - - name: enp0s5 - macAddress: 00:21:50:90:c0:20 - networkConfig: - interfaces: - - name: bond0.300 <1> - type: vlan <2> - state: up - vlan: - base-iface: bond0 - id: 300 - ipv4: - enabled: true - address: - - ip: 10.10.10.14 - prefix-length: 24 - dhcp: false - - name: bond0 <1> - type: bond <3> - state: up - mac-address: 00:21:50:90:c0:10 <4> - ipv4: - enabled: false - ipv6: - enabled: false - link-aggregation: - mode: active-backup <5> - options: - miimon: "150" <6> - port: - - enp0s4 - - enp0s5 - dns-resolver: <7> - config: - server: - - 10.10.10.11 - - 10.10.10.12 - routes: - config: - - destination: 0.0.0.0/0 - next-hop-address: 10.10.10.10 <8> - next-hop-interface: bond0.300 <9> - table-id: 254 ----- -<1> Name of the interface. -<2> The type of interface. This example creates a VLAN. -<3> The type of interface. This example creates a bond. -<4> The mac address of the interface. -<5> The `mode` attribute specifies the bonding mode. -<6> Specifies the MII link monitoring frequency in milliseconds. This example inspects the bond link every 150 milliseconds. -<7> Optional: Specifies the search and server settings for the DNS server. -<8> Next hop address for the node traffic. This must be in the same subnet as the IP address set for the specified interface. -<9> Next hop interface for the node traffic. diff --git a/modules/agent-installer-configuring-fips-compliance.adoc b/modules/agent-installer-configuring-fips-compliance.adoc deleted file mode 100644 index 7e988324cbd8..000000000000 --- a/modules/agent-installer-configuring-fips-compliance.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_with_agent_bases_installer/preparing-to-install-with-agent-based-installer.adoc - - -:_content-type: PROCEDURE -[id="agent-installer-configuring-fips-compliance_{context}"] - -= Configuring FIPS through the Agent-based Installer - -During a cluster deployment, the Federal Information Processing Standards (FIPS) change is applied when the Red Hat Enterprise Linux CoreOS (RHCOS) machines are deployed in your cluster. 
For Red Hat Enterprise Linux (RHEL) machines, you must enable FIPS mode when you install the operating system on the machines that you plan to use as worker machines. - -You can enable FIPS mode through the preferred method of `install-config.yaml` and `agent.config.yaml`: - -. You must set value of the `fips` field to `True` in the `install-config.yaml` file: -+ -.Sample install-config.yaml.file - -[source,yaml] ----- -apiVersion: v1 -baseDomain: test.example.com -metadata: - name: sno-cluster -fips: True ----- - -. Optional: If you are using the {ztp} manifests, you must set the value of `fips` as `True` in the `Agent-install.openshift.io/install-config-overrides` field in the `agent-cluster-install.yaml` file: - -+ -.Sample agent-cluster-install.yaml file -[source,yaml] ----- -apiVersion: extensions.hive.openshift.io/v1beta1 -kind: AgentClusterInstall -metadata: - annotations: - agent-install.openshift.io/install-config-overrides: '{"fips": True}' - name: sno-cluster - namespace: sno-cluster-test ----- diff --git a/modules/agent-installer-fips-compliance.adoc b/modules/agent-installer-fips-compliance.adoc deleted file mode 100644 index fef47f8db3f8..000000000000 --- a/modules/agent-installer-fips-compliance.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_with_agent_bases_installer/preparing-to-install-with-agent-based-installer.adoc - - -:_content-type: CONCEPT -[id="agent-installer-fips-compliance_{context}"] -= About FIPS compliance - -For many {product-title} customers, regulatory readiness, or compliance, on some level is required before any systems can be put into production. That regulatory readiness can be imposed by national standards, industry standards or the organization's corporate governance framework. -Federal Information Processing Standards (FIPS) compliance is one of the most critical components required in highly secure environments to ensure that only supported cryptographic technologies are allowed on nodes. - -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== diff --git a/modules/ai-adding-worker-nodes-to-cluster.adoc b/modules/ai-adding-worker-nodes-to-cluster.adoc deleted file mode 100644 index 60313ea00a3e..000000000000 --- a/modules/ai-adding-worker-nodes-to-cluster.adoc +++ /dev/null @@ -1,320 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/nodes-sno-worker-nodes.adoc - -:_content-type: PROCEDURE -[id="ai-adding-worker-nodes-to-cluster_{context}"] -= Adding worker nodes using the Assisted Installer REST API - -You can add worker nodes to clusters using the Assisted Installer REST API. - -.Prerequisites - -* Install the OpenShift Cluster Manager CLI (`ocm`). - -* Log in to link:https://console.redhat.com/openshift/assisted-installer/clusters[{cluster-manager}] as a user with cluster creation privileges. - -* Install `jq`. - -* Ensure that all the required DNS records exist for the cluster that you are adding the worker node to. - -.Procedure - -. Authenticate against the Assisted Installer REST API and generate a JSON web token (JWT) for your session. The generated JWT token is valid for 15 minutes only. - -. Set the `$API_URL` variable by running the following command: -+ -[source,terminal] ----- -$ export API_URL= <1> ----- -<1> Replace `` with the Assisted Installer API URL, for example, `https://api.openshift.com` - -. 
Import the {sno} cluster by running the following commands: - -.. Set the `$OPENSHIFT_CLUSTER_ID` variable. Log in to the cluster and run the following command: -+ -[source,terminal] ----- -$ export OPENSHIFT_CLUSTER_ID=$(oc get clusterversion -o jsonpath='{.items[].spec.clusterID}') ----- - -.. Set the `$CLUSTER_REQUEST` variable that is used to import the cluster: -+ -[source,terminal] ----- -$ export CLUSTER_REQUEST=$(jq --null-input --arg openshift_cluster_id "$OPENSHIFT_CLUSTER_ID" '{ - "api_vip_dnsname": "", <1> - "openshift_cluster_id": $openshift_cluster_id, - "name": "" <2> -}') ----- -<1> Replace `` with the hostname for the cluster's API server. This can be the DNS domain for the API server or the IP address of the single node which the worker node can reach. For example, `api.compute-1.example.com`. -<2> Replace `` with the plain text name for the cluster. The cluster name should match the cluster name that was set during the Day 1 cluster installation. - -.. Import the cluster and set the `$CLUSTER_ID` variable. Run the following command: -+ -[source,terminal] ----- -$ CLUSTER_ID=$(curl "$API_URL/api/assisted-install/v2/clusters/import" -H "Authorization: Bearer ${JWT_TOKEN}" -H 'accept: application/json' -H 'Content-Type: application/json' \ - -d "$CLUSTER_REQUEST" | tee /dev/stderr | jq -r '.id') ----- - -. Generate the `InfraEnv` resource for the cluster and set the `$INFRA_ENV_ID` variable by running the following commands: - -.. Download the pull secret file from Red Hat OpenShift Cluster Manager at link:console.redhat.com/openshift/install/pull-secret[console.redhat.com]. - -.. Set the `$INFRA_ENV_REQUEST` variable: -+ -[source,terminal] ----- -export INFRA_ENV_REQUEST=$(jq --null-input \ - --slurpfile pull_secret \//<1> - --arg ssh_pub_key "$(cat )" \//<2> - --arg cluster_id "$CLUSTER_ID" '{ - "name": "", <3> - "pull_secret": $pull_secret[0] | tojson, - "cluster_id": $cluster_id, - "ssh_authorized_key": $ssh_pub_key, - "image_type": "" <4> -}') ----- -<1> Replace `` with the path to the local file containing the downloaded pull secret from Red Hat OpenShift Cluster Manager at link:console.redhat.com/openshift/install/pull-secret[console.redhat.com]. -<2> Replace `` with the path to the public SSH key required to access the host. If you do not set this value, you cannot access the host while in discovery mode. -<3> Replace `` with the plain text name for the `InfraEnv` resource. -<4> Replace `` with the ISO image type, either `full-iso` or `minimal-iso`. - -.. Post the `$INFRA_ENV_REQUEST` to the link:https://api.openshift.com/?urls.primaryName=assisted-service%20service#/installer/RegisterInfraEnv[/v2/infra-envs] API and set the `$INFRA_ENV_ID` variable: -+ -[source,terminal] ----- -$ INFRA_ENV_ID=$(curl "$API_URL/api/assisted-install/v2/infra-envs" -H "Authorization: Bearer ${JWT_TOKEN}" -H 'accept: application/json' -H 'Content-Type: application/json' -d "$INFRA_ENV_REQUEST" | tee /dev/stderr | jq -r '.id') ----- - -. 
Get the URL of the discovery ISO for the cluster worker node by running the following command: -+ -[source,terminal] ----- -$ curl -s "$API_URL/api/assisted-install/v2/infra-envs/$INFRA_ENV_ID" -H "Authorization: Bearer ${JWT_TOKEN}" | jq -r '.download_url' ----- -+ -.Example output -[source,terminal] ----- -https://api.openshift.com/api/assisted-images/images/41b91e72-c33e-42ee-b80f-b5c5bbf6431a?arch=x86_64&image_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2NTYwMjYzNzEsInN1YiI6IjQxYjkxZTcyLWMzM2UtNDJlZS1iODBmLWI1YzViYmY2NDMxYSJ9.1EX_VGaMNejMhrAvVRBS7PDPIQtbOOc8LtG8OukE1a4&type=minimal-iso&version=$VERSION ----- - -. Download the ISO: -+ -[source,terminal] ----- -$ curl -L -s '' --output rhcos-live-minimal.iso <1> ----- -<1> Replace `` with the URL for the ISO from the previous step. - -. Boot the new worker host from the downloaded `rhcos-live-minimal.iso`. - -. Get the list of hosts in the cluster that are _not_ installed. Keep running the following command until the new host shows up: -+ -[source,terminal] ----- -$ curl -s "$API_URL/api/assisted-install/v2/clusters/$CLUSTER_ID" -H "Authorization: Bearer ${JWT_TOKEN}" | jq -r '.hosts[] | select(.status != "installed").id' ----- -+ -.Example output -[source,terminal] ----- -2294ba03-c264-4f11-ac08-2f1bb2f8c296 ----- - -. Set the `$HOST_ID` variable for the new worker node, for example: -+ -[source,terminal] ----- -$ HOST_ID= <1> ----- -<1> Replace `` with the host ID from the previous step. - -. Check that the host is ready to install by running the following command: -+ -[NOTE] -==== -Ensure that you copy the entire command including the complete `jq` expression. -==== -+ -[source,terminal] ----- -$ curl -s $API_URL/api/assisted-install/v2/clusters/$CLUSTER_ID -H "Authorization: Bearer ${JWT_TOKEN}" | jq ' -def host_name($host): - if (.suggested_hostname // "") == "" then - if (.inventory // "") == "" then - "Unknown hostname, please wait" - else - .inventory | fromjson | .hostname - end - else - .suggested_hostname - end; - -def is_notable($validation): - ["failure", "pending", "error"] | any(. == $validation.status); - -def notable_validations($validations_info): - [ - $validations_info // "{}" - | fromjson - | to_entries[].value[] - | select(is_notable(.)) - ]; - -{ - "Hosts validations": { - "Hosts": [ - .hosts[] - | select(.status != "installed") - | { - "id": .id, - "name": host_name(.), - "status": .status, - "notable_validations": notable_validations(.validations_info) - } - ] - }, - "Cluster validations info": { - "notable_validations": notable_validations(.validations_info) - } -} -' -r ----- -+ -.Example output -[source,terminal] ----- -{ - "Hosts validations": { - "Hosts": [ - { - "id": "97ec378c-3568-460c-bc22-df54534ff08f", - "name": "localhost.localdomain", - "status": "insufficient", - "notable_validations": [ - { - "id": "ntp-synced", - "status": "failure", - "message": "Host couldn't synchronize with any NTP server" - }, - { - "id": "api-domain-name-resolved-correctly", - "status": "error", - "message": "Parse error for domain name resolutions result" - }, - { - "id": "api-int-domain-name-resolved-correctly", - "status": "error", - "message": "Parse error for domain name resolutions result" - }, - { - "id": "apps-domain-name-resolved-correctly", - "status": "error", - "message": "Parse error for domain name resolutions result" - } - ] - } - ] - }, - "Cluster validations info": { - "notable_validations": [] - } -} ----- - -. 
When the previous command shows that the host is ready, start the installation using the link:https://api.openshift.com/?urls.primaryName=assisted-service%20service#/installer/v2InstallHost[/v2/infra-envs/{infra_env_id}/hosts/{host_id}/actions/install] API by running the following command: -+ -[source,terminal] ----- -$ curl -X POST -s "$API_URL/api/assisted-install/v2/infra-envs/$INFRA_ENV_ID/hosts/$HOST_ID/actions/install" -H "Authorization: Bearer ${JWT_TOKEN}" ----- - -. As the installation proceeds, the installation generates pending certificate signing requests (CSRs) for the worker node. -+ -[IMPORTANT] -==== -You must approve the CSRs to complete the installation. -==== -+ -Keep running the following API call to monitor the cluster installation: -+ -[source,terminal] ----- -$ curl -s "$API_URL/api/assisted-install/v2/clusters/$CLUSTER_ID" -H "Authorization: Bearer ${JWT_TOKEN}" | jq '{ - "Cluster day-2 hosts": - [ - .hosts[] - | select(.status != "installed") - | {id, requested_hostname, status, status_info, progress, status_updated_at, updated_at, infra_env_id, cluster_id, created_at} - ] -}' ----- -+ -.Example output -[source,terminal] ----- -{ - "Cluster day-2 hosts": [ - { - "id": "a1c52dde-3432-4f59-b2ae-0a530c851480", - "requested_hostname": "control-plane-1", - "status": "added-to-existing-cluster", - "status_info": "Host has rebooted and no further updates will be posted. Please check console for progress and to possibly approve pending CSRs", - "progress": { - "current_stage": "Done", - "installation_percentage": 100, - "stage_started_at": "2022-07-08T10:56:20.476Z", - "stage_updated_at": "2022-07-08T10:56:20.476Z" - }, - "status_updated_at": "2022-07-08T10:56:20.476Z", - "updated_at": "2022-07-08T10:57:15.306369Z", - "infra_env_id": "b74ec0c3-d5b5-4717-a866-5b6854791bd3", - "cluster_id": "8f721322-419d-4eed-aa5b-61b50ea586ae", - "created_at": "2022-07-06T22:54:57.161614Z" - } - ] -} ----- - -. Optional: Run the following command to see all the events for the cluster: -+ -[source,terminal] ----- -$ curl -s "$API_URL/api/assisted-install/v2/events?cluster_id=$CLUSTER_ID" -H "Authorization: Bearer ${JWT_TOKEN}" | jq -c '.[] | {severity, message, event_time, host_id}' ----- -+ -.Example output -[source,terminal] ----- -{"severity":"info","message":"Host compute-0: updated status from insufficient to known (Host is ready to be installed)","event_time":"2022-07-08T11:21:46.346Z","host_id":"9d7b3b44-1125-4ad0-9b14-76550087b445"} -{"severity":"info","message":"Host compute-0: updated status from known to installing (Installation is in progress)","event_time":"2022-07-08T11:28:28.647Z","host_id":"9d7b3b44-1125-4ad0-9b14-76550087b445"} -{"severity":"info","message":"Host compute-0: updated status from installing to installing-in-progress (Starting installation)","event_time":"2022-07-08T11:28:52.068Z","host_id":"9d7b3b44-1125-4ad0-9b14-76550087b445"} -{"severity":"info","message":"Uploaded logs for host compute-0 cluster 8f721322-419d-4eed-aa5b-61b50ea586ae","event_time":"2022-07-08T11:29:47.802Z","host_id":"9d7b3b44-1125-4ad0-9b14-76550087b445"} -{"severity":"info","message":"Host compute-0: updated status from installing-in-progress to added-to-existing-cluster (Host has rebooted and no further updates will be posted. 
Please check console for progress and to possibly approve pending CSRs)","event_time":"2022-07-08T11:29:48.259Z","host_id":"9d7b3b44-1125-4ad0-9b14-76550087b445"} -{"severity":"info","message":"Host: compute-0, reached installation stage Rebooting","event_time":"2022-07-08T11:29:48.261Z","host_id":"9d7b3b44-1125-4ad0-9b14-76550087b445"} ----- - -. Log in to the cluster and approve the pending CSRs to complete the installation. - -.Verification - -* Check that the new worker node was successfully added to the cluster with a status of `Ready`: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -control-plane-1.example.com Ready master,worker 56m v1.27.3 -compute-1.example.com Ready worker 11m v1.27.3 ----- diff --git a/modules/ai-authenticating-against-ai-rest-api.adoc b/modules/ai-authenticating-against-ai-rest-api.adoc deleted file mode 100644 index 020e34fa64c0..000000000000 --- a/modules/ai-authenticating-against-ai-rest-api.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/nodes-sno-worker-nodes.adoc - -:_content-type: PROCEDURE -[id="ai-authenticating-against-ai-rest-api_{context}"] -= Authenticating against the Assisted Installer REST API - -Before you can use the Assisted Installer REST API, you must authenticate against the API using a JSON web token (JWT) that you generate. - -.Prerequisites - -* Log in to link:https://console.redhat.com/openshift/assisted-installer/clusters[{cluster-manager}] as a user with cluster creation privileges. - -* Install `jq`. - -.Procedure - -. Log in to link:https://console.redhat.com/openshift/token/show[{cluster-manager}] and copy your API token. - -. Set the `$OFFLINE_TOKEN` variable using the copied API token by running the following command: -+ -[source,terminal] ----- -$ export OFFLINE_TOKEN= ----- - -. Set the `$JWT_TOKEN` variable using the previously set `$OFFLINE_TOKEN` variable: -+ -[source,terminal] ----- -$ export JWT_TOKEN=$( - curl \ - --silent \ - --header "Accept: application/json" \ - --header "Content-Type: application/x-www-form-urlencoded" \ - --data-urlencode "grant_type=refresh_token" \ - --data-urlencode "client_id=cloud-services" \ - --data-urlencode "refresh_token=${OFFLINE_TOKEN}" \ - "https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token" \ - | jq --raw-output ".access_token" -) ----- -+ -[NOTE] -==== -The JWT token is valid for 15 minutes only. 
-==== - -.Verification - -* Optional: Check that you can access the API by running the following command: -+ -[source,terminal] ----- -$ curl -s https://api.openshift.com/api/assisted-install/v2/component-versions -H "Authorization: Bearer ${JWT_TOKEN}" | jq ----- -+ -.Example output -[source,yaml] ----- -{ - "release_tag": "v2.5.1", - "versions": - { - "assisted-installer": "registry.redhat.io/rhai-tech-preview/assisted-installer-rhel8:v1.0.0-175", - "assisted-installer-controller": "registry.redhat.io/rhai-tech-preview/assisted-installer-reporter-rhel8:v1.0.0-223", - "assisted-installer-service": "quay.io/app-sre/assisted-service:ac87f93", - "discovery-agent": "registry.redhat.io/rhai-tech-preview/assisted-installer-agent-rhel8:v1.0.0-156" - } -} ----- diff --git a/modules/ai-sno-requirements-for-installing-worker-nodes.adoc b/modules/ai-sno-requirements-for-installing-worker-nodes.adoc deleted file mode 100644 index b0e32c1e235d..000000000000 --- a/modules/ai-sno-requirements-for-installing-worker-nodes.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// This is included in the following assemblies: -// -// * nodes/nodes/nodes-sno-worker-nodes.adoc - -[id="ai-sno-requirements-for-installing-worker-nodes_{context}"] -= Requirements for installing {sno} worker nodes - -To install a {sno} worker node, you must address the following requirements: - -* *Administration host:* You must have a computer to prepare the ISO and to monitor the installation. - -* *Production-grade server:* Installing {sno} worker nodes requires a server with sufficient resources to run {product-title} services and a production workload. -+ -.Minimum resource requirements -[options="header"] -|==== - -|Profile|vCPU|Memory|Storage - -|Minimum|2 vCPU cores|8GB of RAM| 100GB - -|==== -+ -[NOTE] -==== -One vCPU is equivalent to one physical core when simultaneous multithreading (SMT), or hyperthreading, is not enabled. When enabled, use the following formula to calculate the corresponding ratio: - -(threads per core × cores) × sockets = vCPUs -==== -+ -The server must have a Baseboard Management Controller (BMC) when booting with virtual media. - -* *Networking:* The worker node server must have access to the internet or access to a local registry if it is not connected to a routable network. The worker node server must have a DHCP reservation or a static IP address and be able to access the {sno} cluster Kubernetes API, ingress route, and cluster node domain names. You must configure the DNS to resolve the IP address to each of the following fully qualified domain names (FQDN) for the {sno} cluster: -+ -.Required DNS records -[options="header"] -|==== - -|Usage|FQDN|Description - -|Kubernetes API|`api..`| Add a DNS A/AAAA or CNAME record. This record must be resolvable by clients external to the cluster. - -|Internal API|`api-int..`| Add a DNS A/AAAA or CNAME record when creating the ISO manually. This record must be resolvable by nodes within the cluster. - -|Ingress route|`*.apps..`| Add a wildcard DNS A/AAAA or CNAME record that targets the node. This record must be resolvable by clients external to the cluster. - -|==== -+ -Without persistent IP addresses, communications between the `apiserver` and `etcd` might fail. 
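Before booting the worker host, you can confirm from the administration host that these records resolve. The following is a minimal sketch using `dig`; replace `<cluster_name>` and `<base_domain>` with your values:

[source,terminal]
----
$ dig +short api.<cluster_name>.<base_domain> <1>
$ dig +short api-int.<cluster_name>.<base_domain> <2>
$ dig +short test.apps.<cluster_name>.<base_domain> <3>
----
<1> Must be resolvable by clients external to the cluster.
<2> Must be resolvable by nodes within the cluster.
<3> Any label under `*.apps.<cluster_name>.<base_domain>` must resolve to the node; the `test` label is arbitrary and only exercises the wildcard record.

Each command should return the expected IP address; an empty response indicates a missing or misconfigured record.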
diff --git a/modules/alternatives-to-storing-admin-secrets-in-kube-system.adoc b/modules/alternatives-to-storing-admin-secrets-in-kube-system.adoc deleted file mode 100644 index 74d9b70fbee6..000000000000 --- a/modules/alternatives-to-storing-admin-secrets-in-kube-system.adoc +++ /dev/null @@ -1,94 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/manually-creating-iam.adoc -// * installing/installing_azure/manually-creating-iam-azure.adoc -// * installing/installing_gcp/manually-creating-iam-gcp.adoc -// * installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc - -ifeval::["{context}" == "manually-creating-iam-aws"] -:aws: -endif::[] -ifeval::["{context}" == "manually-creating-iam-azure"] -:azure: -endif::[] -ifeval::["{context}" == "manually-creating-iam-gcp"] -:google-cloud-platform: -endif::[] -ifeval::["{context}" == "configuring-iam-ibm-cloud"] -:ibm-cloud: -endif::[] - -[id="alternatives-to-storing-admin-secrets-in-kube-system_{context}"] -= Alternatives to storing administrator-level secrets in the kube-system project - -The Cloud Credential Operator (CCO) manages cloud provider credentials as Kubernetes custom resource definitions (CRDs). You can configure the CCO to suit the security requirements of your organization by setting different values for the `credentialsMode` parameter in the `install-config.yaml` file. - -ifdef::aws,google-cloud-platform[] -If you prefer not to store an administrator-level credential secret in the cluster `kube-system` project, you can choose one of the following options when installing {product-title}: - -endif::aws,google-cloud-platform[] - -ifdef::aws[] -* *Use the Amazon Web Services Security Token Service*: -+ -You can use the CCO utility (`ccoctl`) to configure the cluster to use the Amazon Web Services Security Token Service (AWS STS). When the CCO utility is used to configure the cluster for STS, it assigns IAM roles that provide short-term, limited-privilege security credentials to components. -+ -[NOTE] -==== -This credentials strategy is supported for only new {product-title} clusters and must be configured during installation. You cannot reconfigure an existing cluster that uses a different credentials strategy to use this feature. -==== - -endif::aws[] - -ifdef::google-cloud-platform[] -* *Use manual mode with GCP Workload Identity*: -+ -You can use the CCO utility (`ccoctl`) to configure the cluster to use manual mode with GCP Workload Identity. When the CCO utility is used to configure the cluster for GCP Workload Identity, it signs service account tokens that provide short-term, limited-privilege security credentials to components. -+ -[NOTE] -==== -This credentials strategy is supported for only new {product-title} clusters and must be configured during installation. You cannot reconfigure an existing cluster that uses a different credentials strategy to use this feature. -==== - -endif::google-cloud-platform[] - -ifdef::aws,google-cloud-platform[] -* *Manage cloud credentials manually*: -+ -You can set the `credentialsMode` parameter for the CCO to `Manual` to manage cloud credentials manually. Using manual mode allows each cluster component to have only the permissions it requires, without storing an administrator-level credential in the cluster. You can also use this mode if your environment does not have connectivity to the cloud provider public IAM endpoint. However, you must manually reconcile permissions with new release images for every upgrade. 
You must also manually supply credentials for every component that requests them. - -* *Remove the administrator-level credential secret after installing {product-title} with mint mode*: -+ -If you are using the CCO with the `credentialsMode` parameter set to `Mint`, you can remove or rotate the administrator-level credential after installing {product-title}. Mint mode is the default configuration for the CCO. This option requires the presence of the administrator-level credential during an installation. The administrator-level credential is used during the installation to mint other credentials with some permissions granted. The original credential secret is not stored in the cluster permanently. - -[NOTE] -==== -Prior to a non z-stream upgrade, you must reinstate the credential secret with the administrator-level credential. If the credential is not present, the upgrade might be blocked. -==== - -endif::aws,google-cloud-platform[] - -ifdef::azure[] -If you prefer not to store an administrator-level credential secret in the cluster `kube-system` project, you can set the `credentialsMode` parameter for the CCO to `Manual` when installing {product-title} and manage your cloud credentials manually. - -Using manual mode allows each cluster component to have only the permissions it requires, without storing an administrator-level credential in the cluster. You can also use this mode if your environment does not have connectivity to the cloud provider public IAM endpoint. However, you must manually reconcile permissions with new release images for every upgrade. You must also manually supply credentials for every component that requests them. -endif::azure[] - -ifdef::ibm-cloud[] -Storing an administrator-level credential secret in the cluster `kube-system` project is not supported for IBM Cloud; therefore, you must set the `credentialsMode` parameter for the CCO to `Manual` when installing {product-title} and manage your cloud credentials manually. - -Using manual mode allows each cluster component to have only the permissions it requires, without storing an administrator-level credential in the cluster. You can also use this mode if your environment does not have connectivity to the cloud provider public IAM endpoint. However, you must manually reconcile permissions with new release images for every upgrade. You must also manually supply credentials for every component that requests them. -endif::ibm-cloud[] - -ifeval::["{context}" == "manually-creating-iam-aws"] -:!aws: -endif::[] -ifeval::["{context}" == "manually-creating-iam-azure"] -:!azure: -endif::[] -ifeval::["{context}" == "manually-creating-iam-gcp"] -:!google-cloud-platform: -endif::[] -ifeval::["{context}" == "configuring-iam-ibm-cloud"] -:!ibm-cloud: -endif::[] diff --git a/modules/annotating-a-route-with-a-cookie-name.adoc b/modules/annotating-a-route-with-a-cookie-name.adoc deleted file mode 100644 index f2bef5522ad0..000000000000 --- a/modules/annotating-a-route-with-a-cookie-name.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// *using-cookies-to-keep-route-statefulness - -[id="annotating-a-route-with-a-cookie_{context}"] -= Annotating a route with a cookie - -You can set a cookie name to overwrite the default, auto-generated one for the -route. This allows the application receiving route traffic to know the cookie -name. By deleting the cookie it can force the next request to re-choose an -endpoint. 
So, if a server was overloaded it tries to remove the requests from the -client and redistribute them. - -.Procedure - -. Annotate the route with the desired cookie name: -+ -[source,terminal] ----- -$ oc annotate route router.openshift.io/="-" ----- -+ -For example, to annotate the cookie name of `my_cookie` to the `my_route` with -the annotation of `my_cookie_anno`: -+ -[source,terminal] ----- -$ oc annotate route my_route router.openshift.io/my_cookie="-my_cookie_anno" ----- - -. Save the cookie, and access the route: -+ -[source,terminal] ----- -$ curl $my_route -k -c /tmp/my_cookie ----- diff --git a/modules/api-compatibility-common-terminology.adoc b/modules/api-compatibility-common-terminology.adoc deleted file mode 100644 index 9862ede14c06..000000000000 --- a/modules/api-compatibility-common-terminology.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * rest_api/understanding-compatibility-guidelines.adoc - -[id="api-compatibility-common-terminology_{context}"] -= API compatibility common terminology - -[id="api-compatibility-common-terminology-api_{context}"] -== Application Programming Interface (API) - -An API is a public interface implemented by a software program that enables it to interact with other software. In {product-title}, the API is served from a centralized API server and is used as the hub for all system interaction. - -[id="api-compatibility-common-terminology-aoe_{context}"] -== Application Operating Environment (AOE) - -An AOE is the integrated environment that executes the end-user application program. The AOE is a containerized environment that provides isolation from the host operating system (OS). At a minimum, AOE allows the application to run in an isolated manner from the host OS libraries and binaries, but still share the same OS kernel as all other containers on the host. The AOE is enforced at runtime and it describes the interface between an application and its operating environment. It includes intersection points between the platform, operating system and environment, with the user application including projection of downward API, DNS, resource accounting, device access, platform workload identity, isolation among containers, isolation between containers and host OS. - -The AOE does not include components that might vary by installation, such as Container Network Interface (CNI) plugin selection or extensions to the product such as admission hooks. Components that integrate with the cluster at a level below the container environment might be subjected to additional variation between versions. - -[id="api-compatibility-common-terminology-virtualized_{context}"] -== Compatibility in a virtualized environment - -Virtual environments emulate bare-metal environments such that unprivileged applications that run on bare-metal environments will run, unmodified, in corresponding virtual environments. Virtual environments present simplified abstracted views of physical resources, so some differences might exist. - -[id="api-compatibility-common-terminology-cloud_{context}"] -== Compatibility in a cloud environment - -{product-title} might choose to offer integration points with a hosting cloud environment via cloud provider specific integrations. The compatibility of these integration points are specific to the guarantee provided by the native cloud vendor and its intersection with the {product-title} compatibility window. 
Where {product-title} provides an integration with a cloud environment natively as part of the default installation, Red Hat develops against stable cloud API endpoints to provide commercially reasonable support with forward looking compatibility that includes stable deprecation policies. Example areas of integration between the cloud provider and {product-title} include, but are not limited to, dynamic volume provisioning, service load balancer integration, pod workload identity, dynamic management of compute, and infrastructure provisioned as part of initial installation. - -[id="api-compatibility-common-terminology-releases_{context}"] -== Major, minor, and z-stream releases - -A Red Hat major release represents a significant step in the development of a product. Minor releases appear more frequently within the scope of a major release and represent deprecation boundaries that might impact future application compatibility. A z-stream release is an update to a minor release which provides a stream of continuous fixes to an associated minor release. API and AOE compatibility is never broken in a z-stream release except when this policy is explicitly overridden in order to respond to an unforeseen security impact. - -For example, in the release 4.3.2: - -* 4 is the major release version -* 3 is the minor release version -* 2 is the z-stream release version - -[id="api-compatibility-common-terminology-eus_{context}"] -== Extended user support (EUS) - -A minor release in an {product-title} major release that has an extended support window for critical bug fixes. Users are able to migrate between EUS releases by incrementally adopting minor versions between EUS releases. It is important to note that the deprecation policy is defined across minor releases and not EUS releases. As a result, an EUS user might have to respond to a deprecation when migrating to a future EUS while sequentially upgrading through each minor release. - -[id="api-compatibility-common-terminology-dev-preview_{context}"] -== Developer Preview - -An optional product capability that is not officially supported by Red Hat, but is intended to provide a mechanism to explore early phase technology. By default, Developer Preview functionality is opt-in, and subject to removal at any time. Enabling a Developer Preview feature might render a cluster unsupportable dependent upon the scope of the feature. - -[id="api-compatibility-common-terminology-tech-preview_{context}"] -== Technology Preview - -An optional product capability that provides early access to upcoming product innovations to test functionality and provide feedback during the development process. The feature is not fully supported, might not be functionally complete, and is not intended for production use. Usage of a Technology Preview function requires explicit opt-in. Learn more about the link:https://access.redhat.com/support/offerings/techpreview[Technology Preview Features Support Scope]. 
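For illustration only: some cluster-scoped Technology Preview features are enabled by opting into the `TechPreviewNoUpgrade` feature set, while individual features might instead use their own Operator setting or annotation, so check the documentation for the specific feature. Enabling this feature set cannot be undone and prevents minor version updates:

[source,terminal]
----
$ oc patch featuregate cluster --type merge \
  -p '{"spec":{"featureSet":"TechPreviewNoUpgrade"}}'
----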
diff --git a/modules/api-compatibility-exceptions.adoc b/modules/api-compatibility-exceptions.adoc deleted file mode 100644 index 720e5a597039..000000000000 --- a/modules/api-compatibility-exceptions.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * rest_api/understanding-compatibility-guidelines.adoc -// * microshift_rest_api/understanding-compatibility-guidelines.adoc - -[id="api-compatibility-exceptions_{context}"] -= API compatibility exceptions - -The following are exceptions to compatibility in {product-title}: - -ifndef::microshift[] -[discrete] -[id="OS-file-system-modifications-not-made_{context}"] -== RHEL CoreOS file system modifications not made with a supported Operator - -No assurances are made at this time that a modification made to the host operating file system is preserved across minor releases except for where that modification is made through the public interface exposed via a supported Operator, such as the Machine Config Operator or Node Tuning Operator. - -[discrete] -[id="modifications-to-cluster-infrastructure-in-cloud_{context}"] -== Modifications to cluster infrastructure in cloud or virtualized environments - -No assurances are made at this time that a modification to the cloud hosting environment that supports the cluster is preserved except for where that modification is made through a public interface exposed in the product or is documented as a supported configuration. Cluster infrastructure providers are responsible for preserving their cloud or virtualized infrastructure except for where they delegate that authority to the product through an API. -endif::microshift[] - -[discrete] -[id="Functional-defaults-between-upgraded-cluster-new-installation_{context}"] -== Functional defaults between an upgraded cluster and a new installation - -No assurances are made at this time that a new installation of a product minor release will have the same functional defaults as a version of the product that was installed with a prior minor release and upgraded to the equivalent version. For example, future versions of the product may provision cloud infrastructure with different defaults than prior minor versions. In addition, different default security choices may be made in future versions of the product than those made in past versions of the product. Past versions of the product will forward upgrade, but preserve legacy choices where appropriate specifically to maintain backwards compatibility. - -[discrete] -[id="API-fields-that-have-the-prefix-unsupported-annotations_{context}"] -== Usage of API fields that have the prefix "unsupported” or undocumented annotations - -Select APIs in the product expose fields with the prefix `unsupported`. No assurances are made at this time that usage of this field is supported across releases or within a release. Product support can request a customer to specify a value in this field when debugging specific problems, but its usage is not supported outside of that interaction. Usage of annotations on objects that are not explicitly documented are not assured support across minor releases. - -[discrete] -[id="API-availability-per-product-installation-topology_{context}"] -== API availability per product installation topology -The OpenShift distribution will continue to evolve its supported installation topology, and not all APIs in one install topology will necessarily be included in another. 
For example, certain topologies may restrict read/write access to particular APIs if they are in conflict with the product installation topology or not include a particular API at all if not pertinent to that topology. APIs that exist in a given topology will be supported in accordance with the compatibility tiers defined above. diff --git a/modules/api-compatibility-guidelines.adoc b/modules/api-compatibility-guidelines.adoc deleted file mode 100644 index b165dd9bea87..000000000000 --- a/modules/api-compatibility-guidelines.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * rest_api/understanding-compatibility-guidelines.adoc - -[id="api-compatibility-guidelines_{context}"] -= API compatibility guidelines - -Red Hat recommends that application developers adopt the following principles in order to improve compatibility with {product-title}: - -* Use APIs and components with support tiers that match the application's need. -* Build applications using the published client libraries where possible. -* Applications are only guaranteed to run correctly if they execute in an environment that is as new as the environment it was built to execute against. An application that was built for {product-title} 4.7 is not guaranteed to function properly on {product-title} 4.6. -* Do not design applications that rely on configuration files provided by system packages or other components. These files can change between versions unless the upstream community is explicitly committed to preserving them. Where appropriate, depend on any Red Hat provided interface abstraction over those configuration files in order to maintain forward compatibility. Direct file system modification of configuration files is discouraged, and users are strongly encouraged to integrate with an Operator provided API where available to avoid dual-writer conflicts. -* Do not depend on API fields prefixed with `unsupported` or annotations that are not explicitly mentioned in product documentation. -* Do not depend on components with shorter compatibility guarantees than your application. -* Do not perform direct storage operations on the etcd server. All etcd access must be performed via the api-server or through documented backup and restore procedures. - -Red Hat recommends that application developers follow the link:https://access.redhat.com/articles/rhel8-abi-compatibility#Guidelines[compatibility guidelines] defined by {op-system-base-full}. {product-title} strongly recommends the following guidelines when building an application or hosting an application on the platform: - -* Do not depend on a specific Linux kernel or {product-title} version. -* Avoid reading from `proc`, `sys`, and `debug` file systems, or any other pseudo file system. -* Avoid using `ioctls` to directly interact with hardware. -* Avoid direct interaction with `cgroups` in order to not conflict with {product-title} host-agents that provide the container execution environment. - -[NOTE] -==== -During the lifecycle of a release, Red Hat makes commercially reasonable efforts to maintain API and application operating environment (AOE) compatibility across all minor releases and z-stream releases. If necessary, Red Hat might make exceptions to this compatibility goal for critical impact security or other significant issues. 
-==== diff --git a/modules/api-support-deprecation-policy.adoc b/modules/api-support-deprecation-policy.adoc deleted file mode 100644 index c17a088bece5..000000000000 --- a/modules/api-support-deprecation-policy.adoc +++ /dev/null @@ -1,103 +0,0 @@ -// Module included in the following assemblies: -// -// * rest_api/understanding-api-support-tiers.adoc - -[id="api-deprecation-policy_{context}"] -= API deprecation policy - -{product-title} is composed of many components sourced from many upstream communities. It is anticipated that the set of components, the associated API interfaces, and correlated features will evolve over time and might require formal deprecation in order to remove the capability. - -[id="deprecating-parts-of-the-api_{context}"] -== Deprecating parts of the API - -{product-title} is a distributed system where multiple components interact with a shared state managed by the cluster control plane through a set of structured APIs. Per Kubernetes conventions, each API presented by {product-title} is associated with a group identifier and each API group is independently versioned. Each API group is managed in a distinct upstream community including Kubernetes, Metal3, Multus, Operator Framework, Open Cluster Management, OpenShift itself, and more. - -While each upstream community might define their own unique deprecation policy for a given API group and version, Red Hat normalizes the community specific policy to one of the compatibility levels defined prior based on our integration in and awareness of each upstream community to simplify end-user consumption and support. - -The deprecation policy and schedule for APIs vary by compatibility level. - -The deprecation policy covers all elements of the API including: - -* REST resources, also known as API objects -* Fields of REST resources -* Annotations on REST resources, excluding version-specific qualifiers -* Enumerated or constant values - -Other than the most recent API version in each group, older API versions must be supported after their announced deprecation for a duration of no less than: - -[cols="2",options="header"] -|=== -|API tier -|Duration - -|Tier 1 -|Stable within a major release. They may be deprecated within a major release, but they will not be removed until a subsequent major release. - -|Tier 2 -|9 months or 3 releases from the announcement of deprecation, whichever is longer. - -|Tier 3 -|See the component-specific schedule. - -|Tier 4 -|None. No compatibility is guaranteed. - -|=== - -The following rules apply to all tier 1 APIs: - -* API elements can only be removed by incrementing the version of the group. -* API objects must be able to round-trip between API versions without information loss, with the exception of whole REST resources that do not exist in some versions. In cases where equivalent fields do not exist between versions, data will be preserved in the form of annotations during conversion. -* API versions in a given group can not deprecate until a new API version at least as stable is released, except in cases where the entire API object is being removed. - -[id="deprecating-cli-elements_{context}"] -== Deprecating CLI elements - -Client-facing CLI commands are not versioned in the same way as the API, but are user-facing component systems. The two major ways a user interacts with a CLI are through a command or flag, which is referred to in this context as CLI elements. - -All CLI elements default to API tier 1 unless otherwise noted or the CLI depends on a lower tier API. 
- -[cols="3",options="header"] -|=== - -| -|Element -|API tier - -|Generally available (GA) -|Flags and commands -|Tier 1 - -|Technology Preview -|Flags and commands -|Tier 3 - -|Developer Preview -|Flags and commands -|Tier 4 - -|=== - -[id="deprecating-entire-component_{context}"] -== Deprecating an entire component - -The duration and schedule for deprecating an entire component maps directly to the duration associated with the highest API tier of an API exposed by that component. For example, a component that surfaced APIs with tier 1 and 2 could not be removed until the tier 1 deprecation schedule was met. - -[cols="2",options="header"] -|=== -|API tier -|Duration - -|Tier 1 -|Stable within a major release. They may be deprecated within a major release, but they will not be removed until a subsequent major release. - -|Tier 2 -|9 months or 3 releases from the announcement of deprecation, whichever is longer. - -|Tier 3 -|See the component-specific schedule. - -|Tier 4 -|None. No compatibility is guaranteed. - -|=== diff --git a/modules/api-support-tiers-mapping.adoc b/modules/api-support-tiers-mapping.adoc deleted file mode 100644 index 43d98272d256..000000000000 --- a/modules/api-support-tiers-mapping.adoc +++ /dev/null @@ -1,127 +0,0 @@ -// Module included in the following assemblies: -// -// * rest_api/understanding-api-support-tiers.adoc - -[id="api-support-tiers-mapping_{context}"] -= Mapping API tiers to API groups - -For each API tier defined by Red Hat, we provide a mapping table for specific API groups where the upstream communities are committed to maintain forward compatibility. Any API group that does not specify an explicit compatibility level and is not specifically discussed below is assigned API tier 3 by default except for `v1alpha1` APIs which are assigned tier 4 by default. - -[id="mapping-support-tiers-to-kubernetes-api-groups_{context}"] -== Support for Kubernetes API groups - -API groups that end with the suffix `*.k8s.io` or have the form `version.` with no suffix are governed by the Kubernetes deprecation policy and follow a general mapping between API version exposed and corresponding support tier unless otherwise specified. - -[cols="2",options="header"] -|=== -|API version example -|API tier - -|`v1` -|Tier 1 - -|`v1beta1` -|Tier 2 - -|`v1alpha1` -|Tier 4 - -|=== - -ifndef::microshift[] -[id="mapping-support-tiers-to-openshift-api-groups_{context}"] -== Support for OpenShift API groups - -API groups that end with the suffix `*.openshift.io` are governed by the {product-title} deprecation policy and follow a general mapping between API version exposed and corresponding compatibility level unless otherwise specified. 
- -[cols="2",options="header"] -|=== -|API version example -|API tier - -|`apps.openshift.io/v1` -|Tier 1 - -|`authorization.openshift.io/v1` -|Tier 1, some tier 1 deprecated - -|`build.openshift.io/v1` -|Tier 1, some tier 1 deprecated - -|`config.openshift.io/v1` -|Tier 1 - -|`image.openshift.io/v1` -|Tier 1 - -|`network.openshift.io/v1` -|Tier 1 - -|`network.operator.openshift.io/v1` -|Tier 1 - -|`oauth.openshift.io/v1` -|Tier 1 - -|`imagecontentsourcepolicy.operator.openshift.io/v1alpha1` -|Tier 1 - -|`project.openshift.io/v1` -|Tier 1 - -|`quota.openshift.io/v1` -|Tier 1 - -|`route.openshift.io/v1` -|Tier 1 - -|`quota.openshift.io/v1` -|Tier 1 - -|`security.openshift.io/v1` -|Tier 1 except for `RangeAllocation` (tier 4) and `*Reviews` (tier 2) - -|`template.openshift.io/v1` -|Tier 1 - -|`console.openshift.io/v1` -|Tier 2 - -|=== -endif::microshift[] - -ifdef::microshift[] -[id="microshift-mapping-support-tiers-to-openshift-api-groups_{context}"] -== Support for OpenShift API groups -API groups that end with the suffix `*.openshift.io` are governed by the {product-title} deprecation policy and follow a general mapping between API version exposed and corresponding compatibility level unless otherwise specified. - -[cols="2",options="header"] -|=== -|API version example -|API tier - -|`route.openshift.io/v1` -|Tier 1 - -|`security.openshift.io/v1` -|Tier 1 except for `RangeAllocation` (tier 4) and `*Reviews` (tier 2) - -|=== -endif::microshift[] - -ifndef::microshift[] -[id="mapping-support-tiers-to-monitoring-api-groups_{context}"] -== Support for Monitoring API groups - -API groups that end with the suffix `monitoring.coreos.com` have the following mapping: - -[cols="2",options="header"] -|=== -|API version example -|API tier - -|`v1` -|Tier 1 - -|=== -endif::microshift[] \ No newline at end of file diff --git a/modules/api-support-tiers.adoc b/modules/api-support-tiers.adoc deleted file mode 100644 index 7b83cf1e81f7..000000000000 --- a/modules/api-support-tiers.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * rest_api/understanding-api-support-tiers.adoc - -[id="api-tiers_{context}"] -= API tiers - -All commercially supported APIs, components, and features are associated under one of the following support levels: - -[discrete] -[id="api-tier-1_{context}"] -== API tier 1 -APIs and application operating environments (AOEs) are stable within a major release. They may be deprecated within a major release, but they will not be removed until a subsequent major release. - -[discrete] -[id="api-tier-2_{context}"] -== API tier 2 -APIs and AOEs are stable within a major release for a minimum of 9 months or 3 minor releases from the announcement of deprecation, whichever is longer. - -[discrete] -[id="api-tier-3_{context}"] -== API tier 3 -This level applies to languages, tools, applications, and optional Operators included with {product-title} through Operator Hub. Each component will specify a lifetime during which the API and AOE will be supported. Newer versions of language runtime specific components will attempt to be as API and AOE compatible from minor version to minor version as possible. Minor version to minor version compatibility is not guaranteed, however. - -Components and developer tools that receive continuous updates through the Operator Hub, referred to as Operators and operands, should be considered API tier 3. Developers should use caution and understand how these components may change with each minor release. 
Users are encouraged to consult the compatibility guidelines documented by the component. - -[discrete] -[id="api-tier-4_{context}"] -== API tier 4 -No compatibility is provided. API and AOE can change at any point. These capabilities should not be used by applications needing long-term support. - -It is common practice for Operators to use custom resource definitions (CRDs) internally to accomplish a task. These objects are not meant for use by actors external to the Operator and are intended to be hidden. If any CRD is not meant for use by actors external to the Operator, the `operators.operatorframework.io/internal-objects` annotation in the Operators `ClusterServiceVersion` (CSV) should be specified to signal that the corresponding resource is internal use only and the CRD may be explicitly labeled as tier 4. diff --git a/modules/apiserversource-kn.adoc b/modules/apiserversource-kn.adoc deleted file mode 100644 index 53c684393991..000000000000 --- a/modules/apiserversource-kn.adoc +++ /dev/null @@ -1,156 +0,0 @@ -// Module included in the following assemblies: -// -// * serverless/eventing/event-sources/serverless-apiserversource.adoc -// * serverless/reference/kn-eventing-ref.adoc - -:_content-type: PROCEDURE -[id="apiserversource-kn_{context}"] -= Creating an API server source by using the Knative CLI - -You can use the `kn source apiserver create` command to create an API server source by using the `kn` CLI. Using the `kn` CLI to create an API server source provides a more streamlined and intuitive user interface than modifying YAML files directly. - -.Prerequisites - -* The {ServerlessOperatorName} and Knative Eventing are installed on the cluster. -* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {product-title}. -* You have installed the OpenShift CLI (`oc`). -* You have installed the Knative (`kn`) CLI. - -.Procedure - -include::snippets/serverless-service-account-apiserversource.adoc[] - -. Create an API server source that has an event sink. In the following example, the sink is a broker: -+ -[source,terminal] ----- -$ kn source apiserver create --sink broker: --resource "event:v1" --service-account --mode Resource ----- -// need to revisit these docs and give better tutorial examples with different sinks; out of scope for the current PR - -. To check that the API server source is set up correctly, create a Knative service that dumps incoming messages to its log: -+ -[source,terminal] ----- -$ kn service create --image quay.io/openshift-knative/knative-eventing-sources-event-display:latest ----- - -. If you used a broker as an event sink, create a trigger to filter events from the `default` broker to the service: -+ -[source,terminal] ----- -$ kn trigger create --sink ksvc: ----- - -. Create events by launching a pod in the default namespace: -+ -[source,terminal] ----- -$ oc create deployment hello-node --image quay.io/openshift-knative/knative-eventing-sources-event-display:latest ----- - -. 
Check that the controller is mapped correctly by inspecting the output generated by the following command: -+ -[source,terminal] ----- -$ kn source apiserver describe ----- -+ -.Example output -[source,terminal] ----- -Name: mysource -Namespace: default -Annotations: sources.knative.dev/creator=developer, sources.knative.dev/lastModifier=developer -Age: 3m -ServiceAccountName: events-sa -Mode: Resource -Sink: - Name: default - Namespace: default - Kind: Broker (eventing.knative.dev/v1) -Resources: - Kind: event (v1) - Controller: false -Conditions: - OK TYPE AGE REASON - ++ Ready 3m - ++ Deployed 3m - ++ SinkProvided 3m - ++ SufficientPermissions 3m - ++ EventTypesProvided 3m ----- - -.Verification - -You can verify that the Kubernetes events were sent to Knative by looking at the message dumper function logs. - -. Get the pods: -+ -[source,terminal] ----- -$ oc get pods ----- - -. View the message dumper function logs for the pods: -+ -[source,terminal] ----- -$ oc logs $(oc get pod -o name | grep event-display) -c user-container ----- -+ -.Example output -[source,terminal] ----- -☁️ cloudevents.Event -Validation: valid -Context Attributes, - specversion: 1.0 - type: dev.knative.apiserver.resource.update - datacontenttype: application/json - ... -Data, - { - "apiVersion": "v1", - "involvedObject": { - "apiVersion": "v1", - "fieldPath": "spec.containers{hello-node}", - "kind": "Pod", - "name": "hello-node", - "namespace": "default", - ..... - }, - "kind": "Event", - "message": "Started container", - "metadata": { - "name": "hello-node.159d7608e3a3572c", - "namespace": "default", - .... - }, - "reason": "Started", - ... - } ----- - -.Deleting the API server source - -. Delete the trigger: -+ -[source,terminal] ----- -$ kn trigger delete ----- - -. Delete the event source: -+ -[source,terminal] ----- -$ kn source apiserver delete ----- - -. Delete the service account, cluster role, and cluster binding: -+ -[source,terminal] ----- -$ oc delete -f authentication.yaml ----- diff --git a/modules/apiserversource-yaml.adoc b/modules/apiserversource-yaml.adoc deleted file mode 100644 index 6cacbe9cc46f..000000000000 --- a/modules/apiserversource-yaml.adoc +++ /dev/null @@ -1,217 +0,0 @@ -// Module included in the following assemblies: -// -// * serverless/eventing/event-sources/serverless-apiserversource.adoc - -:_content-type: PROCEDURE -[id="apiserversource-yaml_context"] -= Creating an API server source by using YAML files - -Creating Knative resources by using YAML files uses a declarative API, which enables you to describe event sources declaratively and in a reproducible manner. To create an API server source by using YAML, you must create a YAML file that defines an `ApiServerSource` object, then apply it by using the `oc apply` command. - -.Prerequisites - -* The {ServerlessOperatorName} and Knative Eventing are installed on the cluster. -* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {product-title}. -* You have created the `default` broker in the same namespace as the one defined in the API server source YAML file. -* Install the OpenShift CLI (`oc`). - -.Procedure - -include::snippets/serverless-service-account-apiserversource.adoc[] - -. 
Create an API server source as a YAML file: -+ -[source,yaml] ----- -apiVersion: sources.knative.dev/v1alpha1 -kind: ApiServerSource -metadata: - name: testevents -spec: - serviceAccountName: events-sa - mode: Resource - resources: - - apiVersion: v1 - kind: Event - sink: - ref: - apiVersion: eventing.knative.dev/v1 - kind: Broker - name: default ----- - -. Apply the `ApiServerSource` YAML file: -+ -[source,terminal] ----- -$ oc apply -f ----- - -. To check that the API server source is set up correctly, create a Knative service as a YAML file that dumps incoming messages to its log: -+ -[source,yaml] ----- -apiVersion: serving.knative.dev/v1 -kind: Service -metadata: - name: event-display - namespace: default -spec: - template: - spec: - containers: - - image: quay.io/openshift-knative/knative-eventing-sources-event-display:latest ----- - -. Apply the `Service` YAML file: -+ -[source,terminal] ----- -$ oc apply -f ----- - -. Create a `Trigger` object as a YAML file that filters events from the `default` broker to the service created in the previous step: -+ -[source,yaml] ----- -apiVersion: eventing.knative.dev/v1 -kind: Trigger -metadata: - name: event-display-trigger - namespace: default -spec: - broker: default - subscriber: - ref: - apiVersion: serving.knative.dev/v1 - kind: Service - name: event-display ----- - -. Apply the `Trigger` YAML file: -+ -[source,terminal] ----- -$ oc apply -f ----- - -. Create events by launching a pod in the default namespace: -+ -[source,terminal] ----- -$ oc create deployment hello-node --image=quay.io/openshift-knative/knative-eventing-sources-event-display ----- - -. Check that the controller is mapped correctly, by entering the following command and inspecting the output: -+ -[source,terminal] ----- -$ oc get apiserversource.sources.knative.dev testevents -o yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: sources.knative.dev/v1alpha1 -kind: ApiServerSource -metadata: - annotations: - creationTimestamp: "2020-04-07T17:24:54Z" - generation: 1 - name: testevents - namespace: default - resourceVersion: "62868" - selfLink: /apis/sources.knative.dev/v1alpha1/namespaces/default/apiserversources/testevents2 - uid: 1603d863-bb06-4d1c-b371-f580b4db99fa -spec: - mode: Resource - resources: - - apiVersion: v1 - controller: false - controllerSelector: - apiVersion: "" - kind: "" - name: "" - uid: "" - kind: Event - labelSelector: {} - serviceAccountName: events-sa - sink: - ref: - apiVersion: eventing.knative.dev/v1 - kind: Broker - name: default ----- - -.Verification - -To verify that the Kubernetes events were sent to Knative, you can look at the message dumper function logs. - -. Get the pods by entering the following command: -+ -[source,terminal] ----- -$ oc get pods ----- -. View the message dumper function logs for the pods by entering the following command: -+ -[source,terminal] ----- -$ oc logs $(oc get pod -o name | grep event-display) -c user-container ----- -+ -.Example output -[source,terminal] ----- -☁️ cloudevents.Event -Validation: valid -Context Attributes, - specversion: 1.0 - type: dev.knative.apiserver.resource.update - datacontenttype: application/json - ... -Data, - { - "apiVersion": "v1", - "involvedObject": { - "apiVersion": "v1", - "fieldPath": "spec.containers{hello-node}", - "kind": "Pod", - "name": "hello-node", - "namespace": "default", - ..... - }, - "kind": "Event", - "message": "Started container", - "metadata": { - "name": "hello-node.159d7608e3a3572c", - "namespace": "default", - .... 
- }, - "reason": "Started", - ... - } ----- - -.Deleting the API server source - -. Delete the trigger: -+ -[source,terminal] ----- -$ oc delete -f trigger.yaml ----- - -. Delete the event source: -+ -[source,terminal] ----- -$ oc delete -f k8s-events.yaml ----- - -. Delete the service account, cluster role, and cluster binding: -+ -[source,terminal] ----- -$ oc delete -f authentication.yaml ----- diff --git a/modules/application-health-about.adoc b/modules/application-health-about.adoc deleted file mode 100644 index 2124f13ba458..000000000000 --- a/modules/application-health-about.adoc +++ /dev/null @@ -1,210 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/application-health.adoc - -:_content-type: CONCEPT -[id="application-health-about_{context}"] -= Understanding health checks - -A health check periodically performs diagnostics on a -running container using any combination of the readiness, liveness, and startup health checks. - -You can include one or more probes in the specification for the pod that contains the container which you want to perform the health checks. - -[NOTE] -==== -If you want to add or edit health checks in an existing pod, you must edit the pod `DeploymentConfig` object or use the *Developer* perspective in the web console. You cannot use the CLI to add or edit health checks for an existing pod. -==== - -Readiness probe:: -A _readiness probe_ determines if a container is ready to accept service requests. If -the readiness probe fails for a container, the kubelet removes the pod from the list of available service endpoints. -+ -After a failure, the probe continues to examine the pod. If the pod becomes available, the kubelet adds the pod to the list of available service endpoints. - -Liveness health check:: -A _liveness probe_ determines if a container is still -running. If the liveness probe fails due to a condition such as a deadlock, the kubelet kills the container. The pod then -responds based on its restart policy. -+ -For example, a liveness probe on a pod with a `restartPolicy` of `Always` or `OnFailure` -kills and restarts the container. - -Startup probe:: -A _startup probe_ indicates whether the application within a container is started. All other probes are disabled until the startup succeeds. If the startup probe does not succeed within a specified time period, the kubelet kills the container, and the container is subject to the pod `restartPolicy`. -+ -Some applications can require additional startup time on their first initialization. You can use a startup probe with a liveness or readiness probe to delay that probe long enough to handle lengthy start-up time using the `failureThreshold` and `periodSeconds` parameters. -+ -For example, you can add a startup probe, with a `failureThreshold` of 30 failures and a `periodSeconds` of 10 seconds (30 * 10s = 300s) for a maximum of 5 minutes, to a liveness probe. After the startup probe succeeds the first time, the liveness probe takes over. - -You can configure liveness, readiness, and startup probes with any of the following types of tests: - -* HTTP `GET`: When using an HTTP `GET` test, the test determines the healthiness of the container by using a web hook. The test is successful if the HTTP response code is between `200` and `399`. -+ -You can use an HTTP `GET` test with applications that return HTTP status codes when completely initialized. - -* Container Command: When using a container command test, the probe executes a command inside the container. 
The probe is successful if the test exits with a `0` status. - -* TCP socket: When using a TCP socket test, the probe attempts to open a socket to the container. The container is only -considered healthy if the probe can establish a connection. You can use a TCP socket test with applications that do not start listening until -initialization is complete. - -You can configure several fields to control the behavior of a probe: - -* `initialDelaySeconds`: The time, in seconds, after the container starts before the probe can be scheduled. The default is 0. -* `periodSeconds`: The delay, in seconds, between performing probes. The default is `10`. This value must be greater than `timeoutSeconds`. -* `timeoutSeconds`: The number of seconds of inactivity after which the probe times out and the container is assumed to have failed. The default is `1`. This value must be lower than `periodSeconds`. -* `successThreshold`: The number of times that the probe must report success after a failure to reset the container status to successful. The value must be `1` for a liveness probe. The default is `1`. -* `failureThreshold`: The number of times that the probe is allowed to fail. The default is 3. After the specified attempts: -** for a liveness probe, the container is restarted -** for a readiness probe, the pod is marked `Unready` -** for a startup probe, the container is killed and is subject to the pod's `restartPolicy` - -[discrete] -[id="application-health-examples"] -== Example probes - -The following are samples of different probes as they would appear in an object specification. - -.Sample readiness probe with a container command readiness probe in a pod spec -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - labels: - test: health-check - name: my-application -... -spec: - containers: - - name: goproxy-app <1> - args: - image: registry.k8s.io/goproxy:0.1 <2> - readinessProbe: <3> - exec: <4> - command: <5> - - cat - - /tmp/healthy -... ----- - -<1> The container name. -<2> The container image to deploy. -<3> A readiness probe. -<4> A container command test. -<5> The commands to execute on the container. - -.Sample container command startup probe and liveness probe with container command tests in a pod spec -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - labels: - test: health-check - name: my-application -... -spec: - containers: - - name: goproxy-app <1> - args: - image: registry.k8s.io/goproxy:0.1 <2> - livenessProbe: <3> - httpGet: <4> - scheme: HTTPS <5> - path: /healthz - port: 8080 <6> - httpHeaders: - - name: X-Custom-Header - value: Awesome - startupProbe: <7> - httpGet: <8> - path: /healthz - port: 8080 <9> - failureThreshold: 30 <10> - periodSeconds: 10 <11> -... ----- - -<1> The container name. -<2> Specify the container image to deploy. -<3> A liveness probe. -<4> An HTTP `GET` test. -<5> The internet scheme: `HTTP` or `HTTPS`. The default value is `HTTP`. -<6> The port on which the container is listening. -<7> A startup probe. -<8> An HTTP `GET` test. -<9> The port on which the container is listening. -<10> The number of times to try the probe after a failure. -<11> The number of seconds to perform the probe. - -.Sample liveness probe with a container command test that uses a timeout in a pod spec -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - labels: - test: health-check - name: my-application -... 
-spec: - containers: - - name: goproxy-app <1> - args: - image: registry.k8s.io/goproxy:0.1 <2> - livenessProbe: <3> - exec: <4> - command: <5> - - /bin/bash - - '-c' - - timeout 60 /opt/eap/bin/livenessProbe.sh - periodSeconds: 10 <6> - successThreshold: 1 <7> - failureThreshold: 3 <8> -... ----- - -<1> The container name. -<2> Specify the container image to deploy. -<3> The liveness probe. -<4> The type of probe, here a container command probe. -<5> The command line to execute inside the container. -<6> How often in seconds to perform the probe. -<7> The number of consecutive successes needed to show success after a failure. -<8> The number of times to try the probe after a failure. - -.Sample readiness probe and liveness probe with a TCP socket test in a deployment -[source,yaml] ----- -kind: Deployment -apiVersion: apps/v1 -... -spec: -... - template: - spec: - containers: - - resources: {} - readinessProbe: <1> - tcpSocket: - port: 8080 - timeoutSeconds: 1 - periodSeconds: 10 - successThreshold: 1 - failureThreshold: 3 - terminationMessagePath: /dev/termination-log - name: ruby-ex - livenessProbe: <2> - tcpSocket: - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - periodSeconds: 10 - successThreshold: 1 - failureThreshold: 3 -... ----- -<1> The readiness probe. -<2> The liveness probe. diff --git a/modules/application-health-configuring.adoc b/modules/application-health-configuring.adoc deleted file mode 100644 index fe308a5dc9d8..000000000000 --- a/modules/application-health-configuring.adoc +++ /dev/null @@ -1,136 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/application-health.adoc - -:_content-type: PROCEDURE -[id="application-health-configuring_{context}"] -= Configuring health checks using the CLI - -To configure readiness, liveness, and startup probes, add one or more probes to the specification for the pod that contains the container which you want to perform the health checks - -[NOTE] -==== -If you want to add or edit health checks in an existing pod, you must edit the pod `DeploymentConfig` object or use the *Developer* perspective in the web console. You cannot use the CLI to add or edit health checks for an existing pod. -==== - -.Procedure - -To add probes for a container: - -. Create a `Pod` object to add one or more probes: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - labels: - test: health-check - name: my-application -spec: - containers: - - name: my-container <1> - args: - image: registry.k8s.io/goproxy:0.1 <2> - livenessProbe: <3> - tcpSocket: <4> - port: 8080 <5> - initialDelaySeconds: 15 <6> - periodSeconds: 20 <7> - timeoutSeconds: 10 <8> - readinessProbe: <9> - httpGet: <10> - host: my-host <11> - scheme: HTTPS <12> - path: /healthz - port: 8080 <13> - startupProbe: <14> - exec: <15> - command: <16> - - cat - - /tmp/healthy - failureThreshold: 30 <17> - periodSeconds: 20 <18> - timeoutSeconds: 10 <19> ----- -<1> Specify the container name. -<2> Specify the container image to deploy. -<3> Optional: Create a Liveness probe. -<4> Specify a test to perform, here a TCP Socket test. -<5> Specify the port on which the container is listening. -<6> Specify the time, in seconds, after the container starts before the probe can be scheduled. -<7> Specify the number of seconds to perform the probe. The default is `10`. This value must be greater than `timeoutSeconds`. -<8> Specify the number of seconds of inactivity after which the probe is assumed to have failed. The default is `1`. 
This value must be lower than `periodSeconds`. -<9> Optional: Create a Readiness probe. -<10> Specify the type of test to perform, here an HTTP test. -<11> Specify a host IP address. When `host` is not defined, the `PodIP` is used. -<12> Specify `HTTP` or `HTTPS`. When `scheme` is not defined, the `HTTP` scheme is used. -<13> Specify the port on which the container is listening. -<14> Optional: Create a Startup probe. -<15> Specify the type of test to perform, here an Container Execution probe. -<16> Specify the commands to execute on the container. -<17> Specify the number of times to try the probe after a failure. -<18> Specify the number of seconds to perform the probe. The default is `10`. This value must be greater than `timeoutSeconds`. -<19> Specify the number of seconds of inactivity after which the probe is assumed to have failed. The default is `1`. This value must be lower than `periodSeconds`. -+ -[NOTE] -==== -If the `initialDelaySeconds` value is lower than the `periodSeconds` value, the first Readiness probe occurs at some point between the two periods due to an issue with timers. - -The `timeoutSeconds` value must be lower than the `periodSeconds` value. -==== - -. Create the `Pod` object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -. Verify the state of the health check pod: -+ -[source,terminal] ----- -$ oc describe pod health-check ----- -+ -.Example output -[source,terminal] ----- -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Scheduled 9s default-scheduler Successfully assigned openshift-logging/liveness-exec to ip-10-0-143-40.ec2.internal - Normal Pulling 2s kubelet, ip-10-0-143-40.ec2.internal pulling image "registry.k8s.io/liveness" - Normal Pulled 1s kubelet, ip-10-0-143-40.ec2.internal Successfully pulled image "registry.k8s.io/liveness" - Normal Created 1s kubelet, ip-10-0-143-40.ec2.internal Created container - Normal Started 1s kubelet, ip-10-0-143-40.ec2.internal Started container ----- -+ -The following is the output of a failed probe that restarted a container: -+ -.Sample Liveness check output with unhealthy container -[source,terminal] ----- -$ oc describe pod pod1 ----- -+ -.Example output -[source,terminal] ----- -.... 
- -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Scheduled Successfully assigned aaa/liveness-http to ci-ln-37hz77b-f76d1-wdpjv-worker-b-snzrj - Normal AddedInterface 47s multus Add eth0 [10.129.2.11/23] - Normal Pulled 46s kubelet, ci-ln-37hz77b-f76d1-wdpjv-worker-b-snzrj Successfully pulled image "registry.k8s.io/liveness" in 773.406244ms - Normal Pulled 28s kubelet, ci-ln-37hz77b-f76d1-wdpjv-worker-b-snzrj Successfully pulled image "registry.k8s.io/liveness" in 233.328564ms - Normal Created 10s (x3 over 46s) kubelet, ci-ln-37hz77b-f76d1-wdpjv-worker-b-snzrj Created container liveness - Normal Started 10s (x3 over 46s) kubelet, ci-ln-37hz77b-f76d1-wdpjv-worker-b-snzrj Started container liveness - Warning Unhealthy 10s (x6 over 34s) kubelet, ci-ln-37hz77b-f76d1-wdpjv-worker-b-snzrj Liveness probe failed: HTTP probe failed with statuscode: 500 - Normal Killing 10s (x2 over 28s) kubelet, ci-ln-37hz77b-f76d1-wdpjv-worker-b-snzrj Container liveness failed liveness probe, will be restarted - Normal Pulling 10s (x3 over 47s) kubelet, ci-ln-37hz77b-f76d1-wdpjv-worker-b-snzrj Pulling image "registry.k8s.io/liveness" - Normal Pulled 10s kubelet, ci-ln-37hz77b-f76d1-wdpjv-worker-b-snzrj Successfully pulled image "registry.k8s.io/liveness" in 244.116568ms ----- diff --git a/modules/applications-create-using-cli-image.adoc b/modules/applications-create-using-cli-image.adoc deleted file mode 100644 index 09570b85efda..000000000000 --- a/modules/applications-create-using-cli-image.adoc +++ /dev/null @@ -1,38 +0,0 @@ -[id="applications-create-using-cli-image_{context}"] -= Creating an application from an image - -You can deploy an application from an existing image. Images can come from image streams in the {product-title} server, images in a specific registry, or images in the local Docker server. - -The `new-app` command attempts to determine the type of image specified in the arguments passed to it. However, you can explicitly tell `new-app` whether the image is a container image using the `--docker-image` argument or an image stream using the `-i|--image-stream` argument. - -[NOTE] -==== -If you specify an image from your local Docker repository, you must ensure that the same image is available to the {product-title} cluster nodes. -==== - -== Docker Hub MySQL image - -Create an application from the Docker Hub MySQL image, for example: - -[source,terminal] ----- -$ oc new-app mysql ----- - -== Image in a private registry - -Create an application using an image in a private registry, specify the full container image specification: - -[source,terminal] ----- -$ oc new-app myregistry:5000/example/myimage ----- - -== Existing image stream and optional image stream tag - -Create an application from an existing image stream and optional image stream tag: - -[source,terminal] ----- -$ oc new-app my-stream:v1 ----- diff --git a/modules/applications-create-using-cli-modify.adoc b/modules/applications-create-using-cli-modify.adoc deleted file mode 100644 index f5975d0617c4..000000000000 --- a/modules/applications-create-using-cli-modify.adoc +++ /dev/null @@ -1,225 +0,0 @@ -[id="applications-create-using-cli-modify_{context}"] -= Modifying application creation - -The `new-app` command generates {product-title} objects that build, deploy, and run the application that is created. Normally, these objects are created in the current project and assigned names that are derived from the input source repositories or the input images. 
However, with `new-app` you can modify this behavior. - -.`new-app` output objects -[cols="2,8",options="header"] -|=== - -|Object |Description - -|`BuildConfig` -|A `BuildConfig` object is created for each source repository that is specified in the command line. The `BuildConfig` object specifies the strategy to use, the source location, and the build output location. - -|`ImageStreams` -|For the `BuildConfig` object, two image streams are usually created. One represents the input image. With source builds, this is the builder image. -ifndef::openshift-online[] -With `Docker` builds, this is the *FROM* image. -endif::[] -The second one represents the output image. If a container image was specified as input to `new-app`, then an image stream is created for that image as well. - -|`DeploymentConfig` -|A `DeploymentConfig` object is created either to deploy the output of a build, or a specified image. The `new-app` command creates `emptyDir` volumes for all Docker volumes that are specified in containers included in the resulting `DeploymentConfig` object . - -|`Service` -|The `new-app` command attempts to detect exposed ports in input images. It uses the lowest numeric exposed port to generate a service that exposes that port. To expose a different port, after `new-app` has completed, simply use the `oc expose` command to generate additional services. - -|Other -|Other objects can be generated when instantiating templates, according to the template. - -|=== - -[id="specifying-environment-variables"] -== Specifying environment variables - -When generating applications from a template, source, or an image, you can use the `-e|--env` argument to pass environment variables to the application container at run time: - -[source,terminal] ----- -$ oc new-app openshift/postgresql-92-centos7 \ - -e POSTGRESQL_USER=user \ - -e POSTGRESQL_DATABASE=db \ - -e POSTGRESQL_PASSWORD=password ----- - -The variables can also be read from file using the `--env-file` argument. The following is an example file called `postgresql.env`: - -[source,terminal] ----- -POSTGRESQL_USER=user -POSTGRESQL_DATABASE=db -POSTGRESQL_PASSWORD=password ----- - -Read the variables from the file: - -[source,terminal] ----- -$ oc new-app openshift/postgresql-92-centos7 --env-file=postgresql.env ----- - -Additionally, environment variables can be given on standard input by using `--env-file=-`: - -[source,terminal] ----- -$ cat postgresql.env | oc new-app openshift/postgresql-92-centos7 --env-file=- ----- - -[NOTE] -==== -Any `BuildConfig` objects created as part of `new-app` processing are not updated with environment variables passed with the `-e|--env` or `--env-file` argument. -==== - -[id="specifying-build-environment-variables"] -== Specifying build environment variables - -When generating applications from a template, source, or an image, you can use the `--build-env` argument to pass environment variables to the build container at run time: - -[source,terminal] ----- -$ oc new-app openshift/ruby-23-centos7 \ - --build-env HTTP_PROXY=http://myproxy.net:1337/ \ - --build-env GEM_HOME=~/.gem ----- - -The variables can also be read from a file using the `--build-env-file` argument. 
The following is an example file called `ruby.env`: - -[source,terminal] ----- -HTTP_PROXY=http://myproxy.net:1337/ -GEM_HOME=~/.gem ----- - -Read the variables from the file: - -[source,terminal] ----- -$ oc new-app openshift/ruby-23-centos7 --build-env-file=ruby.env ----- - -Additionally, environment variables can be given on standard input by using `--build-env-file=-`: - -[source,terminal] ----- -$ cat ruby.env | oc new-app openshift/ruby-23-centos7 --build-env-file=- ----- - -[id="specifying-labels"] -== Specifying labels - -When generating applications from source, images, or templates, you can use the `-l|--label` argument to add labels to the created objects. Labels make it easy to collectively select, configure, and delete objects associated with the application. - -[source,terminal] ----- -$ oc new-app https://github.com/openshift/ruby-hello-world -l name=hello-world ----- - -[id="viewing-output-without-creation"] -== Viewing the output without creation - -To see a dry-run of running the `new-app` command, you can use the `-o|--output` argument with a `yaml` or `json` value. You can then use the output to preview the objects that are created or redirect it to a file that you can edit. After you are satisfied, you can use `oc create` to create the {product-title} objects. - -To output `new-app` artifacts to a file, run the following: - -[source,terminal] ----- -$ oc new-app https://github.com/openshift/ruby-hello-world \ - -o yaml > myapp.yaml ----- - -Edit the file: - -[source,terminal] ----- -$ vi myapp.yaml ----- - -Create a new application by referencing the file: - -[source,terminal] ----- -$ oc create -f myapp.yaml ----- - -[id="creating-objects-different-names"] -== Creating objects with different names - -Objects created by `new-app` are normally named after the source repository, or the image used to generate them. You can set the name of the objects produced by adding a `--name` flag to the command: - -[source,terminal] ----- -$ oc new-app https://github.com/openshift/ruby-hello-world --name=myapp ----- - -[id="creating-objects-different-project"] -== Creating objects in a different project - -Normally, `new-app` creates objects in the current project. However, you can create objects in a different project by using the `-n|--namespace` argument: - -[source,terminal] ----- -$ oc new-app https://github.com/openshift/ruby-hello-world -n myproject ----- - -[id="creating-multiple-objects"] -== Creating multiple objects - -The `new-app` command allows creating multiple applications specifying multiple parameters to `new-app`. Labels specified in the command line apply to all objects created by the single command. Environment variables apply to all components created from source or images. - -To create an application from a source repository and a Docker Hub image: - -[source,terminal] ----- -$ oc new-app https://github.com/openshift/ruby-hello-world mysql ----- - -[NOTE] -==== -If a source code repository and a builder image are specified as separate arguments, `new-app` uses the builder image as the builder for the source code repository. If this is not the intent, specify the required builder image for the source using the `~` separator. -==== - -[id="grouping-images-source-single-pod"] -== Grouping images and source in a single pod - -The `new-app` command allows deploying multiple images together in a single pod. To specify which images to group together, use the `+` separator. 
The `--group` command line argument can also be used to specify the images that should be grouped together. To group the image built from a source repository with other images, specify its builder image in the group: - -[source,terminal] ----- -$ oc new-app ruby+mysql ----- - -To deploy an image built from source and an external image together: - -[source,terminal] ----- -$ oc new-app \ - ruby~https://github.com/openshift/ruby-hello-world \ - mysql \ - --group=ruby+mysql ----- - -[id="searching-for-images-templates-other-inputs"] -== Searching for images, templates, and other inputs - -To search for images, templates, and other inputs for the `oc new-app` command, add the `--search` and `--list` flags. For example, to find all of the images or templates that include PHP: - -[source,terminal] ----- -$ oc new-app --search php ----- - -[id="setting-the-import-mode"] -== Setting the import mode - -To set the import mode when using `oc new-app`, add the `--import-mode` flag. This flag can be appended with `Legacy` or `PreserveOriginal`, which provides users the option to create image streams using a single sub-manifest, or all manifests, respectively. - -[souce,terminal] ----- -$ oc new-app --image=registry.redhat.io/ubi8/httpd-24:latest --import-mode=Legacy --name=test ----- - -[source,terminal] ----- -$ oc new-app --image=registry.redhat.io/ubi8/httpd-24:latest --import-mode=PreserveOriginal --name=test ----- diff --git a/modules/applications-create-using-cli-source-code.adoc b/modules/applications-create-using-cli-source-code.adoc deleted file mode 100644 index fd7b10132c08..000000000000 --- a/modules/applications-create-using-cli-source-code.adoc +++ /dev/null @@ -1,153 +0,0 @@ -[id="applications-create-using-cli-source-code_{context}"] -= Creating an application from source code - -With the `new-app` command you can create applications from source code in a local or remote Git repository. - -The `new-app` command creates a build configuration, which itself creates a new application image from your source code. The `new-app` command typically also creates a `Deployment` object to deploy the new image, and a service to provide load-balanced access to the deployment running your image. - -{product-title} automatically detects whether the pipeline, source, or docker build strategy should be used, and in the case of source build, detects an appropriate language builder image. - -[id="local_{context}"] -== Local - -To create an application from a Git repository in a local directory: - -[source,terminal] ----- -$ oc new-app / ----- - -[NOTE] -==== -If you use a local Git repository, the repository must have a remote named `origin` that points to a URL that is accessible by the {product-title} cluster. If there is no recognized remote, running the `new-app` command will create a binary build. -==== - -[id="remote_{context}"] -== Remote - -To create an application from a remote Git repository: - -[source,terminal] ----- -$ oc new-app https://github.com/sclorg/cakephp-ex ----- - -To create an application from a private remote Git repository: - -[source,terminal] ----- -$ oc new-app https://github.com/youruser/yourprivaterepo --source-secret=yoursecret ----- - -[NOTE] -==== -If you use a private remote Git repository, you can use the `--source-secret` flag to specify an existing source clone secret that will get injected into your build config to access the repository. -==== - -You can use a subdirectory of your source code repository by specifying a `--context-dir` flag. 
To create an application from a remote Git repository and a context subdirectory: - -[source,terminal] ----- -$ oc new-app https://github.com/sclorg/s2i-ruby-container.git \ - --context-dir=2.0/test/puma-test-app ----- - -Also, when specifying a remote URL, you can specify a Git branch to use by appending `#` to the end of the URL: - -[source,terminal] ----- -$ oc new-app https://github.com/openshift/ruby-hello-world.git#beta4 ----- - -[id="build-strategy-detection_{context}"] -== Build strategy detection - -{product-title} automatically determines which build strategy to use by detecting certain files: - -* If a Jenkins file exists in the root or specified context directory of the source repository when creating a new application, {product-title} generates a pipeline build strategy. -+ -[NOTE] -==== -The `pipeline` build strategy is deprecated; consider using {pipelines-title} instead. -==== -* If a Dockerfile exists in the root or specified context directory of the source repository when creating a new application, {product-title} generates a docker build strategy. -* If neither a Jenkins file nor a Dockerfile is detected, {product-title} generates a source build strategy. - -Override the automatically detected build strategy by setting the `--strategy` flag to `docker`, `pipeline`, or `source`. - -[source,terminal] ----- -$ oc new-app /home/user/code/myapp --strategy=docker ----- - -[NOTE] -==== -The `oc` command requires that files containing build sources are available in a remote Git repository. For all source builds, you must use `git remote -v`. -==== - -[id="language-detection_{context}"] -== Language detection - -If you use the source build strategy, `new-app` attempts to determine the language builder to use by the presence of certain files in the root or specified context directory of the repository: - -.Languages detected by `new-app` -[cols="4,8",options="header"] -|=== - -|Language |Files -ifdef::openshift-enterprise,openshift-webscale,openshift-aro,openshift-online[] -|`dotnet` -|`project.json`, `pass:[*.csproj]` -endif::[] -|`jee` -|`pom.xml` - -|`nodejs` -|`app.json`, `package.json` - -|`perl` -|`cpanfile`, `index.pl` - -|`php` -|`composer.json`, `index.php` - -|`python` -|`requirements.txt`, `setup.py` - -|`ruby` -|`Gemfile`, `Rakefile`, `config.ru` - -|`scala` -|`build.sbt` - -|`golang` -|`Godeps`, `main.go` -|=== - -After a language is detected, `new-app` searches the {product-title} server for image stream tags that have a `supports` annotation matching the detected language, or an image stream that matches the name of the detected language. If a match is not found, `new-app` searches the link:https://registry.hub.docker.com[Docker Hub registry] for an image that matches the detected language based on name. - -You can override the image the builder uses for a particular source repository by specifying the image, either an image stream or container -specification, and the repository with a `~` as a separator. Note that if this is done, build strategy detection and language detection are not carried out. 
- -For example, to use the `myproject/my-ruby` imagestream with the source in a remote repository: - -[source,terminal] ----- -$ oc new-app myproject/my-ruby~https://github.com/openshift/ruby-hello-world.git ----- - -To use the `openshift/ruby-20-centos7:latest` container image stream with the source in a local repository: - -[source,terminal] ----- -$ oc new-app openshift/ruby-20-centos7:latest~/home/user/code/my-ruby-app ----- - -[NOTE] -==== -Language detection requires the Git client to be locally installed so that your repository can be cloned and inspected. If Git is not available, you can avoid the language detection step by specifying the builder image to use with your repository with the `~` syntax. - -The `-i ` invocation requires that `new-app` attempt to clone `repository` to determine what type of artifact it is, so this will fail if Git is not available. - -The `-i --code ` invocation requires `new-app` clone `repository` to determine whether `image` should be used as a builder for the source code, or deployed separately, as in the case of a database image. -==== diff --git a/modules/applications-create-using-cli-template.adoc b/modules/applications-create-using-cli-template.adoc deleted file mode 100644 index d50a7837da31..000000000000 --- a/modules/applications-create-using-cli-template.adoc +++ /dev/null @@ -1,53 +0,0 @@ -[id="applications-create-using-cli-template_{context}"] -= Creating an application from a template - -You can create an application from a previously stored template or from a -template file, by specifying the name of the template as an argument. For -example, you can store a sample application template and use it to create an -application. - -Upload an application template to your current project's template library. The following example uploads an application template from a file called `examples/sample-app/application-template-stibuild.json`: - -[source,terminal] ----- -$ oc create -f examples/sample-app/application-template-stibuild.json ----- - -Then create a new application by referencing the application template. In this example, the template name is `ruby-helloworld-sample`: - -[source,terminal] ----- -$ oc new-app ruby-helloworld-sample ----- - -To create a new application by referencing a template file in your local file system, without first storing it in {product-title}, use the `-f|--file` argument. For example: - -[source,terminal] ----- -$ oc new-app -f examples/sample-app/application-template-stibuild.json ----- - -== Template parameters - -When creating an application based on a template, use the `-p|--param` argument to set parameter values that are defined by the template: - -[source,terminal] ----- -$ oc new-app ruby-helloworld-sample \ - -p ADMIN_USERNAME=admin -p ADMIN_PASSWORD=mypassword ----- - -You can store your parameters in a file, then use that file with `--param-file` when instantiating a template. If you want to read the parameters from standard input, use `--param-file=-`. 
The following is an example file called `helloworld.params`: - -[source,terminal] ----- -ADMIN_USERNAME=admin -ADMIN_PASSWORD=mypassword ----- - -Reference the parameters in the file when instantiating a template: - -[source,terminal] ----- -$ oc new-app ruby-helloworld-sample --param-file=helloworld.params ----- diff --git a/modules/applying-custom-seccomp-profile.adoc b/modules/applying-custom-seccomp-profile.adoc deleted file mode 100644 index 32c17f98f639..000000000000 --- a/modules/applying-custom-seccomp-profile.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: PROCEDURE -[id="applying-custom-seccomp-profile_{context}"] -= Applying the custom seccomp profile to the workload - -.Prerequisite -* The cluster administrator has set up the custom seccomp profile. For more details, see "Setting up the custom seccomp profile". - -.Procedure -* Apply the seccomp profile to the workload by setting the `securityContext.seccompProfile.type` field as following: -+ -.Example -+ -[source, yaml] ----- -spec: - securityContext: - seccompProfile: - type: Localhost - localhostProfile: .json <1> ----- -<1> Provide the name of your custom seccomp profile. -+ -Alternatively, you can use the pod annotations `seccomp.security.alpha.kubernetes.io/pod: localhost/.json`. However, this method is deprecated in {product-title} {product-version}. diff --git a/modules/arch-cluster-operators.adoc b/modules/arch-cluster-operators.adoc deleted file mode 100644 index 3097bfc1aebb..000000000000 --- a/modules/arch-cluster-operators.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/control-plane.adoc - -[id="cluster-operators_{context}"] -= Cluster Operators - -In {product-title}, all cluster functions are divided into a series of default _cluster Operators_. Cluster Operators manage a particular area of cluster functionality, such as cluster-wide application logging, management of the Kubernetes control plane, or the machine provisioning system. - -Cluster Operators are represented by a `ClusterOperator` object, which cluster administrators can view in the {product-title} web console from the *Administration* -> *Cluster Settings* page. Each cluster Operator provides a simple API for determining cluster functionality. The Operator hides the details of managing the lifecycle of that component. Operators can manage a single component or tens of components, but the end goal is always to reduce operational burden by automating common actions. diff --git a/modules/arch-olm-operators.adoc b/modules/arch-olm-operators.adoc deleted file mode 100644 index 6681f67f1c91..000000000000 --- a/modules/arch-olm-operators.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/control-plane.adoc - -[id="olm-operators_{context}"] -= Add-on Operators - -Operator Lifecycle Manager (OLM) and OperatorHub are default components in {product-title} that help manage Kubernetes-native applications as Operators. Together they provide the system for discovering, installing, and managing the optional add-on Operators available on the cluster. - -Using OperatorHub in the {product-title} web console, cluster administrators and authorized users can select Operators to install from catalogs of Operators. After installing an Operator from OperatorHub, it can be made available globally or in specific namespaces to run in user applications. 
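For illustration only, the following is a minimal sketch of a `Subscription` object, the OLM resource that requests installation of an add-on Operator from a catalog source. The Operator name `my-operator` and the `stable` channel are placeholder assumptions for this sketch; `redhat-operators` in the `openshift-marketplace` namespace is one of the default catalog sources described below.

[source,yaml]
----
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: my-operator              # placeholder subscription name
  namespace: openshift-operators # installing here typically makes the Operator available cluster-wide
spec:
  channel: stable                # assumed update channel published by this Operator package
  name: my-operator              # package name in the catalog (placeholder)
  source: redhat-operators       # one of the default catalog sources
  sourceNamespace: openshift-marketplace
----

Applying a manifest like this with `oc apply -f` asks OLM to resolve and install the Operator. Installing into `openshift-operators` generally corresponds to the "available globally" case, while installing into a namespace with its own `OperatorGroup` scopes the Operator to specific namespaces.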
- -Default catalog sources are available that include Red Hat Operators, certified Operators, and community Operators. Cluster administrators can also add their own custom catalog sources, which can contain a custom set of Operators. - -Developers can use the Operator SDK to help author custom Operators that take advantage of OLM features, as well. Their Operator can then be bundled and added to a custom catalog source, which can be added to a cluster and made available to users. - -[NOTE] -==== -OLM does not manage the cluster Operators that comprise the {product-title} architecture. -==== diff --git a/modules/arch-platform-operators.adoc b/modules/arch-platform-operators.adoc deleted file mode 100644 index 48b96b5c07eb..000000000000 --- a/modules/arch-platform-operators.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/control-plane.adoc -// * operators/admin/olm-managing-po.adoc - -:_content-type: CONCEPT - -ifeval::["{context}" == "control-plane"] -[id="platform-operators_{context}"] -= Platform Operators (Technology Preview) - -:FeatureName: The platform Operator type -include::snippets/technology-preview.adoc[] -endif::[] - -ifeval::["{context}" == "olm-managing-po"] -[id="platform-operators_{context}"] -= About platform Operators -endif::[] - -Operator Lifecycle Manager (OLM) introduces a new type of Operator called _platform Operators_. A platform Operator is an OLM-based Operator that can be installed during or after an {product-title} cluster's Day 0 operations and participates in the cluster's lifecycle. As a cluster administrator, you can use platform Operators to further customize your {product-title} installation to meet your requirements and use cases. - -Using the existing cluster capabilities feature in {product-title}, cluster administrators can already disable a subset of Cluster Version Operator-based (CVO) components considered non-essential to the initial payload prior to cluster installation. Platform Operators iterate on this model by providing additional customization options. Through the platform Operator mechanism, which relies on resources from the RukPak component, OLM-based Operators can now be installed at cluster installation time and can block cluster rollout if the Operator fails to install successfully. - -In {product-title} 4.12, this Technology Preview release focuses on the basic platform Operator mechanism and builds a foundation for expanding the concept in upcoming releases. You can use the cluster-wide `PlatformOperator` API to configure Operators before or after cluster creation on clusters that have enabled the `TechPreviewNoUpgrades` feature set. \ No newline at end of file diff --git a/modules/architecture-container-application-benefits.adoc b/modules/architecture-container-application-benefits.adoc deleted file mode 100644 index 5328cb429c8a..000000000000 --- a/modules/architecture-container-application-benefits.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/architecture.adoc - -[id="architecture-container-application-benefits_{context}"] -= The benefits of containerized applications - -Using containerized applications offers many advantages over using traditional deployment methods. Where applications were once expected to be installed on operating systems that included all their dependencies, containers let an application carry their dependencies with them. Creating containerized applications offers many benefits. 
- -[id="operating-system-benefits_{context}"] -== Operating system benefits - -Containers use small, dedicated Linux operating systems without a kernel. Their file system, networking, cgroups, process tables, and namespaces are separate from the host Linux system, but the containers can integrate with the hosts seamlessly when necessary. Being based on Linux allows containers to use all the advantages that come with the open source development model of rapid innovation. - -Because each container uses a dedicated operating system, you can deploy applications that require conflicting software dependencies on the same host. Each container carries its own dependent software and manages its own interfaces, such as networking and file systems, so applications never need to compete for those assets. - -[id="deployment-scaling-benefits_{context}"] -== Deployment and scaling benefits - -If you employ rolling upgrades between major releases of your application, you can continuously improve your applications without downtime and still maintain compatibility with the current release. - -You can also deploy and test a new version of an application alongside the existing version. If the container passes your tests, simply deploy more new containers and remove the old ones.  - -Since all the software dependencies for an application are resolved within the container itself, you can use a standardized operating system on each host in your data center. You do not need to configure a specific operating system for each application host. When your data center needs more capacity, you can deploy another generic host system. - -Similarly, scaling containerized applications is simple. {product-title} offers a simple, standard way of scaling any containerized service. For example, if you build applications as a set of microservices rather than large, monolithic applications, you can scale the individual microservices individually to meet demand. This capability allows you to scale only the required services instead of the entire application, which can allow you to meet application demands while using minimal resources. diff --git a/modules/architecture-kubernetes-introduction.adoc b/modules/architecture-kubernetes-introduction.adoc deleted file mode 100644 index 8fd1f576e11a..000000000000 --- a/modules/architecture-kubernetes-introduction.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/architecture.adoc - -:_content-type: CONCEPT -[id="architecture-kubernetes-introduction_{context}"] -= About Kubernetes - -Although container images and the containers that run from them are the -primary building blocks for modern application development, to run them at scale -requires a reliable and flexible distribution system. Kubernetes is the -defacto standard for orchestrating containers. - -Kubernetes is an open source container orchestration engine for automating -deployment, scaling, and management of containerized applications. The general -concept of Kubernetes is fairly simple: - -* Start with one or more worker nodes to run the container workloads. -* Manage the deployment of those workloads from one or more control plane nodes. -* Wrap containers in a deployment unit called a pod. Using pods provides extra -metadata with the container and offers the ability to group several containers -in a single deployment entity. -* Create special kinds of assets. For example, services are represented by a -set of pods and a policy that defines how they are accessed. 
This policy -allows containers to connect to the services that they need even if they do not -have the specific IP addresses for the services. Replication controllers are -another special asset that indicates how many pod replicas are required to run -at a time. You can use this capability to automatically scale your application -to adapt to its current demand. - -In only a few years, Kubernetes has seen massive cloud and on-premise adoption. -The open source development model allows many people to extend Kubernetes -by implementing different technologies for components such as networking, -storage, and authentication. diff --git a/modules/architecture-machine-config-pools.adoc b/modules/architecture-machine-config-pools.adoc deleted file mode 100644 index 7c700689dd9f..000000000000 --- a/modules/architecture-machine-config-pools.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/control-plane.adoc - -[id="architecture-machine-config-pools_{context}"] -= Node configuration management with machine config pools - -Machines that run control plane components or user workloads are divided into groups based on the types of resources they handle. These groups of machines are called machine config pools (MCP). Each MCP manages a set of nodes and its corresponding machine configs. The role of the node determines which MCP it belongs to; the MCP governs nodes based on its assigned node role label. Nodes in an MCP have the same configuration; this means nodes can be scaled up and torn down in response to increased or decreased workloads. - -By default, there are two MCPs created by the cluster when it is installed: `master` and `worker`. Each default MCP has a defined configuration applied by the Machine Config Operator (MCO), which is responsible for managing MCPs and facilitating MCP upgrades. You can create additional MCPs, or custom pools, to manage nodes that have custom use cases that extend outside of the default node types. - -Custom pools are pools that inherit their configurations from the worker pool. They use any machine config targeted for the worker pool, but add the ability to deploy changes only targeted at the custom pool. Since a custom pool inherits its configuration from the worker pool, any change to the worker pool is applied to the custom pool as well. Custom pools that do not inherit their configurations from the worker pool are not supported by the MCO. - -[NOTE] -==== -A node can only be included in one MCP. If a node has multiple labels that correspond to several MCPs, like `worker,infra`, it is managed by the infra custom pool, not the worker pool. Custom pools take priority on selecting nodes to manage based on node labels; nodes that do not belong to a custom pool are managed by the worker pool. -==== - -It is recommended to have a custom pool for every node role you want to manage in your cluster. For example, if you create infra nodes to handle infra workloads, it is recommended to create a custom infra MCP to group those nodes together. If you apply an `infra` role label to a worker node so it has the `worker,infra` dual label, but do not have a custom infra MCP, the MCO considers it a worker node. If you remove the `worker` label from a node and apply the `infra` label without grouping it in a custom pool, the node is not recognized by the MCO and is unmanaged by the cluster. 
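As a concrete sketch of the recommendation above, a custom infra MCP could be defined as follows. This is illustrative rather than prescriptive: the `infra` name and labels are assumptions for this example, and the pool selects machine configs for both the `worker` and `infra` roles so that it inherits the worker configuration.

[source,yaml]
----
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfigPool
metadata:
  name: infra
spec:
  machineConfigSelector:
    matchExpressions:
      # Inherit worker machine configs and also apply configs targeted at the custom infra role.
      - key: machineconfiguration.openshift.io/role
        operator: In
        values: [worker, infra]
  nodeSelector:
    matchLabels:
      # Nodes labeled with the infra role are managed by this pool.
      node-role.kubernetes.io/infra: ""
----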
- -[IMPORTANT] -==== -Any node labeled with the `infra` role that is only running infra workloads is not counted toward the total number of subscriptions. The MCP managing an infra node is mutually exclusive from how the cluster determines subscription charges; tagging a node with the appropriate `infra` role and using taints to prevent user workloads from being scheduled on that node are the only requirements for avoiding subscription charges for infra workloads. -==== - -The MCO applies updates for pools independently; for example, if there is an update that affects all pools, nodes from each pool update in parallel with each other. If you add a custom pool, nodes from that pool also attempt to update concurrently with the master and worker nodes. - -There might be situations where the configuration on a node does not fully match what the currently-applied machine config specifies. This state is called _configuration drift_. The Machine Config Daemon (MCD) regularly checks the nodes for configuration drift. If the MCD detects configuration drift, the MCO marks the node `degraded` until an administrator corrects the node configuration. A degraded node is online and operational, but, it cannot be updated. - diff --git a/modules/architecture-machine-roles.adoc b/modules/architecture-machine-roles.adoc deleted file mode 100644 index 9243250b1bef..000000000000 --- a/modules/architecture-machine-roles.adoc +++ /dev/null @@ -1,133 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/control-plane.adoc - -[id="architecture-machine-roles_{context}"] -= Machine roles in {product-title} - -{product-title} assigns hosts different roles. These roles define the function of the machine within the cluster. The cluster contains definitions for the standard `master` and `worker` role types. - -[NOTE] -==== -The cluster also contains the definition for the `bootstrap` role. Because the bootstrap machine is used only during cluster installation, its function is explained in the cluster installation documentation. -==== - -== Control plane and node host compatibility - -The {product-title} version must match between control plane host and node host. For example, in a {product-version} cluster, all control plane hosts must be {product-version} and all nodes must be {product-version}. - -Temporary mismatches during cluster upgrades are acceptable. For example, when upgrading from the previous {product-title} version to {product-version}, some nodes will upgrade to {product-version} before others. Prolonged skewing of control plane hosts and node hosts might expose older compute machines to bugs and missing features. Users should resolve skewed control plane hosts and node hosts as soon as possible. - -The `kubelet` service must not be newer than `kube-apiserver`, and can be up to two minor versions older depending on whether your {product-title} version is odd or even. The table below shows the appropriate version compatibility: - -[cols="2",options="header"] -|=== -| {product-title} version -| Supported `kubelet` skew - - -| Odd {product-title} minor versions ^[1]^ -| Up to one version older - -| Even {product-title} minor versions ^[2]^ -| Up to two versions older -|=== -[.small] --- -1. For example, {product-title} 4.11, 4.13. -2. For example, {product-title} 4.10, 4.12. --- - -[id="defining-workers_{context}"] -== Cluster workers - -In a Kubernetes cluster, the worker nodes are where the actual workloads requested by Kubernetes users run and are managed. 
The worker nodes advertise their capacity and the scheduler, which a control plane service, determines on which nodes to start pods and containers. Important services run on each worker node, including CRI-O, which is the container engine; Kubelet, which is the service that accepts and fulfills requests for running and stopping container workloads; a service proxy, which manages communication for pods across workers; and the runC or crun low-level container runtime, which creates and runs containers. - -[NOTE] -==== -For information about how to enable crun instead of the default runC, see the documentation for creating a `ContainerRuntimeConfig` CR. -==== - -In {product-title}, compute machine sets control the compute machines, which are assigned the `worker` machine role. Machines with the `worker` role drive compute workloads that are governed by a specific machine pool that autoscales them. Because {product-title} has the capacity to support multiple machine types, the machines with the `worker` role are classed as _compute_ machines. In this release, the terms _worker machine_ and _compute machine_ are used interchangeably because the only default type of compute machine is the worker machine. In future versions of {product-title}, different types of compute machines, such as infrastructure machines, might be used by default. - -[NOTE] -==== -Compute machine sets are groupings of compute machine resources under the `machine-api` namespace. Compute machine sets are configurations that are designed to start new compute machines on a specific cloud provider. Conversely, machine config pools (MCPs) are part of the Machine Config Operator (MCO) namespace. An MCP is used to group machines together so the MCO can manage their configurations and facilitate their upgrades. -==== - -[id="defining-masters_{context}"] -== Cluster control planes - -In a Kubernetes cluster, the _master_ nodes run services that are required to control the Kubernetes cluster. In {product-title}, the control plane is comprised of control plane machines that have a `master` machine role. They contain more than just the Kubernetes services for managing the {product-title} cluster. - -For most {product-title} clusters, control plane machines are defined by a series of standalone machine API resources. For supported cloud provider and {product-title} version combinations, control planes can be managed with control plane machine sets. Extra controls apply to control plane machines to prevent you from deleting all control plane machines and breaking your cluster. - -[NOTE] -==== -Exactly three control plane nodes must be used for all production deployments. -==== - -Services that fall under the Kubernetes category on the control plane include the Kubernetes API server, etcd, the Kubernetes controller manager, and the Kubernetes scheduler. - -.Kubernetes services that run on the control plane -[cols="1,2",options="header"] -|=== -|Component |Description -|Kubernetes API server -|The Kubernetes API server validates and configures the data for pods, services, -and replication controllers. It also provides a focal point for the shared state of the cluster. - -|etcd -|etcd stores the persistent control plane state while other components watch etcd for -changes to bring themselves into the specified state. -//etcd can be optionally configured for high availability, typically deployed with 2n+1 peer services. 
- -|Kubernetes controller manager -|The Kubernetes controller manager watches etcd for changes to objects such as -replication, namespace, and service account controller objects, and then uses the -API to enforce the specified state. Several such processes create a cluster with -one active leader at a time. - -|Kubernetes scheduler -|The Kubernetes scheduler watches for newly created pods without an assigned node and selects the best node to host the pod. -|=== - -There are also OpenShift services that run on the control plane, which include the OpenShift API server, OpenShift controller manager, OpenShift OAuth API server, and OpenShift OAuth server. - -.OpenShift services that run on the control plane -[cols="1,2",options="header"] -|=== -|Component |Description -|OpenShift API server -|The OpenShift API server validates and configures the data for OpenShift resources, such as projects, routes, and templates. - -The OpenShift API server is managed by the OpenShift API Server Operator. -|OpenShift controller manager -|The OpenShift controller manager watches etcd for changes to OpenShift objects, such as project, route, and template controller objects, and then uses the API to enforce the specified state. - -The OpenShift controller manager is managed by the OpenShift Controller Manager Operator. -|OpenShift OAuth API server -|The OpenShift OAuth API server validates and configures the data to authenticate to {product-title}, such as users, groups, and OAuth tokens. - -The OpenShift OAuth API server is managed by the Cluster Authentication Operator. -|OpenShift OAuth server -|Users request tokens from the OpenShift OAuth server to authenticate themselves to the API. - -The OpenShift OAuth server is managed by the Cluster Authentication Operator. -|=== - -Some of these services on the control plane machines run as systemd services, while others run as static pods. - -Systemd services are appropriate for services that you need to always come up on that particular system shortly after it starts. For control plane machines, those include sshd, which allows remote login. It also includes services such as: - -* The CRI-O container engine (crio), which runs and manages the containers. {product-title} {product-version} uses CRI-O instead of the Docker Container Engine. -* Kubelet (kubelet), which accepts requests for managing containers on the machine from control plane services. - -CRI-O and Kubelet must run directly on the host as systemd services because they need to be running before you can run other containers. - -The [x-]`installer-*` and [x-]`revision-pruner-*` control plane pods must run with root permissions because they write to the `/etc/kubernetes` directory, which is owned by the root user. These pods are in the following namespaces: - -* `openshift-etcd` -* `openshift-kube-apiserver` -* `openshift-kube-controller-manager` -* `openshift-kube-scheduler` diff --git a/modules/architecture-platform-benefits.adoc b/modules/architecture-platform-benefits.adoc deleted file mode 100644 index a8de25b3f252..000000000000 --- a/modules/architecture-platform-benefits.adoc +++ /dev/null @@ -1,124 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/architecture.adoc - -:_content-type: CONCEPT -[id="architecture-platform-benefits_{context}"] -= {product-title} overview - -//// -Red Hat was one of the early contributors of Kubernetes and quickly integrated -it as the centerpiece of its {product-title} product line. 
Today, Red Hat -continues as one of the largest contributors to Kubernetes across a wide range -of technology areas. -//// - -{product-title} provides enterprise-ready enhancements to Kubernetes, including the following enhancements: - -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -* Hybrid cloud deployments. You can deploy {product-title} clusters to a variety of public cloud platforms or in your data center. -endif::[] -* Integrated Red Hat technology. Major components in {product-title} come from {op-system-base-full} and related Red Hat technologies. {product-title} benefits from the intense testing and certification initiatives for Red Hat's enterprise quality software. -* Open source development model. Development is completed in the open, and the source code is available from public software repositories. This open collaboration fosters rapid innovation and development. - -Although Kubernetes excels at managing your applications, it does not specify -or manage platform-level requirements or deployment processes. Powerful and -flexible platform management tools and processes are important benefits that -{product-title} {product-version} offers. The following sections describe some -unique features and benefits of {product-title}. - -[id="architecture-custom-os_{context}"] -== Custom operating system - -{product-title} uses {op-system-first}, a container-oriented operating system that is specifically designed for running containerized applications from {product-title} and works with new tools to provide fast installation, Operator-based management, and simplified upgrades. - -{op-system} includes: - -* Ignition, which {product-title} uses as a firstboot system configuration for initially bringing up and configuring machines. -* CRI-O, a Kubernetes native container runtime implementation that integrates closely with the operating system to deliver an efficient and optimized Kubernetes experience. CRI-O provides facilities for running, stopping, and restarting containers. It fully replaces the Docker Container Engine, which was used in {product-title} 3. -* Kubelet, the primary node agent for Kubernetes that is responsible for -launching and monitoring containers. - -In {product-title} {product-version}, you must use {op-system} for all control -plane machines, but you can use Red Hat Enterprise Linux (RHEL) as the operating -system for compute machines, which are also known as worker machines. If you choose to use RHEL workers, you -must perform more system maintenance than if you use {op-system} for all of the -cluster machines. - -[id="architecture-platform-management_{context}"] -== Simplified installation and update process - -With {product-title} {product-version}, if you have an account with the right -permissions, you can deploy a production cluster in supported clouds by running -a single command and providing a few values. You can also customize your cloud -installation or install your cluster in your data center if you use a supported -platform. - -For clusters that use {op-system} for all machines, updating, or -upgrading, {product-title} is a simple, highly-automated process. Because -{product-title} completely controls the systems and services that run on each -machine, including the operating system itself, from a central control plane, -upgrades are designed to become automatic events. 
If your cluster contains -RHEL worker machines, the control plane benefits from the streamlined update -process, but you must perform more tasks to upgrade the RHEL machines. - -[id="architecture-key-features_{context}"] -== Other key features - -Operators are both the fundamental unit of the {product-title} {product-version} -code base and a convenient way to deploy applications and software components -for your applications to use. In {product-title}, Operators serve as the platform foundation and remove the need for manual upgrades of operating systems and control plane applications. {product-title} Operators such as the -Cluster Version Operator and Machine Config Operator allow simplified, -cluster-wide management of those critical components. - -Operator Lifecycle Manager (OLM) and the OperatorHub provide facilities for -storing and distributing Operators to people developing and deploying applications. - -The Red Hat Quay Container Registry is a Quay.io container registry that serves -most of the container images and Operators to {product-title} clusters. -Quay.io is a public registry version of Red Hat Quay that stores millions of images -and tags. - -Other enhancements to Kubernetes in {product-title} include improvements in -software defined networking (SDN), authentication, log aggregation, monitoring, -and routing. {product-title} also offers a comprehensive web console and the -custom OpenShift CLI (`oc`) interface. - - -//// -{product-title} includes the following infrastructure components: - -* OpenShift API server -* Kubernetes API server -* Kubernetes controller manager -* Kubernetes nodes/kubelet -* CRI-O -* {op-system} -* Infrastructure Operators -* Networking (SDN/Router/DNS) -* Storage -* Monitoring -* Telemetry -* Security -* Authorization/Authentication/Oauth -* Logging - -It also offers the following user interfaces: -* Web Console -* OpenShift CLI (`oc`) -* Rest API -//// - - -[id="architecture-overview-image_{context}"] -== {product-title} lifecycle - -The following figure illustrates the basic {product-title} lifecycle: - -* Creating an {product-title} cluster -* Managing the cluster -* Developing and deploying applications -* Scaling up applications - -.High level {product-title} overview -image::product-workflow-overview.png[High-level {product-title} flow] diff --git a/modules/architecture-platform-introduction.adoc b/modules/architecture-platform-introduction.adoc deleted file mode 100644 index a31f748429c7..000000000000 --- a/modules/architecture-platform-introduction.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// * architecture/architecture.adoc - -[id="architecture-platform-introduction_{context}"] -= Introduction to {product-title} - -{product-title} is a platform for developing and running containerized -applications. It is designed to allow applications and the data centers -that support them to expand from just a few machines and applications to -thousands of machines that serve millions of clients. - -With its foundation in Kubernetes, {product-title} incorporates the same -technology that serves as the engine for massive telecommunications, streaming -video, gaming, banking, and other applications. Its implementation in open -Red Hat technologies lets you extend your containerized applications beyond a -single cloud to on-premise and multi-cloud environments. - -image::oke-arch-ocp-stack.png[Red Hat {oke}] - -// The architecture presented here is meant to give you insights into how {product-title} works. 
It does this by stepping you through the process of installing an {product-title} cluster, managing the cluster, and developing and deploying applications on it. Along the way, this architecture describes: - -// * Major components of {product-title} -// * Ways of exploring different aspects of {product-title} yourself -// * Available frontdoors (and backdoors) to modify the installation and management of your {product-title} cluster -// * Different types of container application types diff --git a/modules/architecture-rhcos-updating-bootloader.adoc b/modules/architecture-rhcos-updating-bootloader.adoc deleted file mode 100644 index 694af67ecf58..000000000000 --- a/modules/architecture-rhcos-updating-bootloader.adoc +++ /dev/null @@ -1,103 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc - -[id="architecture-rhcos-updating-bootloader.adoc_{context}"] -= Updating the bootloader using bootupd - -To update the bootloader by using `bootupd`, you must either install `bootupd` on {op-system} machines manually or provide a machine config with the enabled `systemd` unit. Unlike `grubby` or other bootloader tools, `bootupd` does not manage kernel space configuration such as passing kernel arguments. - -After you have installed `bootupd`, you can manage it remotely from the {product-title} cluster. - -[NOTE] -==== -It is recommended that you use `bootupd` only on bare metal or virtualized hypervisor installations, such as for protection against the BootHole vulnerability. -==== - -.Manual install method -You can manually install `bootupd` by using the `bootctl` command-line tool. - -. Inspect the system status: -+ -[source,terminal] ----- -# bootupctl status ----- -+ -.Example output for `x86_64` -[source,terminal] ----- -Component EFI - Installed: grub2-efi-x64-1:2.04-31.fc33.x86_64,shim-x64-15-8.x86_64 - Update: At latest version ----- -ifndef::openshift-origin[] -+ -.Example output for `aarch64` -[source, terminal] ----- -Component EFI - Installed: grub2-efi-aa64-1:2.02-99.el8_4.1.aarch64,shim-aa64-15.4-2.el8_1.aarch64 - Update: At latest version ----- -endif::openshift-origin[] - -[start=2] -. {op-system} images created without `bootupd` installed on them require an explicit adoption phase. -+ -If the system status is `Adoptable`, perform the adoption: -+ -[source,terminal] ----- -# bootupctl adopt-and-update ----- -+ -.Example output -[source,terminal] ----- -Updated: grub2-efi-x64-1:2.04-31.fc33.x86_64,shim-x64-15-8.x86_64 ----- - -. If an update is available, apply the update so that the changes take effect on the next reboot: -+ -[source,terminal] ----- -# bootupctl update ----- -+ -.Example output -[source,terminal] ----- -Updated: grub2-efi-x64-1:2.04-31.fc33.x86_64,shim-x64-15-8.x86_64 ----- - -.Machine config method -Another way to enable `bootupd` is by providing a machine config. 
- -* Provide a machine config file with the enabled `systemd` unit, as shown in the following example: -+ -.Example output -[source,yaml] ----- - variant: rhcos - version: 1.1.0 - systemd: - units: - - name: custom-bootupd-auto.service - enabled: true - contents: | - [Unit] - Description=Bootupd automatic update - - [Service] - ExecStart=/usr/bin/bootupctl update - RemainAfterExit=yes - - [Install] - WantedBy=multi-user.target ----- diff --git a/modules/argo-cd-command-line.adoc b/modules/argo-cd-command-line.adoc deleted file mode 100644 index f240c4a954df..000000000000 --- a/modules/argo-cd-command-line.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * argo-cd-custom-resource-properties.adoc - -:_content-type: PROCEDURE -[id="command-line-tool_{context}"] -= Argo CD CLI tool - -[role="_abstract"] -The Argo CD CLI tool is a tool used to configure Argo CD through the command line. {gitops-title} does not support this binary. Use the OpenShift Console to configure the Argo CD. - diff --git a/modules/assisted-installer-adding-hosts-to-the-cluster.adoc b/modules/assisted-installer-adding-hosts-to-the-cluster.adoc deleted file mode 100644 index 1942549311d3..000000000000 --- a/modules/assisted-installer-adding-hosts-to-the-cluster.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// This is included in the following assemblies: -// -// assisted-installer-installing.adoc - -:_content-type: PROCEDURE -[id="adding-hosts-to-the-cluster_{context}"] -= Adding hosts to the cluster - -You must add one or more hosts to the cluster. Adding a host to the cluster involves generating a discovery ISO. The discovery ISO runs {op-system-first} in-memory with an agent. Perform the following procedure for each host on the cluster. - -.Procedure - -. Click the *Add hosts* button and select the installation media. - -.. Select *Minimal image file: Provision with virtual media* to download a smaller image that will fetch the data needed to boot. The nodes must have virtual media capability. This is the recommended method. - -.. Select *Full image file: Provision with physical media* to download the larger full image. - -. Add an SSH public key so that you can connect to the cluster nodes as the `core` user. Having a login to the cluster nodes can provide you with debugging information during the installation. - -. Optional: If the cluster hosts are behind a firewall that requires the use of a proxy, select *Configure cluster-wide proxy settings*. Enter the username, password, IP address and port for the HTTP and HTTPS URLs of the proxy server. - -. Click *Generate Discovery ISO*. - -. Download the discovery ISO. diff --git a/modules/assisted-installer-assisted-installer-prerequisites.adoc b/modules/assisted-installer-assisted-installer-prerequisites.adoc deleted file mode 100644 index 564af9a31f0c..000000000000 --- a/modules/assisted-installer-assisted-installer-prerequisites.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// This is included in the following assemblies: -// -// installing-on-prem-assisted.adoc -:_content-type: CONCEPT - -[id='assisted-installer-prerequisites_{context}'] -= Assisted Installer prerequisites - -The {ai-full} validates the following prerequisites to ensure successful installation. 
- -== Hardware - -For control plane nodes or the {sno} node, nodes must have at least the following resources: - -* 8 CPU cores -* 16.00 GiB RAM -* 100 GB storage -* 10ms write speed or less for etcd `wal_fsync_duration_seconds` - -For worker nodes, each node must have at least the following resources: - -* 4 CPU cores -* 16.00 GiB RAM -* 100 GB storage - -== Networking - -The network must meet the following requirements: - -* A DHCP server unless using static IP addressing. -* A base domain name. You must ensure that the following requirements are met: - - There is no wildcard, such as `*..`, or the installation will not proceed. - - A DNS A/AAAA record for `api..`. - - A DNS A/AAAA record with a wildcard for `*.apps..`. -* Port `6443` is open for the API URL if you intend to allow users outside the firewall to access the cluster via the `oc` CLI tool. -* Port `443` is open for the console if you intend to allow users outside the firewall to access the console. - -[IMPORTANT] -==== -DNS A/AAAA record settings at top-level domain registrars can take significant time to update. Ensure the A/AAAA record DNS settings are working before installation to prevent installation delays. -==== - -The {product-title} cluster's network must also meet the following requirements: - -* Connectivity between all cluster nodes -* Connectivity for each node to the internet -* Access to an NTP server for time synchronization between the cluster nodes - -== Preflight validations - -The {ai-full} ensures the cluster meets the prerequisites before installation, because it eliminates complex post-installation troubleshooting, thereby saving significant amounts of time and effort. Before installing software on the nodes, the {ai-full} conducts the following validations: - -* Ensures network connectivity -* Ensures sufficient network bandwidth -* Ensures connectivity to the registry -* Ensures time synchronization between cluster nodes -* Verifies that the cluster nodes meet the minimum hardware requirements -* Validates the installation configuration parameters - -If the {ai-full} does not successfully validate the foregoing requirements, installation will not proceed. diff --git a/modules/assisted-installer-booting-with-a-usb-drive.adoc b/modules/assisted-installer-booting-with-a-usb-drive.adoc deleted file mode 100644 index d0b0dfb8a6ab..000000000000 --- a/modules/assisted-installer-booting-with-a-usb-drive.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// This is included in the following assemblies: -// -// installing_sno/install-sno-installing-sno.adoc - -:_content-type: PROCEDURE -[id="booting-with-a-usb-drive_{context}"] -= Booting with a USB drive - -To register nodes with the {ai-full} using a bootable USB drive, use the following procedure. - -.Procedure - -. Attach the {op-system} discovery ISO to the target host. - -. Configure the boot drive order in the server BIOS settings to boot from the attached discovery ISO, and then reboot the server. - -. On the administration host, return to the browser. Wait for the host to appear in the list of discovered hosts. 
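If you are preparing the bootable USB drive on a Linux administration host, one possible way to write the downloaded discovery ISO to the drive is with `dd`, as in the following sketch. The ISO filename and the target device are placeholders for your environment; double-check the device name before running the command, because `dd` overwrites the target device.

[source,terminal]
----
# dd if=./discovery_image.iso of=/dev/sdX bs=4M status=progress oflag=sync
----

After the write completes, attach the drive to the target host and continue with the procedure above.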
diff --git a/modules/assisted-installer-completing-the-installation.adoc b/modules/assisted-installer-completing-the-installation.adoc deleted file mode 100644 index ef22614e4013..000000000000 --- a/modules/assisted-installer-completing-the-installation.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// This is included in the following assemblies: -// -// assisted-installer-installing.adoc - -:_content-type: PROCEDURE -[id="completing-the-installation_{context}"] -= Completing the installation - -After the cluster is installed and initialized, the {ai-full} indicates that the installation is finished. The {ai-full} provides the console URL, the `kubeadmin` username and password, and the `kubeconfig` file. Additionally, the {ai-full} provides cluster details including the {product-title} version, base domain, CPU architecture, API and Ingress IP addresses, and the cluster and service network IP addresses. - -.Prerequisites - -* You have installed the `oc` CLI tool. - - -.Procedure - -. Make a copy of the `kubeadmin` username and password. - -. Download the `kubeconfig` file and copy it to the `auth` directory under your working directory: -+ -[source,terminal] ----- -$ mkdir -p /auth ----- -+ -[source,terminal] ----- -$ cp kubeadmin /auth ----- -+ -[NOTE] -==== -The `kubeconfig` file is available for download for 24 hours after completing the installation. -==== - -. Add the `kubeconfig` file to your environment: -+ -[source,terminal] ----- -$ export KUBECONFIG=/auth/kubeconfig ----- - -. Login with the `oc` CLI tool: -+ -[source,terminal] ----- -$ oc login -u kubeadmin -p ----- -+ -Replace `` with the password of the `kubeadmin` user. - -. Click on the web console URL or click *Launch OpenShift Console* to open the console. - -. Enter the `kubeadmin` username and password. Follow the instructions in the {product-title} console to configure an identity provider and configure alert receivers. - -. Add a bookmark of the {product-title} console. diff --git a/modules/assisted-installer-configuring-host-network-interfaces.adoc b/modules/assisted-installer-configuring-host-network-interfaces.adoc deleted file mode 100644 index daf69425239a..000000000000 --- a/modules/assisted-installer-configuring-host-network-interfaces.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// This is included in the following assemblies: -// -// assisted-installer-installing.adoc - -:_content-type: PROCEDURE -[id="configuring-host-network-interfaces_{context}"] -= Optional: Configuring host network interfaces - -The {ai-full} supports IPv4 networking and dual stack networking. The {ai-full} also supports configuring host network interfaces with the NMState library, a declarative network manager API for hosts. You can use NMState to deploy hosts with static IP addressing, bonds, VLANs and other advanced networking features. If you chose to configure host network interfaces, you must set network-wide configurations. Then, you must create a host-specific configuration for each host and generate the discovery ISO with the host-specific settings. - -.Procedure - -. Select the internet protocol version. Valid options are *IPv4* and *Dual stack*. - -. If the cluster hosts are on a shared VLAN, enter the VLAN ID. - -. Enter the network-wide IP addresses. If you selected *Dual stack* networking, you must enter both IPv4 and IPv6 addresses. - -.. Enter the cluster network's IP address range in CIDR notation. - -.. Enter the default gateway IP address. - -.. Enter the DNS server IP addresss. - -. Enter the host-specific configuration. - -.. 
If you are only setting a static IP address that uses a single network interface, use the form view to enter the IP address and the MAC address for the host. - -.. If you are using multiple interfaces, bonding, or other advanced networking features, use the YAML view and enter the desired network state for the host using NMState syntax. - -.. Add the MAC address and interface name for each interface used in your network configuration. diff --git a/modules/assisted-installer-configuring-hosts.adoc b/modules/assisted-installer-configuring-hosts.adoc deleted file mode 100644 index 730906f8bfcd..000000000000 --- a/modules/assisted-installer-configuring-hosts.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// This is included in the following assemblies: -// -// assisted-installer-installing.adoc - -:_content-type: PROCEDURE -[id="configuring-hosts_{context}"] -= Configuring hosts - -After booting the hosts with the discovery ISO, the hosts will appear in the table at the bottom of the page. You can configure the hostname, role, and installation disk for each host. - -.Procedure - -. Select a host. - -. From the *Actions* list, select *Change hostname*. You must ensure each host has a valid and unique hostname. If necessary, enter a new name for the host and click *Change*. - -. For multi-host clusters, in the *Role* column next to the host name, you can click on the menu to change the role of the host. -+ -If you do not select a role, the {ai-full} will assign the role automatically. The minimum hardware requirements for control plane nodes exceed that of worker nodes. If you assign a role to a host, ensure that you assign the control plane role to hosts that meet the minimum hardware requirements. - -. To the left of the checkbox next to a host name, click to expand the host details. If you have multiple disk drives, you can select a different disk drive to act as the installation disk. - -. Repeat this procedure for each host. - -Once all cluster hosts appear with a status of *Ready*, proceed to the next step. diff --git a/modules/assisted-installer-configuring-networking.adoc b/modules/assisted-installer-configuring-networking.adoc deleted file mode 100644 index 02abe638d69d..000000000000 --- a/modules/assisted-installer-configuring-networking.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// This is included in the following assemblies: -// -// assisted-installer-installing.adoc - -:_content-type: PROCEDURE -[id="configuring-networking_{context}"] -= Configuring networking - -Before installing {product-title}, you must configure the cluster network. - -.Procedure - -. In the *Networking* page, select one of the following if it is not already selected for you: -+ -** *Cluster-Managed Networking:* Selecting cluster-managed networking means that the {ai-full} will configure a standard network topology, including `keepalived` and Virtual Router Redundancy Protocol (VRRP) for managing the API and Ingress VIP addresses. -+ -** *User-Managed Networking*: Selecting user-managed networking allows you to deploy {product-title} with a non-standard network topology. For example, if you want to deploy with an external load balancer instead of `keepalived` and VRRP, or if you intend to deploy the cluster nodes across many distinct L2 network segments. - -. For cluster-managed networking, configure the following settings: - -.. Define the *Machine network*. You can use the default network or select a subnet. - -.. Define an *API virtual IP*. 
An API virtual IP provides an endpoint for all users to interact with, and configure the platform. - -.. Define an *Ingress virtual IP*. An Ingress virtual IP provides an endpoint for application traffic flowing from outside the cluster. - -. For user-managed networking, configure the following settings: - -.. Select your *Networking stack type*: -+ -** *IPv4*: Select this type when your hosts are only using IPv4. -+ -** *Dual-stack*: You can select dual-stack when your hosts are using IPv4 together with IPv6. - -.. Define the *Machine network*. You can use the default network or select a subnet. - -.. Define an *API virtual IP*. An API virtual IP provides an endpoint for all users to interact with, and configure the platform. - -.. Define an *Ingress virtual IP*. An Ingress virtual IP provides an endpoint for application traffic flowing from outside the cluster. - -.. Optional: You can select *Allocate IPs via DHCP server* to automatically allocate the *API IP* and *Ingress IP* using the DHCP server. - -. Optional: Select *Use advanced networking* to configure the following advanced networking properties: - -** *Cluster network CIDR*: Define an IP address block from which Pod IP addresses are allocated. - -** *Cluster network host prefix*: Define a subnet prefix length to assign to each node. - -** *Service network CIDR*: Define an IP address to use for service IP addresses. - -** *Network type*: Select either *Software-Defined Networking (SDN)* for standard networking or *Open Virtual Networking (OVN)* for telco features. diff --git a/modules/assisted-installer-installing-the-cluster.adoc b/modules/assisted-installer-installing-the-cluster.adoc deleted file mode 100644 index 88dde8da1bbd..000000000000 --- a/modules/assisted-installer-installing-the-cluster.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// This is included in the following assemblies: -// -// assisted-installer-installing.adoc - -:_content-type: PROCEDURE -[id="installing-the-cluster_{context}"] -= Installing the cluster - -After you have completed the configuration and all the nodes are *Ready*, you can begin installation. The installation process takes a considerable amount of time, and you can monitor the installation from the {ai-full} web console. Nodes will reboot during the installation, and they will initialize after installation. - -.Procedure - -* Press *Begin installation*. - -. Click on the link in the *Status* column of the *Host Inventory* list to see the installation status of a particular host. 
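If you prefer to spot-check progress from a terminal as well, you can do so once the `kubeconfig` file is available, as described in "Completing the installation". The following commands are only a sketch and assume that the `KUBECONFIG` environment variable already points to the downloaded file; while the installation is still running, some nodes and Operators are expected to report as not ready.

[source,terminal]
----
$ oc get nodes
----

[source,terminal]
----
$ oc get clusteroperators
----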
diff --git a/modules/assisted-installer-pre-installation-considerations.adoc b/modules/assisted-installer-pre-installation-considerations.adoc deleted file mode 100644 index ce46b6db4d63..000000000000 --- a/modules/assisted-installer-pre-installation-considerations.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// This is included in the following assemblies: -// -// installing-on-prem-assisted.adoc - -:_content-type: CONCEPT -[id='pre-installation-considerations_{context}'] -= Pre-installation considerations - -Before installing {product-title} with the {ai-full}, you must consider the following configuration choices: - -* Which base domain to use -* Which {product-title} product version to install -* Whether to install a full cluster or {sno} -* Whether to use a DHCP server or a static network configuration -* Whether to use IPv4 or dual-stack networking -* Whether to install {VirtProductName} -* Whether to install {rh-storage-first} -* Whether to integrate with vSphere when installing on vSphere diff --git a/modules/assisted-installer-release-notes.adoc b/modules/assisted-installer-release-notes.adoc deleted file mode 100644 index dc5bc02db2ac..000000000000 --- a/modules/assisted-installer-release-notes.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// This is included in the following assemblies: -// -//installing_bare_metal_assisted/installing-bare-metal-assisted.adoc - -:_content-type: REFERENCE -[id="assisted-installer-release-notes_{context}"] -= {ai-full} {ai-version} release notes - -[id="ai-release-notes-about-this-release_{context}"] -== About this release - -These release notes track the development of {ai-full} {ai-version}. - -This product was previously released as a Technology Preview product and is now generally available and enabled by default in the {cluster-manager-first}. - -[id="ai-release-notes-bug-fixes_{context}"] -== Bug fixes - -* Previously, users could define `localhost` as a valid host name for all of their hosts. As a result, host names were not unique, and {ai-full} could not install the cluster. With this release, users cannot complete the cluster installation if any of the hosts are named `localhost`. An error appears and users must rename the hosts. -//(link:https://issues.redhat.com/browse/MGMT-8088[MGMT-8088]) - -* Previously, in the *OpenShift Web Console troubleshooting* window, the *Optional* field remained blank when undefined instead of displaying an IP address. With this release, the *Optional* field was removed. -//(link:https://issues.redhat.com/browse/MGMT-9283[MGMT-9283]) - -* Previously, when installing a cluster on vSphere, the {ai-full} created machines and `MachineSet` objects for every virtual machine. With this release, {ai-full} no longer creates machines or `MachineSet` objects for user-provisioned VMs. -//(link:https://issues.redhat.com/browse/MGMT-9559[MGMT-9559]) - -* Previously, if Operators failed to install during an installation with {ai-full}, users received an error message and were directed to reset the cluster installation. With this release, if Operators fail to install, the cluster is automatically degraded. - -* Previously, after installing an Operator using {ai-full}, the Operator appeared as *available* in the cluster *Status* area in the *Installation progress* page. However, users had to check the Operator avilability in the {product-title} web console. With this release, the Operator appears as *installed* in the *Status* area. 
- -[id="ai-release-notes-known-issues_{context}"] -== Known issues - -* The minimum disk size required for installing on bare metal using {ai-full} is specified as 120GB. The actual required minimum disk size is 100GB. -//(link:https://issues.redhat.com/browse/MGMT-9682[MGMT-9682]) diff --git a/modules/assisted-installer-setting-the-cluster-details.adoc b/modules/assisted-installer-setting-the-cluster-details.adoc deleted file mode 100644 index e1718b286592..000000000000 --- a/modules/assisted-installer-setting-the-cluster-details.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// This is included in the following assemblies: -// -// installing-on-prem-assisted.adoc - -:_content-type: PROCEDURE -[id='setting-the-cluster-details_{context}'] -= Setting the cluster details - -To create a cluster with the {ai-full} web user interface, use the following procedure. - -.Procedure - -. Log in to the link:https://console.redhat.com[RedHat Hybrid Cloud Console]. - -. In the menu, click *OpenShift*. - -. Click *Create cluster*. - -. Click the *Datacenter* tab. - -. Under the *{ai-full}* section, select *Create cluster*. - -. Enter a name for the cluster in the *Cluster name* field. - -. Enter a base domain for the cluster in the *Base domain* field. All subdomains for the cluster will use this base domain. -+ -[NOTE] -==== -The base domain must be a valid DNS name. You must not have a wild card domain set up for the base domain. -==== - -. Select the version of {product-title} to install. - -. Optional: Select *Install single node Openshift (SNO)* if you want to install {product-title} on a single node. - -. Optional: The {ai-full} already has the pull secret associated to your account. If you want to use a different pull secret, select *Edit pull secret*. - -. Optional: {ai-full} defaults to using x86_64 CPU architecture. If you are installing {product-title} on 64-bit ARM CPUs, select *Use arm64 CPU architecture*. Keep in mind, some features are not available with 64-bit ARM CPU architecture. - -. Optional: If you are using a static IP configuration for the cluster nodes instead of DHCP reservations, select *Static network configuration*. - -. Optional: If you want to enable encryption of the installation disks, select *Enable encryption of installation disks*. For multi-node clusters, you can choose to encrypt the control plane and worker node installation disks separately. - -[IMPORTANT] -==== -You cannot change the base domain, the SNO checkbox, the CPU architecture, the host's network configuration, or the disk-encryption after installation begins. -==== diff --git a/modules/assisted-installer-using-the-assisted-installer.adoc b/modules/assisted-installer-using-the-assisted-installer.adoc deleted file mode 100644 index e4df53f5e835..000000000000 --- a/modules/assisted-installer-using-the-assisted-installer.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// This is included in the following assemblies: -// -// installing-on-prem-assisted.adoc -:_content-type: CONCEPT - -[id="using-the-assisted-installer_{context}"] -= Using the Assisted Installer - -The {product-title} link:https://console.redhat.com/openshift/assisted-installer/clusters/~new[{ai-full}] is a user-friendly installation solution offered on the link:http://console.redhat.com[Red Hat Hybrid Cloud Console]. The {ai-full} supports the various deployment platforms with a focus on bare metal, Nutanix, and vSphere infrastructures. - -The {ai-full} provides installation functionality as a service. 
This software-as-a-service (SaaS) approach has the following advantages: - -* *Web user interface:* The web user interface performs cluster installation without the user having to create the installation configuration files manually. -* *No bootstrap node:* A bootstrap node is not required when installing with the {ai-full}. The bootstrapping process executes on a node within the cluster. -* *Hosting:* The {ai-full} hosts: - - Ignition files - - The installation configuration - - A discovery ISO - - The installer -* *Streamlined installation workflow:* Deployment does not require in-depth knowledge of {product-title}. The {ai-full} provides reasonable defaults and provides the installer as a service, which: - - Eliminates the need to install and run the {product-title} installer locally. - - Ensures the latest version of the installer up to the latest tested z-stream releases. Older versions remain available, if needed. - - Enables building automation by using the API without the need to run the {product-title} installer locally. -* *Advanced networking:* The {ai-full} supports IPv4 networking with SDN and OVN, IPv6 and dual stack networking with OVN only, NMState-based static IP addressing, and an HTTP/S proxy. OVN is the default Container Network Interface (CNI) for OpenShift Container Platform 4.12 and later releases, but you can use SDN. - -* *Pre-installation validation:* The {ai-full} validates the configuration before installation to ensure a high probability of success. Validation includes: - - Ensuring network connectivity - - Ensuring sufficient network bandwidth - - Ensuring connectivity to the registry - - Ensuring time synchronization between cluster nodes - - Verifying that the cluster nodes meet the minimum hardware requirements - - Validating the installation configuration parameters -* *REST API:* The {ai-full} has a REST API, enabling automation. - -The {ai-full} supports installing {product-title} on premises in a connected environment, including with an optional HTTP/S proxy. It can install the following: - -* Highly available {product-title} or Single Node OpenShift (SNO) -+ -[NOTE] -==== -SNO is not supported on {ibmzProductName} and {ibmpowerProductName}. -==== -+ -* {product-title} on bare metal, Nutanix, or vSphere with full platform integration, or other virtualization platforms without integration -* Optionally {VirtProductName} and {rh-storage} (formerly OpenShift Container Storage) - -The user interface provides an intuitive interactive workflow where automation does not exist or is not required. Users may also automate installations using the REST API. - -See link:https://console.redhat.com/openshift/assisted-installer/clusters/~new[Install OpenShift with the Assisted Installer] to create an {product-title} cluster with the {ai-full}. See the link:https://access.redhat.com/documentation/en-us/assisted_installer_for_openshift_container_platform/2022/html-single/assisted_installer_for_openshift_container_platform/index[Assisted Installer for OpenShift Container Platform] documentation for details on using the {ai-full}. 
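As a rough illustration of the REST API, automation usually amounts to authenticated HTTPS requests against the {ai-full} service. The following sketch assumes that a valid bearer token for your Red Hat account is already stored in the `API_TOKEN` environment variable and that the v2 `clusters` endpoint shown is correct for your environment; treat the URL and the response format as assumptions and consult the {ai-full} API documentation for the authoritative details.

[source,terminal]
----
$ curl -s -H "Authorization: Bearer ${API_TOKEN}" \
  https://api.openshift.com/api/assisted-install/v2/clusters | jq '.[].name'
----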
diff --git a/modules/assuming-an-aws-iam-role-in-your-own-pods.adoc b/modules/assuming-an-aws-iam-role-in-your-own-pods.adoc deleted file mode 100644 index 62e76c2bac51..000000000000 --- a/modules/assuming-an-aws-iam-role-in-your-own-pods.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/assuming-an-aws-iam-role-for-a-service-account.adoc - -:_content-type: PROCEDURE -[id="assuming-an-aws-iam-role-in-your-own-pods_{context}"] -= Assuming an AWS IAM role in your own pods - -Follow the procedures in this section to enable a service account to assume an AWS Identity and Access Management (IAM) role in a pod deployed in a user-defined project. - -You can create the required resources, including an AWS IAM role, a service account, a container image that includes an AWS SDK, and a pod deployed by using the image. In the example, the AWS Boto3 SDK for Python is used. You can also verify that the pod identity webhook mutates the AWS environment variables, the volume mount, and the token volume into your pod. Additionally, you can check that the service account assumes the AWS IAM role in your pod and can successfully run AWS SDK operations. diff --git a/modules/auth-allowing-javascript-access-api-server.adoc b/modules/auth-allowing-javascript-access-api-server.adoc deleted file mode 100644 index 8330c0f70297..000000000000 --- a/modules/auth-allowing-javascript-access-api-server.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * security/allowing-javascript-access-api-server.adoc - -:_content-type: PROCEDURE -[id="auth-allowing-javascript-access-api-server_{context}"] -= Allowing JavaScript-based access to the API server from additional hosts - -The default {product-title} configuration only allows the web console to send requests to the API server. - -If you need to access the API server or OAuth server from a JavaScript -application using a different hostname, you can configure additional hostnames -to allow. - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Edit the `APIServer` resource: -+ -[source,terminal] ----- -$ oc edit apiserver.config.openshift.io cluster ----- -+ -. Add the `additionalCORSAllowedOrigins` field under the `spec` section and -specify one or more additional hostnames: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: APIServer -metadata: - annotations: - release.openshift.io/create-only: "true" - creationTimestamp: "2019-07-11T17:35:37Z" - generation: 1 - name: cluster - resourceVersion: "907" - selfLink: /apis/config.openshift.io/v1/apiservers/cluster - uid: 4b45a8dd-a402-11e9-91ec-0219944e0696 -spec: - additionalCORSAllowedOrigins: - - (?i)//my\.subdomain\.domain\.com(:|\z) <1> ----- -<1> The hostname is specified as a link:https://github.com/google/re2/wiki/Syntax[Golang regular expression] that matches -against CORS headers from HTTP requests against the API server and OAuth server. -+ -[NOTE] -==== -This example uses the following syntax: - -* The `(?i)` makes it case-insensitive. -* The `//` pins to the beginning of the domain and matches the double slash -following `http:` or `https:`. -* The `\.` escapes dots in the domain name. -* The `(:|\z)` matches the end of the domain name `(\z)` or a port separator -`(:)`. -==== - -. Save the file to apply the changes. 
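If you want to make the same change non-interactively, for example from a script, one possible approach is a merge patch with `oc patch`, as in the following sketch. It reuses the hostname pattern from the example above; adjust the regular expression for your own domain, and note that a patch of this kind replaces the entire `additionalCORSAllowedOrigins` list, so include every origin that you want to allow.

[source,terminal]
----
$ oc patch apiserver.config.openshift.io cluster --type=merge \
  -p '{"spec":{"additionalCORSAllowedOrigins":["(?i)//my\\.subdomain\\.domain\\.com(:|\\z)"]}}'
----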
diff --git a/modules/authentication-api-impersonation.adoc b/modules/authentication-api-impersonation.adoc deleted file mode 100644 index 63e6de2d9f40..000000000000 --- a/modules/authentication-api-impersonation.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/understanding-authentication.adoc -// * applications/projects/creating-project-other-user.adoc -// * users_and_roles/impersonating-system-admin.adoc - -[id="authentication-api-impersonation_{context}"] -= API impersonation - -You can configure a request to the {product-title} API to act as though it originated from another user. For more information, see link:https://kubernetes.io/docs/reference/access-authn-authz/authentication/#user-impersonation[User impersonation] in the Kubernetes documentation. diff --git a/modules/authentication-authorization-common-terms.adoc b/modules/authentication-authorization-common-terms.adoc deleted file mode 100644 index e0e5cb6c239a..000000000000 --- a/modules/authentication-authorization-common-terms.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/index.adoc - -:_content-type: REFERENCE -[id="openshift-auth-common-terms_{context}"] -= Glossary of common terms for {product-title} authentication and authorization - -This glossary defines common terms that are used in {product-title} authentication and authorization. - -authentication:: -An authentication determines access to an {product-title} cluster and ensures only authenticated users access the {product-title} cluster. - -authorization:: -Authorization determines whether the identified user has permissions to perform the requested action. - -bearer token:: -Bearer token is used to authenticate to API with the header `Authorization: Bearer `. - -Cloud Credential Operator:: -The Cloud Credential Operator (CCO) manages cloud provider credentials as custom resource definitions (CRDs). - -config map:: -A config map provides a way to inject configuration data into the pods. You can reference the data stored in a config map in a volume of type `ConfigMap`. Applications running in a pod can use this data. - -containers:: -Lightweight and executable images that consist software and all its dependencies. Because containers virtualize the operating system, you can run containers in a data center, public or private cloud, or your local host. - -Custom Resource (CR):: -A CR is an extension of the Kubernetes API. - -group:: -A group is a set of users. A group is useful for granting permissions to multiple users one time. - -HTPasswd:: -HTPasswd updates the files that store usernames and password for authentication of HTTP users. - -Keystone:: -Keystone is an {rh-openstack-first} project that provides identity, token, catalog, and policy services. - -Lightweight directory access protocol (LDAP):: -LDAP is a protocol that queries user information. - -manual mode:: -In manual mode, a user manages cloud credentials instead of the Cloud Credential Operator (CCO). - -mint mode:: -Mint mode is the default and recommended best practice setting for the Cloud Credential Operator (CCO) to use on the platforms for which it is supported. In this mode, the CCO uses the provided administrator-level cloud credential to create new credentials for components in the cluster with only the specific permissions that are required. - -namespace:: -A namespace isolates specific system resources that are visible to all processes. 
Inside a namespace, only processes that are members of that namespace can see those resources. - -node:: -A node is a worker machine in the {product-title} cluster. A node is either a virtual machine (VM) or a physical machine. - -OAuth client:: -OAuth client is used to get a bearer token. - -OAuth server:: -The {product-title} control plane includes a built-in OAuth server that determines the user’s identity from the configured identity provider and creates an access token. - -OpenID Connect:: -The OpenID Connect is a protocol to authenticate the users to use single sign-on (SSO) to access sites that use OpenID Providers. - -passthrough mode:: -In passthrough mode, the Cloud Credential Operator (CCO) passes the provided cloud credential to the components that request cloud credentials. - -pod:: -A pod is the smallest logical unit in Kubernetes. A pod is comprised of one or more containers to run in a worker node. - -regular users:: -Users that are created automatically in the cluster upon first login or via the API. - -request header:: -A request header is an HTTP header that is used to provide information about HTTP request context, so that the server can track the response of the request. - -role-based access control (RBAC):: -A key security control to ensure that cluster users and workloads have access to only the resources required to execute their roles. - -service accounts:: -Service accounts are used by the cluster components or applications. - -system users:: -Users that are created automatically when the cluster is installed. - -users:: -Users is an entity that can make requests to API. diff --git a/modules/authentication-kubeadmin.adoc b/modules/authentication-kubeadmin.adoc deleted file mode 100644 index 1d07236299b3..000000000000 --- a/modules/authentication-kubeadmin.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/removing-kubeadmin.adoc -// * post_installation_configuration/preparing-for-users.adoc - -[id="understanding-kubeadmin_{context}"] -= The kubeadmin user - -{product-title} creates a cluster administrator, `kubeadmin`, after the -installation process completes. - -This user has the `cluster-admin` role automatically applied and is treated -as the root user for the cluster. The password is dynamically generated -and unique to your {product-title} environment. After installation -completes the password is provided in the installation program's output. -For example: - -[source,terminal] ----- -INFO Install complete! -INFO Run 'export KUBECONFIG=/auth/kubeconfig' to manage the cluster with 'oc', the OpenShift CLI. -INFO The cluster is ready when 'oc login -u kubeadmin -p ' succeeds (wait a few minutes). 
-INFO Access the OpenShift web-console here: https://console-openshift-console.apps.demo1.openshift4-beta-abcorp.com -INFO Login to the console with user: kubeadmin, password: ----- diff --git a/modules/authentication-prometheus-system-metrics.adoc b/modules/authentication-prometheus-system-metrics.adoc deleted file mode 100644 index e8a10be6a179..000000000000 --- a/modules/authentication-prometheus-system-metrics.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/understanding-authentication.adoc - -[id="authentication-prometheus-system-metrics_{context}"] -= Authentication metrics for Prometheus - -{product-title} captures the following Prometheus system metrics during authentication attempts: - -* `openshift_auth_basic_password_count` counts the number of `oc login` user name and password attempts. -* `openshift_auth_basic_password_count_result` counts the number of `oc login` user name and password attempts by result, `success` or `error`. -* `openshift_auth_form_password_count` counts the number of web console login attempts. -* `openshift_auth_form_password_count_result` counts the number of web console login attempts by result, `success` or `error`. -* `openshift_auth_password_total` counts the total number of `oc login` and web console login attempts. diff --git a/modules/authentication-remove-kubeadmin.adoc b/modules/authentication-remove-kubeadmin.adoc deleted file mode 100644 index bbe29a4ce955..000000000000 --- a/modules/authentication-remove-kubeadmin.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/understanding-authentication.adoc -// * authentication/understanding-identity-provider.adoc -// * post_installation_configuration/preparing-for-users.adoc - -:_content-type: PROCEDURE -[id="removing-kubeadmin_{context}"] -= Removing the kubeadmin user - -After you define an identity provider and create a new `cluster-admin` -user, you can remove the `kubeadmin` to improve cluster security. - -[WARNING] -==== -If you follow this procedure before another user is a `cluster-admin`, -then {product-title} must be reinstalled. It is not possible to undo -this command. -==== - -.Prerequisites - -* You must have configured at least one identity provider. -* You must have added the `cluster-admin` role to a user. -* You must be logged in as an administrator. - -.Procedure - -* Remove the `kubeadmin` secrets: -+ -[source,terminal] ----- -$ oc delete secrets kubeadmin -n kube-system ----- diff --git a/modules/automatic-network-verification-bypassing.adoc b/modules/automatic-network-verification-bypassing.adoc deleted file mode 100644 index 1902455a418a..000000000000 --- a/modules/automatic-network-verification-bypassing.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/network-verification.adoc - -:_content-type: CONCEPT -[id="automatic-network-verification-bypassing_{context}"] -= Automatic network verification bypassing - -You can bypass the automatic network verification if you want to deploy -ifdef::openshift-dedicated[] -an {product-title} -endif::openshift-dedicated[] -ifdef::openshift-rosa[] -a {product-title} (ROSA) -endif::openshift-rosa[] -cluster with known network configuration issues into an existing Virtual Private Cloud (VPC). - -If you bypass the network verification when you create a cluster, the cluster has a limited support status. 
After installation, you can resolve the issues and then manually run the network verification. The limited support status is removed after the verification succeeds. - -ifdef::openshift-rosa[] -.Bypassing automatic network verification by using {cluster-manager} - -endif::openshift-rosa[] -When you install a cluster into an existing VPC by using {cluster-manager-first}, you can bypass the automatic verification by selecting *Bypass network verification* on the *Virtual Private Cloud (VPC) subnet settings* page. - -ifdef::openshift-rosa[] -.Bypassing automatic network verification by using the ROSA CLI (`rosa`) - -When you install a cluster into an existing VPC by using the `rosa create cluster` command, you can bypass the automatic verification by including the `--bypass-network-verify --force` arguments. The following example bypasses the network verification before creating a cluster: - -[source,terminal] ----- -$ rosa create cluster --cluster-name mycluster \ - --subnet-ids subnet-03146b9b52b6024cb,subnet-03146b9b52b2034cc \ - --bypass-network-verify --force ----- - -[NOTE] -==== -Alternatively, you can specify the `--interactive` argument and select the option in the interactive prompts to bypass the network verification checks. -==== -endif::openshift-rosa[] diff --git a/modules/automatically-scaling-machines-to-available-bare-metal-hosts.adoc b/modules/automatically-scaling-machines-to-available-bare-metal-hosts.adoc deleted file mode 100644 index fa805eadf566..000000000000 --- a/modules/automatically-scaling-machines-to-available-bare-metal-hosts.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/managing-bare-metal-hosts.adoc - -:_content-type: PROCEDURE -[id="automatically-scaling-machines-to-available-bare-metal-hosts_{context}"] -= Automatically scaling machines to the number of available bare metal hosts - -To automatically create the number of `Machine` objects that matches the number of available `BareMetalHost` objects, add a `metal3.io/autoscale-to-hosts` annotation to the `MachineSet` object. - -.Prerequisites - -* Install {op-system} bare metal compute machines for use in the cluster, and create corresponding `BareMetalHost` objects. -* Install the {product-title} CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Annotate the compute machine set that you want to configure for automatic scaling by adding the `metal3.io/autoscale-to-hosts` annotation. Replace `` with the name of the compute machine set. -+ -[source,terminal] ----- -$ oc annotate machineset -n openshift-machine-api 'metal3.io/autoscale-to-hosts=' ----- -+ -Wait for the new scaled machines to start. - -[NOTE] -==== -When you use a `BareMetalHost` object to create a machine in the cluster and labels or selectors are subsequently changed on the `BareMetalHost`, the `BareMetalHost` object continues be counted against the `MachineSet` that the `Machine` object was created from. 
-==== diff --git a/modules/available-persistent-storage-options.adoc b/modules/available-persistent-storage-options.adoc deleted file mode 100644 index 718d2d073858..000000000000 --- a/modules/available-persistent-storage-options.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/optimizing-storage.adoc -// * post_installation_configuration/storage-configuration.adoc - -[id="available-persistent-storage-options_{context}"] -= Available persistent storage options - -Understand your persistent storage options so that you can optimize your -{product-title} environment. - -.Available storage options -[cols="1,4,3",options="header"] -|=== -| Storage type | Description | Examples - -|Block -a|* Presented to the operating system (OS) as a block device -* Suitable for applications that need full control of storage and operate at a low level on files -bypassing the file system -* Also referred to as a Storage Area Network (SAN) -* Non-shareable, which means that only one client at a time can mount an endpoint of this type -| AWS EBS and VMware vSphere support dynamic persistent volume (PV) provisioning natively in {product-title}. -// Ceph RBD, OpenStack Cinder, Azure Disk, GCE persistent disk - -|File -a| * Presented to the OS as a file system export to be mounted -* Also referred to as Network Attached Storage (NAS) -* Concurrency, latency, file locking mechanisms, and other capabilities vary widely between protocols, implementations, vendors, and scales. -|RHEL NFS, NetApp NFS ^[1]^, and Vendor NFS -// Azure File, AWS EFS - -| Object -a| * Accessible through a REST API endpoint -* Configurable for use in the {product-registry} -* Applications must build their drivers into the application and/or container. -| AWS S3 -// Aliyun OSS, Ceph Object Storage (RADOS Gateway) -// Google Cloud Storage, Azure Blob Storage, OpenStack Swift -|=== -[.small] --- -1. NetApp NFS supports dynamic PV provisioning when using the Trident plugin. --- - -[IMPORTANT] -==== -Currently, CNS is not supported in {product-title} {product-version}. -==== diff --git a/modules/aws-cloudwatch.adoc b/modules/aws-cloudwatch.adoc deleted file mode 100644 index e675ca103e80..000000000000 --- a/modules/aws-cloudwatch.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * adding_service_cluster/rosa-available-services.adoc -:_content-type: CONCEPT -[id="aws-cloudwatch_{context}"] - -= Amazon CloudWatch - -Amazon CloudWatch forwards logs from {product-title} (ROSA) to the AWS console for viewing. You must first install the ROSA `cluster-logging-operator` using the ROSA CLI (`rosa`) before installing the Amazon CloudWatch service through {cluster-manager-first} console. diff --git a/modules/aws-console-changing-aws-instance-type.adoc b/modules/aws-console-changing-aws-instance-type.adoc deleted file mode 100644 index 74608149b371..000000000000 --- a/modules/aws-console-changing-aws-instance-type.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc - -:_content-type: PROCEDURE -[id="aws-console-changing-aws-instance-type_{context}"] -= Changing the Amazon Web Services instance type by using the AWS console - -You can change the Amazon Web Services (AWS) instance type that your control plane machines use by updating the instance type in the AWS console. 
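The steps below use the AWS console. As a rough scripted equivalent, the instance type can also be changed with the AWS CLI while the instance is stopped; the instance ID and target type in the following sketch are placeholders, and the same precautions described in the procedure, such as backing up etcd and changing one control plane machine at a time, still apply.

[source,terminal]
----
$ aws ec2 stop-instances --instance-ids i-0123456789abcdef0
$ aws ec2 wait instance-stopped --instance-ids i-0123456789abcdef0
$ aws ec2 modify-instance-attribute --instance-id i-0123456789abcdef0 \
  --instance-type "{\"Value\": \"m6i.2xlarge\"}"
$ aws ec2 start-instances --instance-ids i-0123456789abcdef0
----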
- -.Prerequisites - -* You have access to the AWS console with the permissions required to modify the EC2 Instance for your cluster. -* You have access to the {product-title} cluster as a user with the `cluster-admin` role. - -.Procedure - -. Open the AWS console and fetch the instances for the control plane machines. - -. Choose one control plane machine instance. - -.. For the selected control plane machine, back up the etcd data by creating an etcd snapshot. For more information, see "Backing up etcd". - -.. In the AWS console, stop the control plane machine instance. - -.. Select the stopped instance, and click *Actions* -> *Instance Settings* -> *Change instance type*. - -.. Change the instance to a larger type, ensuring that the type is the same base as the previous selection, and apply changes. For example, you can change `m6i.xlarge` to `m6i.2xlarge` or `m6i.4xlarge`. - -.. Start the instance. - -.. If your {product-title} cluster has a corresponding `Machine` object for the instance, update the instance type of the object to match the instance type set in the AWS console. - -. Repeat this process for each control plane machine. diff --git a/modules/aws-direct-connect.adoc b/modules/aws-direct-connect.adoc deleted file mode 100644 index 45b8635ba9b7..000000000000 --- a/modules/aws-direct-connect.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/osd_private_connections/aws-private-connections.adoc - -:_content-type: PROCEDURE -[id="aws-direct-connect_{context}"] -= Configuring AWS Direct Connect - - -{AWS} Direct Connect requires a hosted Virtual Interface (VIF) connected to a Direct Connect Gateway (DXGateway), which is in turn associated to a Virtual Gateway (VGW) or a Transit Gateway in order to access a remote Virtual Private Cloud (VPC) in the same or another account. - -If you do not have an existing DXGateway, the typical process involves creating the hosted VIF, with the DXGateway and VGW being created in your AWS account. - -If you have an existing DXGateway connected to one or more existing VGWs, the process involves your AWS account sending an Association Proposal to the DXGateway owner. The DXGateway owner must ensure that the proposed CIDR will not conflict with any other VGWs they have associated. - -.Prerequisites - -* Confirm the CIDR range of the {product-title} VPC will not conflict with any other VGWs you have associated. -* Gather the following information: -** The Direct Connect Gateway ID. -** The AWS Account ID associated with the virtual interface. -** The BGP ASN assigned for the DXGateway. Optional: the Amazon default ASN may also be used. - -.Procedure - -. link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/create-vif.html[Create a VIF] or link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/viewvifdetails.html[view your existing VIFs] to determine the type of direct connection you need to create. - -. Create your gateway. -.. If the Direct Connect VIF type is *Private*, link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/virtualgateways.html#create-virtual-private-gateway[create a virtual private gateway]. -.. If the Direct Connect VIF is *Public*, link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/direct-connect-gateways-intro.html#create-direct-connect-gateway[create a Direct Connect gateway]. - -. 
If you have an existing gateway you want to use, link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/multi-account-associate-vgw.html[create an association proposal] and send the proposal to the DXGateway owner for approval. -+ -[WARNING] -==== -When connecting to an existing DXGateway, you are responsible for the link:https://aws.amazon.com/directconnect/pricing/[costs]. -==== - -[role="_additional-resources"] -.Additional resources - -* For more information and troubleshooting help, see the link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/Welcome.html[AWS Direct Connect] guide. diff --git a/modules/aws-limits.adoc b/modules/aws-limits.adoc deleted file mode 100644 index 69291e7b8e55..000000000000 --- a/modules/aws-limits.adoc +++ /dev/null @@ -1,86 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/aws-ccs.adoc - -[id="aws-limits_{context}"] -= AWS account limits - - -The {product-title} cluster uses a number of Amazon Web Services (AWS) components, and the default link:https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html[service limits] affect your ability to install {product-title} clusters. If you use certain cluster configurations, deploy your cluster in certain AWS regions, or run multiple clusters from your account, you might need to request additional resources for your AWS account. - -The following table summarizes the AWS components whose limits can impact your ability to install and run {product-title} clusters. - - -[cols="3a,3a,3a,8a",options="header"] -|=== -|Component |Number of clusters available by default| Default AWS limit |Description - -|Instance Limits -|Varies -|Varies -|At a minimum, each cluster creates the following instances: - -* One bootstrap machine, which is removed after installation -* Three control plane nodes -* Two infrastructure nodes for a single availability zone; three infrascture nodes for multi-availability zones -* Two worker nodes for a single availability zone; three worker nodes for multi-availability zones - -These instance type counts are within a new account's default limit. To deploy more worker nodes, deploy large workloads, or use a different instance type, review your account limits to ensure that your cluster can deploy the machines that you need. - -In most regions, the bootstrap and worker machines uses an `m4.large` machines and the control plane machines use `m4.xlarge` instances. In some regions, including all regions that do not support these instance types, `m5.large` and `m5.xlarge` instances are used instead. - -|Elastic IPs (EIPs) -|0 to 1 -|5 EIPs per account -|To provision the cluster in a highly available configuration, the installation program creates a public and private subnet for each link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html[availability zone within a region]. Each private subnet requires a link:https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html[NAT Gateway], and each NAT gateway requires a separate -link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ipaddresses-eip.html[elastic IP]. Review the link:https://aws.amazon.com/about-aws/global-infrastructure/[AWS region map] to determine how many availability zones are in each region. To take advantage of the default high availability, install the cluster in a region with at least three availability zones. To install a cluster in a region with more than five availability zones, you must increase the EIP limit. 
- -// TODO: The above elastic IP link is redirected. Find new link. Is it https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html ? - -[IMPORTANT] -==== -To use the `us-east-1` region, you must increase the EIP limit for your account. -==== - -|Virtual Private Clouds (VPCs) -|5 -|5 VPCs per region -|Each cluster creates its own VPC. - -|Elastic Load Balancing (ELB) -|3 -|20 per region -|By default, each cluster creates internal and external Network Load Balancers for the primary API server and a single Classic Load Balancer for the router. Deploying more Kubernetes LoadBalancer Service objects will create additional link:https://aws.amazon.com/elasticloadbalancing/[load balancers]. - - -|NAT Gateways -|5 -|5 per availability zone -|The cluster deploys one NAT gateway in each availability zone. - -|Elastic Network Interfaces (ENIs) -|At least 12 -|350 per region -|The default installation creates 21 ENIs and an ENI for each availability zone in your region. For example, the `us-east-1` region contains six availability zones, so a cluster that is deployed in that zone uses 27 ENIs. Review the link:https://aws.amazon.com/about-aws/global-infrastructure/[AWS region map] to determine how many availability zones are in each region. - -Additional ENIs are created for additional machines and load balancers that are created by cluster usage and deployed workloads. - -|VPC Gateway -|20 -|20 per account -|Each cluster creates a single VPC Gateway for S3 access. - - -|S3 buckets -|99 -|100 buckets per account -|Because the installation process creates a temporary bucket and the registry component in each cluster creates a bucket, you can create only 99 {product-title} clusters per AWS account. - -|Security Groups -|250 -|2,500 per account -|Each cluster creates 10 distinct security groups. - | Fail, optionally surfacing response body to the user -|=== - -// TODO: what is this random text/cell on line 82^? diff --git a/modules/aws-vpc.adoc b/modules/aws-vpc.adoc deleted file mode 100644 index 4572be7b66f4..000000000000 --- a/modules/aws-vpc.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/osd_private_connections/aws-private-connections.adoc - -:_content-type: PROCEDURE -[id="aws-vpc_{context}"] -= Configuring AWS VPC peering - - -A Virtual Private Cloud (VPC) peering connection is a networking connection between two VPCs that enables you to route traffic between them using private IPv4 addresses or IPv6 addresses. You can configure an {AWS} VPC containing an {product-title} cluster to peer with another AWS VPC network. - -[WARNING] -==== -Private clusters cannot be fully deleted by {cluster-manager-first} if the VPC the cluster is installed in is peered. - -AWS supports inter-region VPC peering between all commercial regions link:https://aws.amazon.com/vpc/faqs/#Peering_Connections[excluding China]. -==== - -.Prerequisites - -* Gather the following information about the Customer VPC that is required to initiate the peering request: -** Customer AWS account number -** Customer VPC ID -** Customer VPC Region -** Customer VPC CIDR -* Check the CIDR block used by the {product-title} Cluster VPC. If it overlaps or matches the CIDR block for the Customer VPC, then peering between these two VPCs is not possible; see the Amazon VPC link:https://docs.aws.amazon.com/vpc/latest/peering/invalid-peering-configurations.html[Unsupported VPC peering configurations] documentation for details. 
If the CIDR blocks do not overlap, you can proceed with the procedure. - -.Procedure - -. link:https://docs.aws.amazon.com/vpc/latest/peering/create-vpc-peering-connection.html#create-vpc-peering-connection-local[Initiate the VPC peering request]. - -. link:https://docs.aws.amazon.com/vpc/latest/peering/create-vpc-peering-connection.html#accept-vpc-peering-connection[Accept the VPC peering request]. - -. link:https://docs.aws.amazon.com/vpc/latest/peering/vpc-peering-routing.html[Update your Route tables for the VPC peering connection]. - -[role="_additional-resources"] -.Additional resources - -* For more information and troubleshooting help, see the link:https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html[AWS VPC] guide. diff --git a/modules/aws-vpn.adoc b/modules/aws-vpn.adoc deleted file mode 100644 index 8f1c5afb1f4f..000000000000 --- a/modules/aws-vpn.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/osd_private_connections/aws-private-connections.adoc - -:_content-type: PROCEDURE -[id="aws-vpn_{context}"] -= Configuring an AWS VPN - - -You can configure an {AWS} {product-title} cluster to use a customer’s on-site hardware Virtual Private Network (VPN) device. By default, instances that you launch into an AWS Virtual Private Cloud (VPC) cannot communicate with your own (remote) network. You can enable access to your remote network from your VPC by creating an AWS Site-to-Site VPN connection, and configuring routing to pass traffic through the connection. - -[NOTE] -==== -AWS VPN does not currently provide a managed option to apply NAT to VPN traffic. See the link:https://aws.amazon.com/premiumsupport/knowledge-center/configure-nat-for-vpn-traffic/[AWS Knowledge Center] for more details. - -Routing all traffic, for example `0.0.0.0/0`, through a private connection is not supported. This requires deleting the internet gateway, which disables SRE management traffic. -==== - -.Prerequisites - -* Hardware VPN gateway device model and software version, for example Cisco ASA running version 8.3. See the link:https://docs.aws.amazon.com/vpc/latest/adminguide/Introduction.html#DevicesTested[AWS documentation] to confirm whether your gateway device is supported by AWS. -* Public, static IP address for the VPN gateway device. -* BGP or static routing: if BGP, the ASN is required. If static routing, you must -configure at least one static route. -* Optional: IP and port/protocol of a reachable service to test the VPN connection. - -.Procedure - -. link:https://docs.aws.amazon.com/vpn/latest/s2svpn/SetUpVPNConnections.html#vpn-create-cgw[Create a customer gateway] to configure the VPN connection. - -. If you do not already have a Virtual Private Gateway attached to the intended VPC, link:https://docs.aws.amazon.com/vpn/latest/s2svpn/SetUpVPNConnections.html#vpn-create-target-gateway[create and attach] a Virtual Private Gateway. - -. link:https://docs.aws.amazon.com/vpn/latest/s2svpn/SetUpVPNConnections.html#vpn-configure-route-tables[Configure routing and enable VPN route propagation]. - -. link:https://docs.aws.amazon.com/vpn/latest/s2svpn/SetUpVPNConnections.html#vpn-configure-security-groups[Update your security group]. - -. link:https://docs.aws.amazon.com/vpn/latest/s2svpn/SetUpVPNConnections.html#vpn-create-vpn-connection[Establish the Site-to-Site VPN connection]. -+ -[NOTE] -==== -Note the VPC subnet information, which you must add to your configuration as the remote network. 
-==== - -[role="_additional-resources"] -.Additional resources - -* For more information and troubleshooting help, see the link:https://docs.aws.amazon.com/vpn/latest/s2svpn/VPC_VPN.html[AWS VPN] guide. diff --git a/modules/azure-stack-hub-internal-ca.adoc b/modules/azure-stack-hub-internal-ca.adoc deleted file mode 100644 index d882b1ea1c75..000000000000 --- a/modules/azure-stack-hub-internal-ca.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// *installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc - -:_content-type: PROCEDURE -[id="internal-certificate-authority_{context}"] -= Configuring the cluster to use an internal CA - -If the Azure Stack Hub environment is using an internal Certificate Authority (CA), update the `cluster-proxy-01-config.yaml file` to configure the cluster to use the internal CA. - -.Prerequisites - -* Create the `install-config.yaml` file and specify the certificate trust bundle in `.pem` format. -* Create the cluster manifests. - -.Procedure - -. From the directory in which the installation program creates files, go to the `manifests` directory. -. Add `user-ca-bundle` to the `spec.trustedCA.name` field. -+ -.Example `cluster-proxy-01-config.yaml` file -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Proxy -metadata: - creationTimestamp: null - name: cluster -spec: - trustedCA: - name: user-ca-bundle -status: {} ----- -. Optional: Back up the `manifests/ cluster-proxy-01-config.yaml` file. The installation program consumes the `manifests/` directory when you deploy the cluster. diff --git a/modules/backup-etcd-hosted-cluster.adoc b/modules/backup-etcd-hosted-cluster.adoc deleted file mode 100644 index 455fc4e25f9f..000000000000 --- a/modules/backup-etcd-hosted-cluster.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assembly: -// -// * hcp-backup-restore-dr.adoc - -:_content-type: PROCEDURE -[id="backup-etcd-hosted-cluster_{context}"] -= Taking a snapshot of etcd on a hosted cluster - -As part of the process to back up etcd for a hosted cluster, you take a snapshot of etcd. After you take the snapshot, you can restore it, for example, as part of a disaster recovery operation. - -[IMPORTANT] -==== -This procedure requires API downtime. -==== - -.Procedure - -. Pause reconciliation of the hosted cluster by entering this command: -+ -[source,terminal] ----- -$ oc patch -n clusters hostedclusters/${CLUSTER_NAME} -p '{"spec":{"pausedUntil":"'${PAUSED_UNTIL}'"}}' --type=merge ----- - -. Stop all etcd-writer deployments by entering this command: -+ -[source,terminal] ----- -$ oc scale deployment -n ${HOSTED_CLUSTER_NAMESPACE} --replicas=0 kube-apiserver openshift-apiserver openshift-oauth-apiserver ----- - -. Take an etcd snapshot by using the `exec` command in each etcd container: -+ -[source,terminal] ----- -$ oc exec -it etcd-0 -n ${HOSTED_CLUSTER_NAMESPACE} -- env ETCDCTL_API=3 /usr/bin/etcdctl --cacert /etc/etcd/tls/client/etcd-client-ca.crt --cert /etc/etcd/tls/client/etcd-client.crt --key /etc/etcd/tls/client/etcd-client.key --endpoints=localhost:2379 snapshot save /var/lib/data/snapshot.db -$ oc exec -it etcd-0 -n ${HOSTED_CLUSTER_NAMESPACE} -- env ETCDCTL_API=3 /usr/bin/etcdctl -w table snapshot status /var/lib/data/snapshot.db ----- - -. Copy the snapshot data to a location where you can retrieve it later, such as an S3 bucket, as shown in the following example. -+ -[NOTE] -==== -The following example uses signature version 2. 
If you are in a region that supports signature version 4, such as the us-east-2 region, use signature version 4. Otherwise, if you use signature version 2 to copy the snapshot to an S3 bucket, the upload fails and signature version 2 is deprecated. -==== -+ -.Example -[source,terminal] ----- -BUCKET_NAME=somebucket -FILEPATH="/${BUCKET_NAME}/${CLUSTER_NAME}-snapshot.db" -CONTENT_TYPE="application/x-compressed-tar" -DATE_VALUE=`date -R` -SIGNATURE_STRING="PUT\n\n${CONTENT_TYPE}\n${DATE_VALUE}\n${FILEPATH}" -ACCESS_KEY=accesskey -SECRET_KEY=secret -SIGNATURE_HASH=`echo -en ${SIGNATURE_STRING} | openssl sha1 -hmac ${SECRET_KEY} -binary | base64` - -oc exec -it etcd-0 -n ${HOSTED_CLUSTER_NAMESPACE} -- curl -X PUT -T "/var/lib/data/snapshot.db" \ - -H "Host: ${BUCKET_NAME}.s3.amazonaws.com" \ - -H "Date: ${DATE_VALUE}" \ - -H "Content-Type: ${CONTENT_TYPE}" \ - -H "Authorization: AWS ${ACCESS_KEY}:${SIGNATURE_HASH}" \ - https://${BUCKET_NAME}.s3.amazonaws.com/${CLUSTER_NAME}-snapshot.db ----- - -. If you want to be able to restore the snapshot on a new cluster later, save the encryption secret that the hosted cluster references, as shown in this example: -+ -.Example -[source,terminal] ----- -oc get hostedcluster $CLUSTER_NAME -o=jsonpath='{.spec.secretEncryption.aescbc}' -{"activeKey":{"name":"CLUSTER_NAME-etcd-encryption-key"}} - -# Save this secret, or the key it contains so the etcd data can later be decrypted -oc get secret ${CLUSTER_NAME}-etcd-encryption-key -o=jsonpath='{.data.key}' ----- - -.Next steps - -Restore the etcd snapshot. diff --git a/modules/backup-etcd.adoc b/modules/backup-etcd.adoc deleted file mode 100644 index ab3985bf46ea..000000000000 --- a/modules/backup-etcd.adoc +++ /dev/null @@ -1,88 +0,0 @@ -// Module included in the following assemblies: -// -// * disaster_recovery/backing-up-etcd.adoc -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: PROCEDURE -[id="backing-up-etcd-data_{context}"] -= Backing up etcd data - -Follow these steps to back up etcd data by creating an etcd snapshot and backing up the resources for the static pods. This backup can be saved and used at a later time if you need to restore etcd. - -[IMPORTANT] -==== -Only save a backup from a single control plane host. Do not take a backup from each control plane host in the cluster. -==== - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have checked whether the cluster-wide proxy is enabled. -+ -[TIP] -==== -You can check whether the proxy is enabled by reviewing the output of `oc get proxy cluster -o yaml`. The proxy is enabled if the `httpProxy`, `httpsProxy`, and `noProxy` fields have values set. -==== - -.Procedure - -. Start a debug session for a control plane node: -+ -[source,terminal] ----- -$ oc debug node/ ----- - -. Change your root directory to `/host`: -+ -[source,terminal] ----- -sh-4.2# chroot /host ----- - -. If the cluster-wide proxy is enabled, be sure that you have exported the `NO_PROXY`, `HTTP_PROXY`, and `HTTPS_PROXY` environment variables. - -. Run the `cluster-backup.sh` script and pass in the location to save the backup to. -+ -[TIP] -==== -The `cluster-backup.sh` script is maintained as a component of the etcd Cluster Operator and is a wrapper around the `etcdctl snapshot save` command. 
-==== -+ -[source,terminal] ----- -sh-4.4# /usr/local/bin/cluster-backup.sh /home/core/assets/backup ----- -+ -.Example script output -[source,terminal] ----- -found latest kube-apiserver: /etc/kubernetes/static-pod-resources/kube-apiserver-pod-6 -found latest kube-controller-manager: /etc/kubernetes/static-pod-resources/kube-controller-manager-pod-7 -found latest kube-scheduler: /etc/kubernetes/static-pod-resources/kube-scheduler-pod-6 -found latest etcd: /etc/kubernetes/static-pod-resources/etcd-pod-3 -ede95fe6b88b87ba86a03c15e669fb4aa5bf0991c180d3c6895ce72eaade54a1 -etcdctl version: 3.4.14 -API version: 3.4 -{"level":"info","ts":1624647639.0188997,"caller":"snapshot/v3_snapshot.go:119","msg":"created temporary db file","path":"/home/core/assets/backup/snapshot_2021-06-25_190035.db.part"} -{"level":"info","ts":"2021-06-25T19:00:39.030Z","caller":"clientv3/maintenance.go:200","msg":"opened snapshot stream; downloading"} -{"level":"info","ts":1624647639.0301006,"caller":"snapshot/v3_snapshot.go:127","msg":"fetching snapshot","endpoint":"https://10.0.0.5:2379"} -{"level":"info","ts":"2021-06-25T19:00:40.215Z","caller":"clientv3/maintenance.go:208","msg":"completed snapshot read; closing"} -{"level":"info","ts":1624647640.6032252,"caller":"snapshot/v3_snapshot.go:142","msg":"fetched snapshot","endpoint":"https://10.0.0.5:2379","size":"114 MB","took":1.584090459} -{"level":"info","ts":1624647640.6047094,"caller":"snapshot/v3_snapshot.go:152","msg":"saved","path":"/home/core/assets/backup/snapshot_2021-06-25_190035.db"} -Snapshot saved at /home/core/assets/backup/snapshot_2021-06-25_190035.db -{"hash":3866667823,"revision":31407,"totalKey":12828,"totalSize":114446336} -snapshot db and kube resources are successfully saved to /home/core/assets/backup ----- -+ -In this example, two files are created in the `/home/core/assets/backup/` directory on the control plane host: - -* `snapshot_.db`: This file is the etcd snapshot. The `cluster-backup.sh` script confirms its validity. -* `static_kuberesources_.tar.gz`: This file contains the resources for the static pods. If etcd encryption is enabled, it also contains the encryption keys for the etcd snapshot. -+ -[NOTE] -==== -If etcd encryption is enabled, it is recommended to store this second file separately from the etcd snapshot for security reasons. However, this file is required to restore from the etcd snapshot. - -Keep in mind that etcd encryption only encrypts values, not keys. This means that resource types, namespaces, and object names are unencrypted. -==== diff --git a/modules/baremetal-event-relay.adoc b/modules/baremetal-event-relay.adoc deleted file mode 100644 index 9cf801e6b3b3..000000000000 --- a/modules/baremetal-event-relay.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc -[id="baremetal-event-relay_{context}"] -= {redfish-operator} - -[discrete] -== Purpose -The OpenShift {redfish-operator} manages the life-cycle of the Bare Metal Event Relay. The Bare Metal Event Relay enables you to configure the types of cluster event that are monitored using Redfish hardware events. - -[discrete] -== Configuration objects -You can use this command to edit the configuration after installation: for example, the webhook port. 
-You can edit configuration objects with: - -[source,terminal] ----- -$ oc -n [namespace] edit cm hw-event-proxy-operator-manager-config ----- - -[source,terminal] ----- -apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 -kind: ControllerManagerConfig -health: - healthProbeBindAddress: :8081 -metrics: - bindAddress: 127.0.0.1:8080 -webhook: - port: 9443 -leaderElection: - leaderElect: true - resourceName: 6e7a703c.redhat-cne.org ----- - -[discrete] -== Project -link:https://github.com/redhat-cne/hw-event-proxy-operator[hw-event-proxy-operator] - -[discrete] -== CRD -The proxy enables applications running on bare-metal clusters to respond quickly to Redfish hardware changes and failures such as breaches of temperature thresholds, fan failure, disk loss, power outages, and memory failure, reported using the HardwareEvent CR. - -`hardwareevents.event.redhat-cne.org`: - -* Scope: Namespaced -* CR: HardwareEvent -* Validation: Yes diff --git a/modules/baseline-router-performance.adoc b/modules/baseline-router-performance.adoc deleted file mode 100644 index 9ec6a14c9d19..000000000000 --- a/modules/baseline-router-performance.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// * scalability_and_performance/optimization/routing-optimization.adoc -// * post_installation_configuration/network-configuration.adoc - -[id="baseline-router-performance_{context}"] -= Baseline Ingress Controller (router) performance - -The {product-title} Ingress Controller, or router, is the ingress point for ingress traffic for applications and services that are configured using routes and ingresses. - -When evaluating a single HAProxy router performance in terms of HTTP requests handled per second, the performance varies depending on many factors. In particular: - -* HTTP keep-alive/close mode - -* Route type - -* TLS session resumption client support - -* Number of concurrent connections per target route - -* Number of target routes - -* Back end server page size - -* Underlying infrastructure (network/SDN solution, CPU, and so on) - -While performance in your specific environment will vary, Red Hat lab tests on a public cloud instance of size 4 vCPU/16GB RAM. A single HAProxy router handling 100 routes terminated by backends serving 1kB static pages is able to handle the following number of transactions per second. - -In HTTP keep-alive mode scenarios: - -[cols="3",options="header"] -|=== -|*Encryption* |*LoadBalancerService*|*HostNetwork* -|none |21515|29622 -|edge |16743|22913 -|passthrough |36786|53295 -|re-encrypt |21583|25198 -|=== - -In HTTP close (no keep-alive) scenarios: - -[cols="3",options="header"] -|=== -|*Encryption* |*LoadBalancerService*|*HostNetwork* -|none |5719|8273 -|edge |2729|4069 -|passthrough |4121|5344 -|re-encrypt |2320|2941 -|=== - -The default Ingress Controller configuration was used with the `spec.tuningOptions.threadCount` field set to `4`. Two different endpoint publishing strategies were tested: Load Balancer Service and Host Network. TLS session resumption was used for encrypted routes. With HTTP keep-alive, a single HAProxy router is capable of saturating a 1 Gbit NIC at page sizes as small as 8 kB. - -When running on bare metal with modern processors, you can expect roughly twice the performance of the public cloud instance above. This overhead is introduced by the virtualization layer in place on public clouds and holds mostly true for private cloud-based virtualization as well. 
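If you want to reproduce a comparable baseline in your own cluster, the thread count referenced above is set on the default Ingress Controller. The following is a minimal sketch, assuming the `IngressController` object named `default` in the `openshift-ingress-operator` namespace, as in a standard installation:

[source,terminal]
----
# Set the HAProxy thread count used for the baseline measurements above
$ oc -n openshift-ingress-operator patch ingresscontroller/default \
  --type=merge -p '{"spec":{"tuningOptions":{"threadCount":4}}}'
----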
The following table is a guide to how many applications to use behind the router: - -[cols="2,4",options="header"] -|=== -|*Number of applications* |*Application type* -|5-10 |static file/web server or caching proxy -|100-1000 |applications generating dynamic content - -|=== - -In general, HAProxy can support routes for up to 1000 applications, depending on the technology in use. Ingress Controller performance might be limited by the -capabilities and performance of the applications behind it, such as language or static versus dynamic content. - -Ingress, or router, sharding should be used to serve more routes towards applications and help horizontally scale the routing tier. diff --git a/modules/binding-infra-node-workloads-using-taints-tolerations.adoc b/modules/binding-infra-node-workloads-using-taints-tolerations.adoc deleted file mode 100644 index 618adb487fac..000000000000 --- a/modules/binding-infra-node-workloads-using-taints-tolerations.adoc +++ /dev/null @@ -1,109 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: PROCEDURE -[id="binding-infra-node-workloads-using-taints-tolerations_{context}"] -= Binding infrastructure node workloads using taints and tolerations - -If you have an infra node that has the `infra` and `worker` roles assigned, you must configure the node so that user workloads are not assigned to it. - -[IMPORTANT] -==== -It is recommended that you preserve the dual `infra,worker` label that is created for infra nodes and use taints and tolerations to manage nodes that user workloads are scheduled on. If you remove the `worker` label from the node, you must create a custom pool to manage it. A node with a label other than `master` or `worker` is not recognized by the MCO without a custom pool. Maintaining the `worker` label allows the node to be managed by the default worker machine config pool, if no custom pools that select the custom label exists. The `infra` label communicates to the cluster that it does not count toward the total number of subscriptions. -==== - -.Prerequisites - -* Configure additional `MachineSet` objects in your {product-title} cluster. - -.Procedure - -. Add a taint to the infra node to prevent scheduling user workloads on it: - -.. Determine if the node has the taint: -+ -[source,terminal] ----- -$ oc describe nodes ----- -+ -.Sample output -[source,text] ----- -oc describe node ci-ln-iyhx092-f76d1-nvdfm-worker-b-wln2l -Name: ci-ln-iyhx092-f76d1-nvdfm-worker-b-wln2l -Roles: worker - ... -Taints: node-role.kubernetes.io/infra:NoSchedule - ... ----- -+ -This example shows that the node has a taint. You can proceed with adding a toleration to your pod in the next step. - -.. If you have not configured a taint to prevent scheduling user workloads on it: -+ -[source,terminal] ----- -$ oc adm taint nodes =: ----- -+ -For example: -+ -[source,terminal] ----- -$ oc adm taint nodes node1 node-role.kubernetes.io/infra=reserved:NoExecute ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the taint: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: - labels: - ... -spec: - taints: - - key: node-role.kubernetes.io/infra - effect: NoExecute - value: reserved - ... ----- -==== -+ -This example places a taint on `node1` that has key `node-role.kubernetes.io/infra` and taint effect `NoSchedule`. 
Nodes with the `NoSchedule` effect schedule only pods that tolerate the taint, but allow existing pods to remain scheduled on the node. -+ -[NOTE] -==== -If a descheduler is used, pods violating node taints could be evicted from the cluster. -==== - -. Add tolerations for the pod configurations you want to schedule on the infra node, like router, registry, and monitoring workloads. Add the following code to the `Pod` object specification: -+ -[source,yaml] ----- -tolerations: - - effect: NoExecute <1> - key: node-role.kubernetes.io/infra <2> - operator: Exists <3> - value: reserved <4> ----- -<1> Specify the effect that you added to the node. -<2> Specify the key that you added to the node. -<3> Specify the `Exists` Operator to require a taint with the key `node-role.kubernetes.io/infra` to be present on the node. -<4> Specify the value of the key-value pair taint that you added to the node. -+ -This toleration matches the taint created by the `oc adm taint` command. A pod with this toleration can be scheduled onto the infra node. -+ -[NOTE] -==== -Moving pods for an Operator installed via OLM to an infra node is not always possible. The capability to move Operator pods depends on the configuration of each Operator. -==== - -. Schedule the pod to the infra node using a scheduler. See the documentation for _Controlling pod placement onto nodes_ for details. diff --git a/modules/bmo-about-the-bare-metal-operator.adoc b/modules/bmo-about-the-bare-metal-operator.adoc deleted file mode 100644 index c0d7a33fd257..000000000000 --- a/modules/bmo-about-the-bare-metal-operator.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// This is included in the following assemblies: -// -// post_installation_configuration/bare-metal-configuration.adoc - -:_module-type: CONCEPT -[id="bmo-about-the-bare-metal-operator_{context}"] -= About the Bare Metal Operator - -Use the Bare Metal Operator (BMO) to provision, manage, and inspect bare-metal hosts in your cluster. - -The BMO uses three resources to complete these tasks: - -* `BareMetalHost` -* `HostFirmwareSettings` -* `FirmwareSchema` - -The BMO maintains an inventory of the physical hosts in the cluster by mapping each bare-metal host to an instance of the `BareMetalHost` custom resource definition. Each `BareMetalHost` resource features hardware, software, and firmware details. The BMO continually inspects the bare-metal hosts in the cluster to ensure each `BareMetalHost` resource accurately details the components of the corresponding host. - -The BMO also uses the `HostFirmwareSettings` resource and the `FirmwareSchema` resource to detail firmware specifications for the bare-metal host. - -The BMO interfaces with bare-metal hosts in the cluster by using the Ironic API service. The Ironic service uses the Baseboard Management Controller (BMC) on the host to interface with the machine. 
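As a quick illustration of that inventory, and not part of the original module, you can list the `BareMetalHost` resources that the BMO maintains in a standard installer-provisioned cluster:

[source,terminal]
----
# List the bare-metal host inventory managed by the BMO
$ oc get baremetalhosts -n openshift-machine-api
----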
- -Some common tasks you can complete by using the BMO include the following: - -* Provision bare-metal hosts to the cluster with a specific image -* Format a host's disk contents before provisioning or after deprovisioning -* Turn on or off a host -* Change firmware settings -* View the host's hardware details \ No newline at end of file diff --git a/modules/bmo-about-the-baremetalhost-resource.adoc b/modules/bmo-about-the-baremetalhost-resource.adoc deleted file mode 100644 index 86168debc9b5..000000000000 --- a/modules/bmo-about-the-baremetalhost-resource.adoc +++ /dev/null @@ -1,323 +0,0 @@ -// This is included in the following assemblies: -// -// post_installation_configuration/bare-metal-configuration.adoc - -:_content-type: REFERENCE -[id="about-the-baremetalhost-resource_{context}"] -= About the BareMetalHost resource - -Metal^3^ introduces the concept of the `BareMetalHost` resource, which defines a physical host and its properties. The `BareMetalHost` resource contains two sections: - -. The `BareMetalHost` spec -. The `BareMetalHost` status - -== The BareMetalHost spec - -The `spec` section of the `BareMetalHost` resource defines the desired state of the host. - -.BareMetalHost spec -[options="header"] -|==== -|Parameters |Description - -| `automatedCleaningMode` -| An interface to enable or disable automated cleaning during provisioning and de-provisioning. When set to `disabled`, it skips automated cleaning. When set to `metadata`, automated cleaning is enabled. The default setting is `metadata`. - -a| ----- -bmc: - address: - credentialsName: - disableCertificateVerification: ----- -a| The `bmc` configuration setting contains the connection information for the baseboard management controller (BMC) on the host. The fields are: - -* `address`: The URL for communicating with the host's BMC controller. - -* `credentialsName`: A reference to a secret containing the username and password for the BMC. - -* `disableCertificateVerification`: A boolean to skip certificate validation when set to `true`. - -| `bootMACAddress` -| The MAC address of the NIC used for provisioning the host. - -| `bootMode` -| The boot mode of the host. It defaults to `UEFI`, but it can also be set to `legacy` for BIOS boot, or `UEFISecureBoot`. - -| `consumerRef` -| A reference to another resource that is using the host. It could be empty if another resource is not currently using the host. For example, a `Machine` resource might use the host when the `machine-api` is using the host. - -| `description` -| A human-provided string to help identify the host. - -| `externallyProvisioned` -a| A boolean indicating whether the host provisioning and deprovisioning are managed externally. When set: - -* Power status can still be managed using the online field. -* Hardware inventory will be monitored, but no provisioning or deprovisioning operations are performed on the host. - -| `firmware` -a| Contains information about the BIOS configuration of bare metal hosts. Currently, `firmware` is only supported by iRMC, iDRAC, iLO4 and iLO5 BMCs. The sub fields are: - -** `simultaneousMultithreadingEnabled`: Allows a single physical processor core to appear as several logical processors. Valid settings are `true` or `false`. -** `sriovEnabled`: SR-IOV support enables a hypervisor to create virtual instances of a PCI-express device, potentially increasing performance. Valid settings are `true` or `false`. -** `virtualizationEnabled`: Supports the virtualization of platform hardware. Valid settings are `true` or `false`. 
- -a| ----- -image: - url: - checksum: - checksumType: - format: ----- -a| The `image` configuration setting holds the details for the image to be deployed on the host. Ironic requires the image fields. However, when the `externallyProvisioned` configuration setting is set to `true` and the external management doesn't require power control, the fields can be empty. The fields are: - -* `url`: The URL of an image to deploy to the host. -* `checksum`: The actual checksum or a URL to a file containing the checksum for the image at `image.url`. -* `checksumType`: You can specify checksum algorithms. Currently `image.checksumType` only supports `md5`, `sha256`, and `sha512`. The default checksum type is `md5`. -* `format`: This is the disk format of the image. It can be one of `raw`, `qcow2`, `vdi`, `vmdk`, `live-iso` or be left unset. Setting it to `raw` enables raw image streaming in the Ironic agent for that image. Setting it to `live-iso` enables iso images to live boot without deploying to disk, and it ignores the `checksum` fields. - -| `networkData` -| A reference to the secret containing the network configuration data and its namespace, so that it can be attached to the host before the host boots to set up the network. - -| `online` -| A boolean indicating whether the host should be powered on (`true`) or off (`false`). Changing this value will trigger a change in the power state of the physical host. - -a| ----- -raid: - hardwareRAIDVolumes: - softwareRAIDVolumes: ----- -a| (Optional) Contains the information about the RAID configuration for bare metal hosts. If not specified, it retains the current configuration. - -[NOTE] -==== -{product-title} {product-version} supports hardware RAID for BMCs using the iRMC protocol only. {product-title} {product-version} does not support software RAID. -==== - -See the following configuration settings: - -* `hardwareRAIDVolumes`: Contains the list of logical drives for hardware RAID, and defines the desired volume configuration in the hardware RAID. If you don't specify `rootDeviceHints`, the first volume is the root volume. The sub-fields are: - -** `level`: The RAID level for the logical drive. The following levels are supported: `0`,`1`,`2`,`5`,`6`,`1+0`,`5+0`,`6+0`. -** `name`: The name of the volume as a string. It should be unique within the server. If not specified, the volume name will be auto-generated. -** `numberOfPhysicalDisks`: The number of physical drives as an integer to use for the logical drove. Defaults to the minimum number of disk drives required for the particular RAID level. -** `physicalDisks`: The list of names of physical disk drives as a string. This is an optional field. If specified, the controller field must be specified too. -** `controller`: (Optional) The name of the RAID controller as a string to use in the hardware RAID volume. -** `rotational`: If set to `true`, it will only select rotational disk drives. If set to `false`, it will only select solid-state and NVMe drives. If not set, it selects any drive types, which is the default behavior. -** `sizeGibibytes`: The size of the logical drive as an integer to create in GiB. If unspecified or set to `0`, it will use the maximum capacity of physical drive for the logical drive. - -* `softwareRAIDVolumes`: {product-title} {product-version} does not support software RAID. The following information is for reference only. This configuration contains the list of logical disks for software RAID. If you don't specify `rootDeviceHints`, the first volume is the root volume. 
If you set `HardwareRAIDVolumes`, this item will be invalid. Software RAIDs will always be deleted. The number of created software RAID devices must be `1` or `2`. If there is only one software RAID device, it must be `RAID-1`. If there are two RAID devices, the first device must be `RAID-1`, while the RAID level for the second device can be `0`, `1`, or `1+0`. The first RAID device will be the deployment device. Therefore, enforcing `RAID-1` reduces the risk of a non-booting node in case of a device failure. The `softwareRAIDVolume` field defines the desired configuration of the volume in the software RAID. The sub-fields are: - -** `level`: The RAID level for the logical drive. The following levels are supported: `0`,`1`,`1+0`. -** `physicalDisks`: A list of device hints. The number of items should be greater than or equal to `2`. -** `sizeGibibytes`: The size of the logical disk drive as an integer to be created in GiB. If unspecified or set to `0`, it will use the maximum capacity of physical drive for logical drive. - -You can set the `hardwareRAIDVolume` as an empty slice to clear the hardware RAID configuration. For example: - ----- -spec: - raid: - hardwareRAIDVolume: [] ----- - -If you receive an error message indicating that the driver does not support RAID, set the `raid`, `hardwareRAIDVolumes` or `softwareRAIDVolumes` to nil. You might need to ensure the host has a RAID controller. - -a| ----- -rootDeviceHints: - deviceName: - hctl: - model: - vendor: - serialNumber: - minSizeGigabytes: - wwn: - wwnWithExtension: - wwnVendorExtension: - rotational: ----- -a| The `rootDeviceHints` parameter enables provisioning of the {op-system} image to a particular device. It examines the devices in the order it discovers them, and compares the discovered values with the hint values. It uses the first discovered device that matches the hint value. The configuration can combine multiple hints, but a device must match all hints to get selected. The fields are: - -* `deviceName`: A string containing a Linux device name like `/dev/vda`. The hint must match the actual value exactly. - -* `hctl`: A string containing a SCSI bus address like `0:0:0:0`. The hint must match the actual value exactly. - -* `model`: A string containing a vendor-specific device identifier. The hint can be a substring of the actual value. - -* `vendor`: A string containing the name of the vendor or manufacturer of the device. The hint can be a sub-string of the actual value. - -* `serialNumber`: A string containing the device serial number. The hint must match the actual value exactly. - -* `minSizeGigabytes`: An integer representing the minimum size of the device in gigabytes. - -* `wwn`: A string containing the unique storage identifier. The hint must match the actual value exactly. - -* `wwnWithExtension`: A string containing the unique storage identifier with the vendor extension appended. The hint must match the actual value exactly. - -* `wwnVendorExtension`: A string containing the unique vendor storage identifier. The hint must match the actual value exactly. - -* `rotational`: A boolean indicating whether the device should be a rotating disk (true) or not (false). - -|==== - -== The BareMetalHost status - -The `BareMetalHost` status represents the host's current state, and includes tested credentials, current hardware details, and other information. 
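As a quick illustration, and not part of the original reference, you can read individual status fields with a JSONPath query; the field names used here are the ones described in the table that follows. Replace `<host_name>` with the name of a host:

[source,terminal]
----
# Print the operational status and the current provisioning state of a host
$ oc get bmh <host_name> -n openshift-machine-api \
  -o jsonpath='{.status.operationalStatus}{"\n"}{.status.provisioning.state}{"\n"}'
----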
- - -.BareMetalHost status -[options="header"] -|==== -|Parameters |Description - -| `goodCredentials` -| A reference to the secret and its namespace holding the last set of baseboard management controller (BMC) credentials the system was able to validate as working. - -| `errorMessage` -| Details of the last error reported by the provisioning backend, if any. - -| `errorType` -a| Indicates the class of problem that has caused the host to enter an error state. The error types are: - -* `provisioned registration error`: Occurs when the controller is unable to re-register an already provisioned host. -* `registration error`: Occurs when the controller is unable to connect to the host's baseboard management controller. -* `inspection error`: Occurs when an attempt to obtain hardware details from the host fails. -* `preparation error`: Occurs when cleaning fails. -* `provisioning error`: Occurs when the controller fails to provision or deprovision the host. -* `power management error`: Occurs when the controller is unable to modify the power state of the host. -* `detach error`: Occurs when the controller is unable to detatch the host from the provisioner. - -a| ----- -hardware: - cpu - arch: - model: - clockMegahertz: - flags: - count: ----- -a| The `hardware.cpu` field details of the CPU(s) in the system. The fields include: - -* `arch`: The architecture of the CPU. -* `model`: The CPU model as a string. -* `clockMegahertz`: The speed in MHz of the CPU. -* `flags`: The list of CPU flags. For example, `'mmx','sse','sse2','vmx'` etc. -* `count`: The number of CPUs available in the system. - -a| ----- -hardware: - firmware: ----- -| Contains BIOS firmware information. For example, the hardware vendor and version. - -a| ----- -hardware: - nics: - - ip: - name: - mac: - speedGbps: - vlans: - vlanId: - pxe: ----- -a| The `hardware.nics` field contains a list of network interfaces for the host. The fields include: - -* `ip`: The IP address of the NIC, if one was assigned when the discovery agent ran. -* `name`: A string identifying the network device. For example, `nic-1`. -* `mac`: The MAC address of the NIC. -* `speedGbps`: The speed of the device in Gbps. -* `vlans`: A list holding all the VLANs available for this NIC. -* `vlanId`: The untagged VLAN ID. -* `pxe`: Whether the NIC is able to boot using PXE. - -a| ----- -hardware: - ramMebibytes: ----- -| The host's amount of memory in Mebibytes (MiB). - -a| ----- -hardware: - storage: - - name: - rotational: - sizeBytes: - serialNumber: ----- -a| The `hardware.storage` field contains a list of storage devices available to the host. The fields include: - -* `name`: A string identifying the storage device. For example, `disk 1 (boot)`. -* `rotational`: Indicates whether the disk is rotational, and returns either `true` or `false`. -* `sizeBytes`: The size of the storage device. -* `serialNumber`: The device's serial number. - -a| ----- -hardware: - systemVendor: - manufacturer: - productName: - serialNumber: ----- -| Contains information about the host's `manufacturer`, the `productName`, and the `serialNumber`. - - -| `lastUpdated` -| The timestamp of the last time the status of the host was updated. - -| `operationalStatus` -a| The status of the server. The status is one of the following: - -* `OK`: Indicates all the details for the host are known, correctly configured, working, and manageable. -* `discovered`: Implies some of the host's details are either not working correctly or missing. 
For example, the BMC address is known but the login credentials are not. -* `error`: Indicates the system found some sort of irrecoverable error. Refer to the `errorMessage` field in the status section for more details. -* `delayed`: Indicates that provisioning is delayed to limit simultaneous provisioning of multiple hosts. -* `detached`: Indicates the host is marked `unmanaged`. - -| `poweredOn` -| Boolean indicating whether the host is powered on. - -a| ----- -provisioning: - state: - id: - image: - raid: - firmware: - rootDeviceHints: ----- -a| The `provisioning` field contains values related to deploying an image to the host. The sub-fields include: - -* `state`: The current state of any ongoing provisioning operation. The states include: -** ``: There is no provisioning happening at the moment. -** `unmanaged`: There is insufficient information available to register the host. -** `registering`: The agent is checking the host's BMC details. -** `match profile`: The agent is comparing the discovered hardware details on the host against known profiles. -** `available`: The host is available for provisioning. This state was previously known as `ready`. -** `preparing`: The existing configuration will be removed, and the new configuration will be set on the host. -** `provisioning`: The provisioner is writing an image to the host's storage. -** `provisioned`: The provisioner wrote an image to the host's storage. -** `externally provisioned`: Metal^3^ does not manage the image on the host. -** `deprovisioning`: The provisioner is wiping the image from the host's storage. -** `inspecting`: The agent is collecting hardware details for the host. -** `deleting`: The agent is deleting the from the cluster. -* `id`: The unique identifier for the service in the underlying provisioning tool. -* `image`: The image most recently provisioned to the host. -* `raid`: The list of hardware or software RAID volumes recently set. -* `firmware`: The BIOS configuration for the bare metal server. -* `rootDeviceHints`: The root device selection instructions used for the most recent provisioning operation. - -| `triedCredentials` -| A reference to the secret and its namespace holding the last set of BMC credentials that were sent to the provisioning backend. - -|==== diff --git a/modules/bmo-about-the-firmwareschema-resource.adoc b/modules/bmo-about-the-firmwareschema-resource.adoc deleted file mode 100644 index 96228e915345..000000000000 --- a/modules/bmo-about-the-firmwareschema-resource.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// This is included in the following assemblies: -// -// post_installation_configuration/bare-metal-configuration.adoc - -:_content-type: REFERENCE -[id="about-the-firmwareschema-resource_{context}"] -= About the FirmwareSchema resource - -BIOS settings vary among hardware vendors and host models. A `FirmwareSchema` resource is a read-only resource that contains the types and limits for each BIOS setting on each host model. The data comes directly from the BMC through Ironic. The `FirmwareSchema` enables you to identify valid values you can specify in the `spec` field of the `HostFirmwareSettings` resource. The `FirmwareSchema` resource has a unique identifier derived from its settings and limits. Identical host models use the same `FirmwareSchema` identifier. It is likely that multiple instances of `HostFirmwareSettings` use the same `FirmwareSchema`. 
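To find the schema instance that applies to a particular host, you can follow the reference stored in that host's `HostFirmwareSettings` resource. This is a minimal sketch rather than part of the original module; replace `<host_name>` with the name of a host:

[source,terminal]
----
# Look up the schema name referenced by the host's firmware settings
$ oc get hfs <host_name> -n openshift-machine-api -o jsonpath='{.status.schema.name}{"\n"}'

# Inspect the referenced FirmwareSchema resource
$ oc get firmwareschema <schema_name> -n openshift-machine-api -o yaml
----

Where `<schema_name>` is the value returned by the first command.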
- -.FirmwareSchema specification -[options="header"] -|==== -|Parameters|Description - -a| ----- - - attribute_type: - allowable_values: - lower_bound: - upper_bound: - min_length: - max_length: - read_only: - unique: ----- - -a| The `spec` is a simple map consisting of the BIOS setting name and the limits of the setting. The fields include: - -* `attribute_type`: The type of setting. The supported types are: -** `Enumeration` -** `Integer` -** `String` -** `Boolean` -* `allowable_values`: A list of allowable values when the `attribute_type` is `Enumeration`. -* `lower_bound`: The lowest allowed value when `attribute_type` is `Integer`. -* `upper_bound`: The highest allowed value when `attribute_type` is `Integer`. -* `min_length`: The shortest string length that the value can have when `attribute_type` is `String`. -* `max_length`: The longest string length that the value can have when `attribute_type` is `String`. -* `read_only`: The setting is read only and cannot be modified. -* `unique`: The setting is specific to this host. - -|==== diff --git a/modules/bmo-about-the-hostfirmwaresettings-resource.adoc b/modules/bmo-about-the-hostfirmwaresettings-resource.adoc deleted file mode 100644 index e10a31dacab7..000000000000 --- a/modules/bmo-about-the-hostfirmwaresettings-resource.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// This is included in the following assemblies: -// -// post_installation_configuration/bare-metal-configuration.adoc - -:_content-type: REFERENCE -[id="about-the-hostfirmwaresettings-resource_{context}"] -= About the HostFirmwareSettings resource - -You can use the `HostFirmwareSettings` resource to retrieve and manage the BIOS settings for a host. When a host moves to the `Available` state, Ironic reads the host's BIOS settings and creates the `HostFirmwareSettings` resource. The resource contains the complete BIOS configuration returned from the baseboard management controller (BMC). Whereas, the `firmware` field in the `BareMetalHost` resource returns three vendor-independent fields, the `HostFirmwareSettings` resource typically comprises many BIOS settings of vendor-specific fields per host. - -The `HostFirmwareSettings` resource contains two sections: - -. The `HostFirmwareSettings` spec. -. The `HostFirmwareSettings` status. - -== The `HostFirmwareSettings` spec - -The `spec` section of the `HostFirmwareSettings` resource defines the desired state of the host's BIOS, and it is empty by default. Ironic uses the settings in the `spec.settings` section to update the baseboard management controller (BMC) when the host is in the `Preparing` state. Use the `FirmwareSchema` resource to ensure that you do not send invalid name/value pairs to hosts. See "About the FirmwareSchema resource" for additional details. - -.Example -[source,terminal] ----- -spec: - settings: - ProcTurboMode: Disabled<1> ----- -<1> In the foregoing example, the `spec.settings` section contains a name/value pair that will set the `ProcTurboMode` BIOS setting to `Disabled`. - -[NOTE] -==== -Integer parameters listed in the `status` section appear as strings. For example, `"1"`. When setting integers in the `spec.settings` section, the values should be set as integers without quotes. For example, `1`. -==== - -== The `HostFirmwareSettings` status - -The `status` represents the current state of the host's BIOS. 
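As a quick illustration, and not part of the original reference, you can check whether the most recent `spec.settings` change was accepted by reading the `Valid` condition described in the table that follows. Replace `<host_name>` with the name of a host:

[source,terminal]
----
# Print the status of the Valid condition for the host's firmware settings
$ oc get hfs <host_name> -n openshift-machine-api \
  -o jsonpath='{.status.conditions[?(@.type=="Valid")].status}{"\n"}'
----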
- -.HostFirmwareSettings -[options="header"] -|==== -|Parameters|Description -a| ----- -status: - conditions: - - lastTransitionTime: - message: - observedGeneration: - reason: - status: - type: ----- -a| The `conditions` field contains a list of state changes. The sub-fields include: - -* `lastTransitionTime`: The last time the state changed. -* `message`: A description of the state change. -* `observedGeneration`: The current generation of the `status`. If `metadata.generation` and this field are not the same, the `status.conditions` might be out of date. -* `reason`: The reason for the state change. -* `status`: The status of the state change. The status can be `True`, `False` or `Unknown`. -* `type`: The type of state change. The types are `Valid` and `ChangeDetected`. - -a| ----- -status: - schema: - name: - namespace: - lastUpdated: ----- -a| The `FirmwareSchema` for the firmware settings. The fields include: - -* `name`: The name or unique identifier referencing the schema. -* `namespace`: The namespace where the schema is stored. -* `lastUpdated`: The last time the resource was updated. - -a| ----- -status: - settings: ----- -| The `settings` field contains a list of name/value pairs of a host's current BIOS settings. - -|==== diff --git a/modules/bmo-editing-the-hostfirmwaresettings-resource.adoc b/modules/bmo-editing-the-hostfirmwaresettings-resource.adoc deleted file mode 100644 index 3ff058b6689a..000000000000 --- a/modules/bmo-editing-the-hostfirmwaresettings-resource.adoc +++ /dev/null @@ -1,96 +0,0 @@ -// This is included in the following assemblies: -// -// post_installation_configuration/bare-metal-configuration.adoc - -:_content-type: PROCEDURE -[id="editing-the-hostfirmwaresettings-resource_{context}"] -= Editing the HostFirmwareSettings resource - -You can edit the `HostFirmwareSettings` of provisioned hosts. - -[IMPORTANT] -==== -You can only edit hosts when they are in the `provisioned` state, excluding read-only values. You cannot edit hosts in the `externally provisioned` state. - -==== - -.Procedure - -. Get the list of `HostFirmwareSettings` resources: -+ -[source,terminal] ----- -$ oc get hfs -n openshift-machine-api ----- - -. Edit a host's `HostFirmwareSettings` resource: -+ -[source,terminal] ----- -$ oc edit hfs -n openshift-machine-api ----- -+ -Where `` is the name of a provisioned host. The `HostFirmwareSettings` resource will open in the default editor for your terminal. - -. Add name/value pairs to the `spec.settings` section: -+ -.Example -[source,terminal] ----- -spec: - settings: - name: value <1> ----- -<1> Use the `FirmwareSchema` resource to identify the available settings for the host. You cannot set values that are read-only. - -. Save the changes and exit the editor. - -. Get the host's machine name: -+ -[source,terminal] ----- - $ oc get bmh -n openshift-machine name ----- -+ -Where `` is the name of the host. The machine name appears under the `CONSUMER` field. - -. Annotate the machine to delete it from the machineset: -+ -[source,terminal] ----- -$ oc annotate machine machine.openshift.io/delete-machine=true -n openshift-machine-api ----- -+ -Where `` is the name of the machine to delete. - -. Get a list of nodes and count the number of worker nodes: -+ -[source,terminal] ----- -$ oc get nodes ----- - -. Get the machineset: -+ -[source,terminal] ----- -$ oc get machinesets -n openshift-machine-api ----- - -. 
Scale the machineset: -+ -[source,terminal] ----- -$ oc scale machineset -n openshift-machine-api --replicas= ----- -+ -Where `` is the name of the machineset and `` is the decremented number of worker nodes. - -. When the host enters the `Available` state, scale up the machineset to make the `HostFirmwareSettings` resource changes take effect: -+ -[source,terminal] ----- -$ oc scale machineset -n openshift-machine-api --replicas= ----- -+ -Where `` is the name of the machineset and `` is the number of worker nodes. diff --git a/modules/bmo-getting-the-baremetalhost-resource.adoc b/modules/bmo-getting-the-baremetalhost-resource.adoc deleted file mode 100644 index 689e3137e5ff..000000000000 --- a/modules/bmo-getting-the-baremetalhost-resource.adoc +++ /dev/null @@ -1,141 +0,0 @@ -// This is included in the following assemblies: -// -// post_installation_configuration/bare-metal-configuration.adoc -:_content-type: PROCEDURE -[id="getting-the-baremetalhost-resource_{context}"] -= Getting the BareMetalHost resource - -The `BareMetalHost` resource contains the properties of a physical host. You must get the `BareMetalHost` resource for a physical host to review its properties. - -.Procedure - -. Get the list of `BareMetalHost` resources: -+ -[source,terminal] ----- -$ oc get bmh -n openshift-machine-api -o yaml ----- -+ -[NOTE] -==== -You can use `baremetalhost` as the long form of `bmh` with `oc get` command. -==== - -. Get the list of hosts: -+ -[source,terminal] ----- -$ oc get bmh -n openshift-machine-api ----- - -. Get the `BareMetalHost` resource for a specific host: -+ -[source,terminal] ----- -$ oc get bmh -n openshift-machine-api -o yaml ----- -+ -Where `` is the name of the host. -+ -.Example output -[source,yaml] ----- -apiVersion: metal3.io/v1alpha1 -kind: BareMetalHost -metadata: - creationTimestamp: "2022-06-16T10:48:33Z" - finalizers: - - baremetalhost.metal3.io - generation: 2 - name: openshift-worker-0 - namespace: openshift-machine-api - resourceVersion: "30099" - uid: 1513ae9b-e092-409d-be1b-ad08edeb1271 -spec: - automatedCleaningMode: metadata - bmc: - address: redfish://10.46.61.19:443/redfish/v1/Systems/1 - credentialsName: openshift-worker-0-bmc-secret - disableCertificateVerification: true - bootMACAddress: 48:df:37:c7:f7:b0 - bootMode: UEFI - consumerRef: - apiVersion: machine.openshift.io/v1beta1 - kind: Machine - name: ocp-edge-958fk-worker-0-nrfcg - namespace: openshift-machine-api - customDeploy: - method: install_coreos - hardwareProfile: unknown - online: true - rootDeviceHints: - deviceName: /dev/sda - userData: - name: worker-user-data-managed - namespace: openshift-machine-api -status: - errorCount: 0 - errorMessage: "" - goodCredentials: - credentials: - name: openshift-worker-0-bmc-secret - namespace: openshift-machine-api - credentialsVersion: "16120" - hardware: - cpu: - arch: x86_64 - clockMegahertz: 2300 - count: 64 - flags: - - 3dnowprefetch - - abm - - acpi - - adx - - aes - model: Intel(R) Xeon(R) Gold 5218 CPU @ 2.30GHz - firmware: - bios: - date: 10/26/2020 - vendor: HPE - version: U30 - hostname: openshift-worker-0 - nics: - - mac: 48:df:37:c7:f7:b3 - model: 0x8086 0x1572 - name: ens1f3 - ramMebibytes: 262144 - storage: - - hctl: "0:0:0:0" - model: VK000960GWTTB - name: /dev/sda - sizeBytes: 960197124096 - type: SSD - vendor: ATA - systemVendor: - manufacturer: HPE - productName: ProLiant DL380 Gen10 (868703-B21) - serialNumber: CZ200606M3 - hardwareProfile: unknown - lastUpdated: "2022-06-16T11:41:42Z" - operationalStatus: OK - poweredOn: true - 
provisioning: - ID: 217baa14-cfcf-4196-b764-744e184a3413 - bootMode: UEFI - customDeploy: - method: install_coreos - image: - url: "" - raid: - hardwareRAIDVolumes: null - softwareRAIDVolumes: [] - rootDeviceHints: - deviceName: /dev/sda - state: provisioned - triedCredentials: - credentials: - name: openshift-worker-0-bmc-secret - namespace: openshift-machine-api - credentialsVersion: "16120" - ----- diff --git a/modules/bmo-getting-the-firmwareschema-resource.adoc b/modules/bmo-getting-the-firmwareschema-resource.adoc deleted file mode 100644 index 4c43701107d6..000000000000 --- a/modules/bmo-getting-the-firmwareschema-resource.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// This is included in the following assemblies: -// -// post_installation_configuration/bare-metal-configuration.adoc - -:_content-type: PROCEDURE -[id="getting-the-firmwareschema-resource_{context}"] -= Getting the FirmwareSchema resource - -Each host model from each vendor has different BIOS settings. When editing the `HostFirmwareSettings` resource's `spec` section, the name/value pairs you set must conform to that host's firmware schema. To ensure you are setting valid name/value pairs, get the `FirmwareSchema` for the host and review it. - -.Procedure - -. To get a list of `FirmwareSchema` resource instances, execute the following: -+ -[source,terminal] ----- -$ oc get firmwareschema -n openshift-machine-api ----- - -. To get a particular `FirmwareSchema` instance, execute: -+ -[source,terminal] ----- -$ oc get firmwareschema -n openshift-machine-api -o yaml ----- -+ -Where `` is the name of the schema instance stated in the `HostFirmwareSettings` resource (see Table 3). diff --git a/modules/bmo-getting-the-hostfirmwaresettings-resource.adoc b/modules/bmo-getting-the-hostfirmwaresettings-resource.adoc deleted file mode 100644 index 6a6c3e2b56b2..000000000000 --- a/modules/bmo-getting-the-hostfirmwaresettings-resource.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// This is included in the following assemblies: -// -// post_installation_configuration/bare-metal-configuration.adoc - -[id="getting-the-hostfirmwaresettings-resource_{context}"] -= Getting the HostFirmwareSettings resource - -The `HostFirmwareSettings` resource contains the vendor-specific BIOS properties of a physical host. You must get the `HostFirmwareSettings` resource for a physical host to review its BIOS properties. - -.Procedure - -. Get the detailed list of `HostFirmwareSettings` resources: -+ -[source,terminal] ----- -$ oc get hfs -n openshift-machine-api -o yaml ----- -+ -[NOTE] -==== -You can use `hostfirmwaresettings` as the long form of `hfs` with the `oc get` command. -==== - -. Get the list of `HostFirmwareSettings` resources: -+ -[source,terminal] ----- -$ oc get hfs -n openshift-machine-api ----- - -. Get the `HostFirmwareSettings` resource for a particular host -+ -[source,terminal] ----- -$ oc get hfs -n openshift-machine-api -o yaml ----- -+ -Where `` is the name of the host. 
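For reference, a trimmed `HostFirmwareSettings` resource is sketched below. The `spec` and `status` fields follow the tables described earlier; the timestamps, schema name, and individual BIOS setting names (for example, `ProcTurboMode`) are illustrative placeholders and vary by vendor and host model.

[source,yaml]
----
apiVersion: metal3.io/v1alpha1
kind: HostFirmwareSettings
metadata:
  name: openshift-worker-0
  namespace: openshift-machine-api
spec:
  settings: {}
status:
  conditions:
  - lastTransitionTime: "2022-06-16T12:00:00Z"
    message: ""
    observedGeneration: 1
    reason: Success
    status: "True"
    type: Valid
  schema:
    lastUpdated: "2022-06-16T12:00:00Z"
    name: schema-40562c88
    namespace: openshift-machine-api
  settings:
    BootMode: Uefi
    ProcTurboMode: Enabled
----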
diff --git a/modules/bmo-verifying-the-hostfirmware-settings-resource-is-valid.adoc b/modules/bmo-verifying-the-hostfirmware-settings-resource-is-valid.adoc deleted file mode 100644 index a358314edcca..000000000000 --- a/modules/bmo-verifying-the-hostfirmware-settings-resource-is-valid.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// This is included in the following assemblies: -// -// post_installation_configuration/bare-metal-configuration.adoc - -:_content-type: PROCEDURE -[id="verifying-the-hostfirmware-settings-resource-is-valid_{context}"] -= Verifying the HostFirmware Settings resource is valid - -When the user edits the `spec.settings` section to make a change to the `HostFirmwareSetting`(HFS) resource, the Bare Metal Operator (BMO) validates the change against the `FimwareSchema` resource, which is a read-only resource. If the setting is invalid, the BMO will set the `Type` value of the `status.Condition` setting to `False` and also generate an event and store it in the HFS resource. Use the following procedure to verify that the resource is valid. - -.Procedure - -. Get a list of `HostFirmwareSetting` resources: -+ -[source,terminal] ----- -$ oc get hfs -n openshift-machine-api ----- - -. Verify that the `HostFirmwareSettings` resource for a particular host is valid: -+ -[source,terminal] ----- -$ oc describe hfs -n openshift-machine-api ----- -+ -Where `` is the name of the host. -+ -.Example output -[source,terminal] ----- -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal ValidationFailed 2m49s metal3-hostfirmwaresettings-controller Invalid BIOS setting: Setting ProcTurboMode is invalid, unknown enumeration value - Foo ----- -+ -[IMPORTANT] -==== -If the response returns `ValidationFailed`, there is an error in the resource configuration and you must update the values to conform to the `FirmwareSchema` resource. -==== diff --git a/modules/bootstrap-aws-load-balancer-operator.adoc b/modules/bootstrap-aws-load-balancer-operator.adoc deleted file mode 100644 index 328f354d15b5..000000000000 --- a/modules/bootstrap-aws-load-balancer-operator.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// * networking/installing-albo-sts-cluster.adoc - -:_content-type: PROCEDURE -[id="nw-bootstra-albo-on-sts-cluster_{context}"] -= Bootstrapping AWS Load Balancer Operator on Security Token Service cluster - -.Prerequisites - -* You must extract and prepare the `ccoctl` binary. - -.Procedure - -. Create the `aws-load-balancer-operator` namespace by running the following command: -+ -[source,terminal] ----- -$ oc create namespace aws-load-balancer-operator ----- - -. Download the `CredentialsRequest` custom resource (CR) of the AWS Load Balancer Operator, and create a directory to store it by running the following command: -+ -[source,terminal] ----- -$ curl --create-dirs -o /cr.yaml https://raw.githubusercontent.com/openshift/aws-load-balancer-operator/main/hack/operator-credentials-request.yaml ----- - -. Use the `ccoctl` tool to process `CredentialsRequest` objects of the AWS Load Balancer Operator, by running the following command: -+ -[source,terminal] ----- -$ ccoctl aws create-iam-roles \ - --name --region= \ - --credentials-requests-dir= \ - --identity-provider-arn ----- - -. Apply the secrets generated in the manifests directory of your cluster by running the following command: -+ -[source,terminal] ----- -$ ls manifests/*-credentials.yaml | xargs -I{} oc apply -f {} ----- - -. 
Verify that the credentials secret of the AWS Load Balancer Operator is created by running the following command: -+ -[source,terminal] ----- -$ oc -n aws-load-balancer-operator get secret aws-load-balancer-operator --template='{{index .data "credentials"}}' | base64 -d ----- -+ -.Example output -[source,terminal] ----- -[default] -sts_regional_endpoints = regional -role_arn = arn:aws:iam::999999999999:role/aws-load-balancer-operator-aws-load-balancer-operator -web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token ----- diff --git a/modules/bound-sa-tokens-about.adoc b/modules/bound-sa-tokens-about.adoc deleted file mode 100644 index 03114f2b9277..000000000000 --- a/modules/bound-sa-tokens-about.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/bound-service-account-tokens.adoc - -:_content-type: CONCEPT -[id="bound-sa-tokens-about_{context}"] -= About bound service account tokens - -You can use bound service account tokens to limit the scope of permissions for a given service account token. These tokens are audience and time-bound. This facilitates the authentication of a service account to an IAM role and the generation of temporary credentials mounted to a pod. You can request bound service account tokens by using volume projection and the TokenRequest API. diff --git a/modules/bound-sa-tokens-configuring-externally.adoc b/modules/bound-sa-tokens-configuring-externally.adoc deleted file mode 100644 index 5b26a8f1c045..000000000000 --- a/modules/bound-sa-tokens-configuring-externally.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/bound-service-account-tokens.adoc - -:_content-type: PROCEDURE -[id="bound-sa-tokens-configuring-externally_{context}"] -= Creating bound service account tokens outside the pod - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have created a service account. This procedure assumes that the service account is named `build-robot`. 
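If the `build-robot` service account does not exist yet, you can create it in the current project first; a minimal sketch:

[source,terminal]
----
$ oc create serviceaccount build-robot
----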
- -.Procedure - -* Create the bound service account token outside the pod by running the following command: -+ -[source,terminal] ----- -$ oc create token build-robot ----- -+ -.Example output -[source,terminal] ----- -eyJhbGciOiJSUzI1NiIsImtpZCI6IkY2M1N4MHRvc2xFNnFSQlA4eG9GYzVPdnN3NkhIV0tRWmFrUDRNcWx4S0kifQ.eyJhdWQiOlsiaHR0cHM6Ly9pc3N1ZXIyLnRlc3QuY29tIiwiaHR0cHM6Ly9pc3N1ZXIxLnRlc3QuY29tIiwiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjIl0sImV4cCI6MTY3OTU0MzgzMCwiaWF0IjoxNjc5NTQwMjMwLCJpc3MiOiJodHRwczovL2lzc3VlcjIudGVzdC5jb20iLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImRlZmF1bHQiLCJzZXJ2aWNlYWNjb3VudCI6eyJuYW1lIjoidGVzdC1zYSIsInVpZCI6ImM3ZjA4MjkwLWIzOTUtNGM4NC04NjI4LTMzMTM1NTVhNWY1OSJ9fSwibmJmIjoxNjc5NTQwMjMwLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6ZGVmYXVsdDp0ZXN0LXNhIn0.WyAOPvh1BFMUl3LNhBCrQeaB5wSynbnCfojWuNNPSilT4YvFnKibxwREwmzHpV4LO1xOFZHSi6bXBOmG_o-m0XNDYL3FrGHd65mymiFyluztxa2lgHVxjw5reIV5ZLgNSol3Y8bJqQqmNg3rtQQWRML2kpJBXdDHNww0E5XOypmffYkfkadli8lN5QQD-MhsCbiAF8waCYs8bj6V6Y7uUKTcxee8sCjiRMVtXKjQtooERKm-CH_p57wxCljIBeM89VdaR51NJGued4hVV5lxvVrYZFu89lBEAq4oyQN_d6N1vBWGXQMyoihnt_fQjn-NfnlJWk-3NSZDIluDJAv7e-MTEk3geDrHVQKNEzDei2-Un64hSzb-n1g1M0Vn0885wQBQAePC9UlZm8YZlMNk1tq6wIUKQTMv3HPfi5HtBRqVc2eVs0EfMX4-x-PHhPCasJ6qLJWyj6DvyQ08dP4DW_TWZVGvKlmId0hzwpg59TTcLR0iCklSEJgAVEEd13Aa_M0-faD11L3MhUGxw0qxgOsPczdXUsolSISbefs7OKymzFSIkTAn9sDQ8PHMOsuyxsK8vzfrR-E0z7MAeguZ2kaIY7cZqbN6WFy0caWgx46hrKem9vCKALefElRYbCg3hcBmowBcRTOqaFHLNnHghhU1LaRpoFzH7OUarqX9SGQ ----- diff --git a/modules/bound-sa-tokens-configuring.adoc b/modules/bound-sa-tokens-configuring.adoc deleted file mode 100644 index 7c544dcab394..000000000000 --- a/modules/bound-sa-tokens-configuring.adoc +++ /dev/null @@ -1,139 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/bound-service-account-tokens.adoc - -:_content-type: PROCEDURE -[id="bound-sa-tokens-configuring_{context}"] -= Configuring bound service account tokens using volume projection - -You can configure pods to request bound service account tokens by using volume projection. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have created a service account. This procedure assumes that the service account is named `build-robot`. - -.Procedure - -. Optional: Set the service account issuer. -+ -This step is typically not required if the bound tokens are used only within the cluster. -+ -[IMPORTANT] -==== -If you change the service account issuer to a custom one, the previous service account issuer is still trusted for the next 24 hours. - -You can force all holders to request a new bound token either by manually restarting all pods in the cluster or by performing a rolling node restart. Before performing either action, wait for a new revision of the Kubernetes API server pods to roll out with your service account issuer changes. -==== - -.. Edit the `cluster` `Authentication` object: -+ -[source,terminal] ----- -$ oc edit authentications cluster ----- - -.. Set the `spec.serviceAccountIssuer` field to the desired service account issuer value: -+ -[source,yaml] ----- -spec: - serviceAccountIssuer: https://test.default.svc <1> ----- -<1> This value should be a URL from which the recipient of a bound token can source the public keys necessary to verify the signature of the token. The default is [x-]`https://kubernetes.default.svc`. - -.. Save the file to apply the changes. - -.. Wait for a new revision of the Kubernetes API server pods to roll out. 
It can take several minutes for all nodes to update to the new revision. Run the following command: -+ -[source,terminal] ----- -$ oc get kubeapiserver -o=jsonpath='{range .items[0].status.conditions[?(@.type=="NodeInstallerProgressing")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -Review the `NodeInstallerProgressing` status condition for the Kubernetes API server to verify that all nodes are at the latest revision. The output shows `AllNodesAtLatestRevision` upon successful update: -+ -[source,terminal] ----- -AllNodesAtLatestRevision -3 nodes are at revision 12 <1> ----- -<1> In this example, the latest revision number is `12`. -+ -If the output shows a message similar to one of the following messages, the update is still in progress. Wait a few minutes and try again. - -** `3 nodes are at revision 11; 0 nodes have achieved new revision 12` -** `2 nodes are at revision 11; 1 nodes are at revision 12` - -.. Optional: Force the holder to request a new bound token either by performing a rolling node restart or by manually restarting all pods in the cluster. - -*** Perform a rolling node restart: -+ -[WARNING] -==== -It is not recommended to perform a rolling node restart if you have custom workloads running on your cluster, because it can cause a service interruption. Instead, manually restart all pods in the cluster. -==== -+ -Restart nodes sequentially. Wait for the node to become fully available before restarting the next node. See _Rebooting a node gracefully_ for instructions on how to drain, restart, and mark a node as schedulable again. - -*** Manually restart all pods in the cluster: -+ -[WARNING] -==== -Be aware that running this command causes a service interruption, because it deletes every running pod in every namespace. These pods will automatically restart after they are deleted. -==== -+ -Run the following command: -+ -[source,terminal] ----- -$ for I in $(oc get ns -o jsonpath='{range .items[*]} {.metadata.name}{"\n"} {end}'); \ - do oc delete pods --all -n $I; \ - sleep 1; \ - done ----- - -. Configure a pod to use a bound service account token by using volume projection. - -.. Create a file called `pod-projected-svc-token.yaml` with the following contents: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: nginx -spec: - containers: - - image: nginx - name: nginx - volumeMounts: - - mountPath: /var/run/secrets/tokens - name: vault-token - serviceAccountName: build-robot <1> - volumes: - - name: vault-token - projected: - sources: - - serviceAccountToken: - path: vault-token <2> - expirationSeconds: 7200 <3> - audience: vault <4> ----- -<1> A reference to an existing service account. -<2> The path relative to the mount point of the file to project the token into. -<3> Optionally set the expiration of the service account token, in seconds. The default is 3600 seconds (1 hour) and must be at least 600 seconds (10 minutes). The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. -<4> Optionally set the intended audience of the token. The recipient of a token should verify that the recipient identity matches the audience claim of the token, and should otherwise reject the token. The audience defaults to the identifier of the API server. - -.. 
Create the pod: -+ -[source,terminal] ----- -$ oc create -f pod-projected-svc-token.yaml ----- -+ -The kubelet requests and stores the token on behalf of the pod, makes the token available to the pod at a configurable file path, and refreshes the token as it approaches expiration. - -. The application that uses the bound token must handle reloading the token when it rotates. -+ -The kubelet rotates the token if it is older than 80 percent of its time to live, or if the token is older than 24 hours. diff --git a/modules/build-image-docker.adoc b/modules/build-image-docker.adoc deleted file mode 100644 index 9c6a6cabd400..000000000000 --- a/modules/build-image-docker.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/dynamic-plugin/deploy-plugin-cluster.adoc - -:_content-type: PROCEDURE -[id="build-image-with-docker_{context}"] -= Build an image with Docker - -To deploy your plugin on a cluster, you need to build an image and push it to an image registry. - -.Procedure - -. Build the image with the following command: -+ -[source,terminal] ----- -$ docker build -t quay.io/my-repositroy/my-plugin:latest . ----- - -. Optional: If you want to test your image, run the following command: -+ -[source,terminal] ----- -$ docker run -it --rm -d -p 9001:80 quay.io/my-repository/my-plugin:latest ----- - -. Push the image by running the following command: -+ -[source,terminal] ----- -$ docker push quay.io/my-repository/my-plugin:latest ----- diff --git a/modules/building-memcached-operator-using-osdk.adoc b/modules/building-memcached-operator-using-osdk.adoc deleted file mode 100644 index 5406c77dec65..000000000000 --- a/modules/building-memcached-operator-using-osdk.adoc +++ /dev/null @@ -1,443 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-getting-started.adoc - -[id="building-memcached-operator-using-osdk_{context}"] -= Building a Go-based Operator using the Operator SDK - -This procedure walks through an example of building a simple Memcached Operator using tools and libraries provided by the SDK. - -.Prerequisites - -- Operator SDK CLI installed on the development workstation -- Operator Lifecycle Manager (OLM) installed on a Kubernetes-based cluster (v1.8 -or above to support the `apps/v1beta2` API group), for example {product-title} {product-version} -- Access to the cluster using an account with `cluster-admin` permissions -- OpenShift CLI (`oc`) v{product-version}+ installed - -.Procedure - -. *Create a new project.* -+ -Use the CLI to create a new `memcached-operator` project: -+ -[source,terminal] ----- -$ mkdir -p $GOPATH/src/github.com/example-inc/ ----- -+ -[source,terminal] ----- -$ cd $GOPATH/src/github.com/example-inc/ ----- -+ -[source,terminal] ----- -$ operator-sdk new memcached-operator ----- -+ -[source,terminal] ----- -$ cd memcached-operator ----- - -. *Add a new custom resource definition (CRD).* - -.. Use the CLI to add a new CRD API called `Memcached`, with `APIVersion` set to `cache.example.com/v1apha1` and `Kind` set to `Memcached`: -+ -[source,terminal] ----- -$ operator-sdk add api \ - --api-version=cache.example.com/v1alpha1 \ - --kind=Memcached ----- -+ -This scaffolds the Memcached resource API under `pkg/apis/cache/v1alpha1/`. - -.. 
Modify the spec and status of the `Memcached` custom resource (CR) at the `pkg/apis/cache/v1alpha1/memcached_types.go` file: -+ -[source,go] ----- -type MemcachedSpec struct { - // Size is the size of the memcached deployment - Size int32 `json:"size"` -} -type MemcachedStatus struct { - // Nodes are the names of the memcached pods - Nodes []string `json:"nodes"` -} ----- - -.. After modifying the `*_types.go` file, always run the following command to update the generated code for that resource type: -+ -[source,terminal] ----- -$ operator-sdk generate k8s ----- - -. *Optional: Add custom validation to your CRD.* -+ -OpenAPI v3.0 schemas are added to CRD manifests in the `spec.validation` block when the manifests are generated. This validation block allows Kubernetes to validate the properties in a Memcached CR when it is created or updated. -+ -Additionally, a `pkg/apis///zz_generated.openapi.go` file is generated. This file contains the Go representation of this validation block if the `+k8s:openapi-gen=true annotation` is present above the `Kind` type declaration, which is present by default. This auto-generated code is the OpenAPI model of your Go `Kind` type, from which you can create a full OpenAPI Specification and generate a client. -+ -As an Operator author, you can use Kubebuilder markers (annotations) to configure custom validations for your API. These markers must always have a `+kubebuilder:validation` prefix. For example, adding an enum-type specification can be done by adding the following marker: -+ -[source,go] ----- -// +kubebuilder:validation:Enum=Lion;Wolf;Dragon -type Alias string ----- -+ -Usage of markers in API code is discussed in the Kubebuilder link:https://book.kubebuilder.io/reference/generating-crd.html[Generating CRDs] and link:https://book.kubebuilder.io/reference/markers.html[Markers for Config/Code Generation] documentation. A full list of OpenAPIv3 validation markers is also available in the Kubebuilder link:https://book.kubebuilder.io/reference/markers/crd-validation.html[CRD Validation] documentation. -+ -If you add any custom validations, run the following command to update the OpenAPI validation section in the `deploy/crds/cache.example.com_memcacheds_crd.yaml` file for the CRD: -+ -[source,terminal] ----- -$ operator-sdk generate crds ----- -+ -.Example generated YAML -[source,yaml] ----- -spec: - validation: - openAPIV3Schema: - properties: - spec: - properties: - size: - format: int32 - type: integer ----- - -. *Add a new controller.* - -.. Add a new controller to the project to watch and reconcile the `Memcached` resource: -+ -[source,terminal] ----- -$ operator-sdk add controller \ - --api-version=cache.example.com/v1alpha1 \ - --kind=Memcached ----- -+ -This scaffolds a new controller implementation under `pkg/controller/memcached/`. - -.. For this example, replace the generated controller file `pkg/controller/memcached/memcached_controller.go` with the link:https://github.com/operator-framework/operator-sdk/blob/master/example/memcached-operator/memcached_controller.go.tmpl[example implementation]. -+ -The example controller executes the following reconciliation logic for each `Memcached` resource: -+ --- -* Create a Memcached deployment if it does not exist. -* Ensure that the Deployment size is the same as specified by the `Memcached` CR spec. -* Update the `Memcached` resource status with the names of the Memcached pods. --- -+ -The next two sub-steps inspect how the controller watches resources and how the reconcile loop is triggered. 
You can skip these steps to go directly to building and running the Operator. - -.. Inspect the controller implementation at the `pkg/controller/memcached/memcached_controller.go` file to see how the controller watches resources. -+ -The first watch is for the `Memcached` type as the primary resource. For each add, update, or delete event, the reconcile loop is sent a reconcile `Request` (a `:` key) for that `Memcached` object: -+ -[source,go] ----- -err := c.Watch( - &source.Kind{Type: &cachev1alpha1.Memcached{}}, &handler.EnqueueRequestForObject{}) ----- -+ -The next watch is for `Deployment` objects, but the event handler maps each event to a reconcile `Request` for the owner of the deployment. In this case, this is the `Memcached` object for which the deployment was created. This allows the controller to watch deployments as a secondary resource: -+ -[source,go] ----- -err := c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{ - IsController: true, - OwnerType: &cachev1alpha1.Memcached{}, - }) ----- - -.. Every controller has a `Reconciler` object with a `Reconcile()` method that implements the reconcile loop. The reconcile loop is passed the `Request` argument which is a `:` key used to lookup the primary resource object, `Memcached`, from the cache: -+ -[source,go] ----- -func (r *ReconcileMemcached) Reconcile(request reconcile.Request) (reconcile.Result, error) { - // Lookup the Memcached instance for this reconcile request - memcached := &cachev1alpha1.Memcached{} - err := r.client.Get(context.TODO(), request.NamespacedName, memcached) - ... -} ----- -+ -Based on the return value of the `Reconcile()` function, the reconcile `Request` might be requeued, and the loop might be triggered again: -+ -[source,go] ----- -// Reconcile successful - don't requeue -return reconcile.Result{}, nil -// Reconcile failed due to error - requeue -return reconcile.Result{}, err -// Requeue for any reason other than error -return reconcile.Result{Requeue: true}, nil ----- -[id="building-memcached-operator-using-osdk-build-and-run_{context}"] - -. *Build and run the Operator.* - -.. Before running the Operator, the CRD must be registered with the Kubernetes API server: -+ -[source,terminal] ----- -$ oc create \ - -f deploy/crds/cache_v1alpha1_memcached_crd.yaml ----- - -.. After registering the CRD, there are two options for running the Operator: -+ --- -* As a Deployment inside a Kubernetes cluster -* As Go program outside a cluster --- -+ -Choose one of the following methods. - -... _Option A:_ Running as a deployment inside the cluster. - -.... Build the `memcached-operator` image and push it to a registry: -+ -[source,terminal] ----- -$ operator-sdk build quay.io/example/memcached-operator:v0.0.1 ----- - -.... The deployment manifest is generated at `deploy/operator.yaml`. Update the deployment image as follows since the default is just a placeholder: -+ -[source,terminal] ----- -$ sed -i 's|REPLACE_IMAGE|quay.io/example/memcached-operator:v0.0.1|g' deploy/operator.yaml ----- - -.... Ensure you have an account on link:https://quay.io[Quay.io] for the next step, or substitute your preferred container registry. On the registry, link:https://quay.io/new/[create a new public image] repository named `memcached-operator`. - -.... Push the image to the registry: -+ -[source,terminal] ----- -$ podman push quay.io/example/memcached-operator:v0.0.1 ----- - -.... 
Set up RBAC and create the `memcached-operator` manifests: -+ -[source,terminal] ----- -$ oc create -f deploy/role.yaml ----- -+ -[source,terminal] ----- -$ oc create -f deploy/role_binding.yaml ----- -+ -[source,terminal] ----- -$ oc create -f deploy/service_account.yaml ----- -+ -[source,terminal] ----- -$ oc create -f deploy/operator.yaml ----- - -.... Verify that the `memcached-operator` deploy is up and running: -+ -[source,terminal] ----- -$ oc get deployment ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -memcached-operator 1 1 1 1 1m ----- - -... _Option B:_ Running locally outside the cluster. -+ -This method is preferred during development cycle to deploy and test faster. -+ -Run the Operator locally with the default Kubernetes configuration file present at `$HOME/.kube/config`: -+ -[source,terminal] ----- -$ operator-sdk run --local --namespace=default ----- -+ -You can use a specific `kubeconfig` using the flag `--kubeconfig=`. - -. *Verify that the Operator can deploy a Memcached application* by creating a `Memcached` CR. - -.. Create the example `Memcached` CR that was generated at `deploy/crds/cache_v1alpha1_memcached_cr.yaml`. - -.. View the file: -+ -[source,terminal] ----- -$ cat deploy/crds/cache_v1alpha1_memcached_cr.yaml ----- -+ -.Example output -[source,terminal] ----- -apiVersion: "cache.example.com/v1alpha1" -kind: "Memcached" -metadata: - name: "example-memcached" -spec: - size: 3 ----- - -.. Create the object: -+ -[source,terminal] ----- -$ oc apply -f deploy/crds/cache_v1alpha1_memcached_cr.yaml ----- - -.. Ensure that `memcached-operator` creates the deployment for the CR: -+ -[source,terminal] ----- -$ oc get deployment ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -memcached-operator 1 1 1 1 2m -example-memcached 3 3 3 3 1m ----- - -.. Check the pods and CR to confirm the CR status is updated with the pod names: -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -example-memcached-6fd7c98d8-7dqdr 1/1 Running 0 1m -example-memcached-6fd7c98d8-g5k7v 1/1 Running 0 1m -example-memcached-6fd7c98d8-m7vn7 1/1 Running 0 1m -memcached-operator-7cc7cfdf86-vvjqk 1/1 Running 0 2m ----- -+ -[source,terminal] ----- -$ oc get memcached/example-memcached -o yaml ----- -+ -.Example output -[source,terminal] ----- -apiVersion: cache.example.com/v1alpha1 -kind: Memcached -metadata: - clusterName: "" - creationTimestamp: 2018-03-31T22:51:08Z - generation: 0 - name: example-memcached - namespace: default - resourceVersion: "245453" - selfLink: /apis/cache.example.com/v1alpha1/namespaces/default/memcacheds/example-memcached - uid: 0026cc97-3536-11e8-bd83-0800274106a1 -spec: - size: 3 -status: - nodes: - - example-memcached-6fd7c98d8-7dqdr - - example-memcached-6fd7c98d8-g5k7v - - example-memcached-6fd7c98d8-m7vn7 ----- - -. *Verify that the Operator can manage a deployed Memcached application* by updating the size of the deployment. - -.. Change the `spec.size` field in the `memcached` CR from `3` to `4`: -+ -[source,terminal] ----- -$ cat deploy/crds/cache_v1alpha1_memcached_cr.yaml ----- -+ -.Example output -[source,terminal] ----- -apiVersion: "cache.example.com/v1alpha1" -kind: "Memcached" -metadata: - name: "example-memcached" -spec: - size: 4 ----- - -.. Apply the change: -+ -[source,terminal] ----- -$ oc apply -f deploy/crds/cache_v1alpha1_memcached_cr.yaml ----- - -.. 
Confirm that the Operator changes the deployment size: -+ -[source,terminal] ----- -$ oc get deployment ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -example-memcached 4 4 4 4 5m ----- - -. *Clean up the resources:* -+ -[source,terminal] ----- -$ oc delete -f deploy/crds/cache_v1alpha1_memcached_cr.yaml ----- -+ -[source,terminal] ----- -$ oc delete -f deploy/crds/cache_v1alpha1_memcached_crd.yaml ----- -+ -[source,terminal] ----- -$ oc delete -f deploy/operator.yaml ----- -+ -[source,terminal] ----- -$ oc delete -f deploy/role.yaml ----- -+ -[source,terminal] ----- -$ oc delete -f deploy/role_binding.yaml ----- -+ -[source,terminal] ----- -$ oc delete -f deploy/service_account.yaml ----- - -[role="_additional-resources"] -.Additional resources - -* For more information about OpenAPI v3.0 validation schemas in CRDs, refer to the link:https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#specifying-a-structural-schema[Kubernetes documentation]. diff --git a/modules/builds-adding-input-secrets-configmaps.adoc b/modules/builds-adding-input-secrets-configmaps.adoc deleted file mode 100644 index 1b52c52f7643..000000000000 --- a/modules/builds-adding-input-secrets-configmaps.adoc +++ /dev/null @@ -1,123 +0,0 @@ -:_content-type: PROCEDURE -[id="builds-adding-input-secrets-configmaps_{context}"] -= Adding input secrets and config maps - -To provide credentials and other configuration data to a build without placing them in source control, you can define input secrets and input config maps. - -In some scenarios, build operations require credentials or other configuration data to access dependent resources. To make that information available without placing it in source control, you can define input secrets and input config maps. - -.Procedure - -To add an input secret, config maps, or both to an existing `BuildConfig` object: - -. Create the `ConfigMap` object, if it does not exist: -+ -[source,terminal] ----- -$ oc create configmap settings-mvn \ - --from-file=settings.xml= ----- -+ -This creates a new config map named `settings-mvn`, which contains the plain text content of the `settings.xml` file. -+ -[TIP] -==== -You can alternatively apply the following YAML to create the config map: -[source,yaml] ----- -apiVersion: core/v1 -kind: ConfigMap -metadata: - name: settings-mvn -data: - settings.xml: | - - … # Insert maven settings here - ----- -==== - - -. Create the `Secret` object, if it does not exist: -+ -[source,terminal] ----- -$ oc create secret generic secret-mvn \ - --from-file=ssh-privatekey= - --type=kubernetes.io/ssh-auth ----- -+ -This creates a new secret named `secret-mvn`, which contains the base64 encoded content of the `id_rsa` private key. -+ -[TIP] -==== -You can alternatively apply the following YAML to create the input secret: -[source,yaml] ----- -apiVersion: core/v1 -kind: Secret -metadata: - name: secret-mvn -type: kubernetes.io/ssh-auth -data: - ssh-privatekey: | - # Insert ssh private key, base64 encoded ----- -==== - -. 
Add the config map and secret to the `source` section in the existing -`BuildConfig` object: -+ -[source,yaml] ----- -source: - git: - uri: https://github.com/wildfly/quickstart.git - contextDir: helloworld - configMaps: - - configMap: - name: settings-mvn - secrets: - - secret: - name: secret-mvn ----- - -To include the secret and config map in a new `BuildConfig` object, run the following command: - -[source,terminal] ----- -$ oc new-build \ - openshift/wildfly-101-centos7~https://github.com/wildfly/quickstart.git \ - --context-dir helloworld --build-secret “secret-mvn” \ - --build-config-map "settings-mvn" ----- - -During the build, the `settings.xml` and `id_rsa` files are copied into the directory where the source code is located. In {product-title} S2I builder images, this is the image working directory, which is set using the `WORKDIR` instruction in the `Dockerfile`. If you want to specify another directory, add a `destinationDir` to the definition: - -[source,yaml] ----- -source: - git: - uri: https://github.com/wildfly/quickstart.git - contextDir: helloworld - configMaps: - - configMap: - name: settings-mvn - destinationDir: ".m2" - secrets: - - secret: - name: secret-mvn - destinationDir: ".ssh" ----- - -You can also specify the destination directory when creating a new `BuildConfig` object: - -[source,terminal] ----- -$ oc new-build \ - openshift/wildfly-101-centos7~https://github.com/wildfly/quickstart.git \ - --context-dir helloworld --build-secret “secret-mvn:.ssh” \ - --build-config-map "settings-mvn:.m2" ----- - -In both cases, the `settings.xml` file is added to the `./.m2` directory of the build environment, and the `id_rsa` key is added to the `./.ssh` directory. diff --git a/modules/builds-adding-source-clone-secrets.adoc b/modules/builds-adding-source-clone-secrets.adoc deleted file mode 100644 index 8102276ddf00..000000000000 --- a/modules/builds-adding-source-clone-secrets.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/creating-build-inputs.adoc - -[id="builds-adding-source-clone-secrets_{context}"] -= Source Clone Secrets - -Builder pods require access to any Git repositories defined as source for a build. Source clone secrets are used to provide the builder pod with access it would not normally have access to, such as private repositories or repositories with self-signed or untrusted SSL certificates. - -The following source clone secret configurations are supported: - -* .gitconfig File -* Basic Authentication -* SSH Key Authentication -* Trusted Certificate Authorities - -[NOTE] -==== -You can also use combinations of these configurations to meet your specific needs. -==== diff --git a/modules/builds-assigning-builds-to-nodes.adoc b/modules/builds-assigning-builds-to-nodes.adoc deleted file mode 100644 index bfdc08422f83..000000000000 --- a/modules/builds-assigning-builds-to-nodes.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/advanced-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-assigning-builds-to-nodes_{context}"] -= Assigning builds to specific nodes - -Builds can be targeted to run on specific nodes by specifying labels in the `nodeSelector` field of a build configuration. The `nodeSelector` value is a set of key-value pairs that are matched to `Node` labels when scheduling the build pod. - -The `nodeSelector` value can also be controlled by cluster-wide default and override values. 
Defaults will only be applied if the build configuration does not define any key-value pairs for the `nodeSelector` and also does not define an explicitly empty map value of `nodeSelector:{}`. Override values will replace values in the build configuration on a key by key basis. - -//See Configuring Global Build Defaults and Overrides for more information. - -[NOTE] -==== -If the specified `NodeSelector` cannot be matched to a node with those labels, the build still stay in the `Pending` state indefinitely. -==== - -.Procedure - -* Assign builds to run on specific nodes by assigning labels in the `nodeSelector` field of the `BuildConfig`, for example: -+ -[source,yaml] ----- -apiVersion: "v1" -kind: "BuildConfig" -metadata: - name: "sample-build" -spec: - nodeSelector:<1> - key1: value1 - key2: value2 ----- -<1> Builds associated with this build configuration will run only on nodes with the `key1=value2` and `key2=value2` labels. diff --git a/modules/builds-automatically-add-source-clone-secrets.adoc b/modules/builds-automatically-add-source-clone-secrets.adoc deleted file mode 100644 index 9be83d18d409..000000000000 --- a/modules/builds-automatically-add-source-clone-secrets.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-automatically-add-source-clone-secrets_{context}"] -= Automatically adding a source clone secret to a build configuration - -When a `BuildConfig` is created, {product-title} can automatically populate its source clone secret reference. This behavior allows the resulting builds to automatically use the credentials stored in the referenced secret to authenticate to a remote Git repository, without requiring further configuration. - -To use this functionality, a secret containing the Git repository credentials must exist in the namespace in which the `BuildConfig` is later created. This secrets must include one or more annotations prefixed with `build.openshift.io/source-secret-match-uri-`. The value of each of these annotations is a Uniform Resource Identifier (URI) pattern, which is defined as follows. When a `BuildConfig` is created without a source clone secret reference and its Git source URI matches a URI pattern in a secret annotation, {product-title} automatically inserts a reference to that secret in the `BuildConfig`. - -.Prerequisites - -A URI pattern must consist of: - -* A valid scheme: `*://`, `git://`, `http://`, `https://` or `ssh://` -* A host: \*` or a valid hostname or IP address optionally preceded by `*.` -* A path: `/\*` or `/` followed by any characters optionally including `*` characters - -In all of the above, a `*` character is interpreted as a wildcard. - -[IMPORTANT] -==== -URI patterns must match Git source URIs which are conformant to link:https://www.ietf.org/rfc/rfc3986.txt[RFC3986]. Do not include a username (or password) component in a URI pattern. - -For example, if you use `ssh://git@bitbucket.atlassian.com:7999/ATLASSIAN jira.git` for a git repository URL, the source secret must be specified as `pass:c[ssh://bitbucket.atlassian.com:7999/*]` (and not `pass:c[ssh://git@bitbucket.atlassian.com:7999/*]`). - -[source,terminal] ----- -$ oc annotate secret mysecret \ - 'build.openshift.io/source-secret-match-uri-1=ssh://bitbucket.atlassian.com:7999/*' ----- - -==== - -.Procedure - -If multiple secrets match the Git URI of a particular `BuildConfig`, {product-title} selects the secret with the longest match. 
This allows for basic overriding, as in the following example. - -The following fragment shows two partial source clone secrets, the first matching any server in the domain `mycorp.com` accessed by HTTPS, and the second overriding access to servers `mydev1.mycorp.com` and `mydev2.mycorp.com`: - -[source,yaml] ----- -kind: Secret -apiVersion: v1 -metadata: - name: matches-all-corporate-servers-https-only - annotations: - build.openshift.io/source-secret-match-uri-1: https://*.mycorp.com/* -data: - ... ---- -kind: Secret -apiVersion: v1 -metadata: - name: override-for-my-dev-servers-https-only - annotations: - build.openshift.io/source-secret-match-uri-1: https://mydev1.mycorp.com/* - build.openshift.io/source-secret-match-uri-2: https://mydev2.mycorp.com/* -data: - ... ----- - -* Add a `build.openshift.io/source-secret-match-uri-` annotation to a pre-existing secret using: -+ -[source,terminal] ----- -$ oc annotate secret mysecret \ - 'build.openshift.io/source-secret-match-uri-1=https://*.mycorp.com/*' ----- diff --git a/modules/builds-basic-access-build-logs.adoc b/modules/builds-basic-access-build-logs.adoc deleted file mode 100644 index 2d5c743cb2b6..000000000000 --- a/modules/builds-basic-access-build-logs.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-access-build-logs_{context}"] -= Accessing build logs - -You can access build logs using the web console or the CLI. - -.Procedure - -* To stream the logs using the build directly, enter the following command: -+ -[source,terminal] ----- -$ oc describe build ----- diff --git a/modules/builds-basic-access-build-verbosity.adoc b/modules/builds-basic-access-build-verbosity.adoc deleted file mode 100644 index c969ea8da1c0..000000000000 --- a/modules/builds-basic-access-build-verbosity.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-access-build-verbosity_{context}"] -= Enabling log verbosity - -You can enable a more verbose output by passing the `BUILD_LOGLEVEL` environment variable as part of the `sourceStrategy` -ifdef::openshift-origin,openshift-enterprise[] -or `dockerStrategy` -endif::[] -in a `BuildConfig`. - -[NOTE] -==== -An administrator can set the default build verbosity for the entire {product-title} instance by configuring `env/BUILD_LOGLEVEL`. This default can be overridden by specifying `BUILD_LOGLEVEL` in a given `BuildConfig`. You can specify a higher priority override on the command line for non-binary builds by passing `--build-loglevel` to `oc start-build`. -==== - -Available log levels for source builds are as follows: - -[horizontal] -Level 0:: Produces output from containers running the `assemble` script and all encountered errors. This is the default. -Level 1:: Produces basic information about the executed process. -Level 2:: Produces very detailed information about the executed process. -Level 3:: Produces very detailed information about the executed process, and a listing of the archive contents. -Level 4:: Currently produces the same information as level 3. -Level 5:: Produces everything mentioned on previous levels and additionally provides docker push messages. 
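As the note above mentions, you can also raise the log level for a single non-binary build from the command line. A minimal sketch, assuming a `BuildConfig` named `sample-build`:

[source,terminal]
----
$ oc start-build sample-build --build-loglevel=3
----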
- -.Procedure - -* To enable more verbose output, pass the `BUILD_LOGLEVEL` environment variable as part of the `sourceStrategy` -ifndef::openshift-online[] -or `dockerStrategy` -endif::[] -in a `BuildConfig`: -+ -[source,yaml] ----- -sourceStrategy: -... - env: - - name: "BUILD_LOGLEVEL" - value: "2" <1> ----- -<1> Adjust this value to the desired log level. diff --git a/modules/builds-basic-access-buildconfig-logs.adoc b/modules/builds-basic-access-buildconfig-logs.adoc deleted file mode 100644 index c5eaa2386c74..000000000000 --- a/modules/builds-basic-access-buildconfig-logs.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-access-buildconfig-logs_{context}"] -= Accessing BuildConfig logs - -You can access `BuildConfig` logs using the web console or the CLI. - -.Procedure - -* To stream the logs of the latest build for a `BuildConfig`, enter the following command: -+ -[source,terminal] ----- -$ oc logs -f bc/ ----- diff --git a/modules/builds-basic-access-buildconfig-version-logs.adoc b/modules/builds-basic-access-buildconfig-version-logs.adoc deleted file mode 100644 index 4dfe03f3ce88..000000000000 --- a/modules/builds-basic-access-buildconfig-version-logs.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-access-buildconfig-version-logs_{context}"] -= Accessing BuildConfig logs for a given version build - -You can access logs for a given version build for a `BuildConfig` using the web console or the CLI. - -.Procedure - -* To stream the logs for a given version build for a `BuildConfig`, enter the following command: -+ -[source,terminal] ----- -$ oc logs --version= bc/ ----- diff --git a/modules/builds-basic-cancel-all-state.adoc b/modules/builds-basic-cancel-all-state.adoc deleted file mode 100644 index 9a6cac4782b2..000000000000 --- a/modules/builds-basic-cancel-all-state.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-cancel-all-state_{context}"] -= Canceling all builds in a given state - -You can cancel all builds in a given state, such as `new` or `pending`, while ignoring the builds in other states. - -.Procedure - -* To cancel all in a given state, enter the following command: -+ -[source,terminal] ----- -$ oc cancel-build bc/ ----- diff --git a/modules/builds-basic-cancel-all.adoc b/modules/builds-basic-cancel-all.adoc deleted file mode 100644 index 5fbfd37fdc1d..000000000000 --- a/modules/builds-basic-cancel-all.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-cancel-all_{context}"] -= Canceling all builds - -You can cancel all builds from the build configuration with the following CLI command. 
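For example, assuming a `BuildConfig` named `sample-build` (a hypothetical name), the command in the following procedure takes this form:

[source,terminal]
----
$ oc cancel-build bc/sample-build
----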
- -.Procedure - -* To cancel all builds, enter the following command: -+ -[source,terminal] ----- -$ oc cancel-build bc/ ----- diff --git a/modules/builds-basic-cancel-build.adoc b/modules/builds-basic-cancel-build.adoc deleted file mode 100644 index 3f34748c789c..000000000000 --- a/modules/builds-basic-cancel-build.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-cancel-build_{context}"] -= Canceling a build - -You can cancel a build using the web console, or with the following CLI command. - -.Procedure - -* To manually cancel a build, enter the following command: -+ -[source,terminal] ----- -$ oc cancel-build ----- diff --git a/modules/builds-basic-cancel-multiple.adoc b/modules/builds-basic-cancel-multiple.adoc deleted file mode 100644 index f116b33d4b15..000000000000 --- a/modules/builds-basic-cancel-multiple.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-cancel-multiple_{context}"] -= Canceling multiple builds - -You can cancel multiple builds with the following CLI command. - -.Procedure - -* To manually cancel multiple builds, enter the following command: -+ -[source,terminal] ----- -$ oc cancel-build ----- diff --git a/modules/builds-basic-delete-buildconfig.adoc b/modules/builds-basic-delete-buildconfig.adoc deleted file mode 100644 index c0297e357dca..000000000000 --- a/modules/builds-basic-delete-buildconfig.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-delete-buildconfig_{context}"] -= Deleting a BuildConfig - -You can delete a `BuildConfig` using the following command. - -.Procedure - -* To delete a `BuildConfig`, enter the following command: -+ -[source,terminal] ----- -$ oc delete bc ----- -+ -This also deletes all builds that were instantiated from this `BuildConfig`. - -* To delete a `BuildConfig` and keep the builds instatiated from the `BuildConfig`, specify the `--cascade=false` flag when you enter the following command: -+ -[source,terminal] ----- -$ oc delete --cascade=false bc ----- diff --git a/modules/builds-basic-edit-buildconfig.adoc b/modules/builds-basic-edit-buildconfig.adoc deleted file mode 100644 index 698f4139f838..000000000000 --- a/modules/builds-basic-edit-buildconfig.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-edit-buildconfig_{context}"] -= Editing a BuildConfig - -To edit your build configurations, you use the *Edit BuildConfig* option in the *Builds* view of the *Developer* perspective. - -You can use either of the following views to edit a `BuildConfig`: - -* The *Form view* enables you to edit your `BuildConfig` using the standard form fields and checkboxes. -* The *YAML view* enables you to edit your `BuildConfig` with full control over the operations. - -You can switch between the *Form view* and *YAML view* without losing any data. The data in the *Form view* is transferred to the *YAML view* and vice versa. - -.Procedure - -. In the *Builds* view of the *Developer* perspective, click the menu {kebab} to see the *Edit BuildConfig* option. -. Click *Edit BuildConfig* to see the *Form view* option. -. 
In the *Git* section, enter the Git repository URL for the codebase you want to use to create an application. The URL is then validated. -* Optional: Click *Show Advanced Git Options* to add details such as: -** *Git Reference* to specify a branch, tag, or commit that contains code you want to use to build the application. -** *Context Dir* to specify the subdirectory that contains code you want to use to build the application. -** *Source Secret* to create a *Secret Name* with credentials for pulling your source code from a private repository. -. In the *Build from* section, select the option that you would like to build from. You can use the following options: -** *Image Stream tag* references an image for a given image stream and tag. Enter the project, image stream, and tag of the location you would like to build from and push to. -** *Image Stream image* references an image for a given image stream and image name. Enter the image stream image you would like to build from. Also enter the project, image stream, and tag to push to. -** *Docker image*: The Docker image is referenced through a Docker image repository. You will also need to enter the project, image stream, and tag to refer to where you would like to push to. -. Optional: In the *Environment Variables* section, add the environment variables associated with the project by using the *Name* and *Value* fields. To add more environment variables, use *Add Value*, or *Add from ConfigMap* and *Secret* . -. Optional: To further customize your application, use the following advanced options: -Trigger:: -Triggers a new image build when the builder image changes. Add more triggers by clicking *Add Trigger* and selecting the *Type* and *Secret*. - -Secrets:: -Adds secrets for your application. Add more secrets by clicking *Add secret* and selecting the *Secret* and *Mount point*. - -Policy:: -Click *Run policy* to select the build run policy. The selected policy determines the order in which builds created from the build configuration must run. - -Hooks:: -Select *Run build hooks after image is built* to run commands at the end of the build and verify the image. Add *Hook type*, *Command*, and *Arguments* to append to the command. - -. Click *Save* to save the `BuildConfig`. diff --git a/modules/builds-basic-start-build.adoc b/modules/builds-basic-start-build.adoc deleted file mode 100644 index 15f093b5c24b..000000000000 --- a/modules/builds-basic-start-build.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-start-build_{context}"] -= Starting a build - -You can manually start a new build from an existing build configuration in your current project. - -.Procedure - -To manually start a build, enter the following command: - -[source,terminal] ----- -$ oc start-build ----- diff --git a/modules/builds-basic-start-environment-variable.adoc b/modules/builds-basic-start-environment-variable.adoc deleted file mode 100644 index f09305f58976..000000000000 --- a/modules/builds-basic-start-environment-variable.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-start-environment-variable_{context}"] -= Setting environment variables when starting a build - -You can specify the `--env` flag to set any desired environment variable for the build. 
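For example, assuming a `BuildConfig` named `sample-build` and an illustrative variable, the command in the following procedure takes this form:

[source,terminal]
----
$ oc start-build sample-build --env=BUILD_LOGLEVEL=2
----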
- -.Procedure - -* To specify a desired environment variable, enter the following command: -+ -[source,terminal] ----- -$ oc start-build --env== ----- diff --git a/modules/builds-basic-start-logs.adoc b/modules/builds-basic-start-logs.adoc deleted file mode 100644 index 4c4ec349f430..000000000000 --- a/modules/builds-basic-start-logs.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-start-logs_{context}"] -= Streaming build logs - -You can specify the `--follow` flag to stream the build's logs in `stdout`. - -.Procedure - -* To manually stream a build's logs in `stdout`, enter the following command: -+ -[source,terminal] ----- -$ oc start-build --follow ----- diff --git a/modules/builds-basic-start-re-run.adoc b/modules/builds-basic-start-re-run.adoc deleted file mode 100644 index 3a7690c1964b..000000000000 --- a/modules/builds-basic-start-re-run.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-start-re-run_{context}"] -= Re-running a build - -You can manually re-run a build using the `--from-build` flag. - -.Procedure - -* To manually re-run a build, enter the following command: -+ -[source,terminal] ----- -$ oc start-build --from-build= ----- diff --git a/modules/builds-basic-start-source.adoc b/modules/builds-basic-start-source.adoc deleted file mode 100644 index 598a7b46399b..000000000000 --- a/modules/builds-basic-start-source.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-start-source_{context}"] -= Starting a build with source - -Rather than relying on a Git source pull -ifdef::openshift-origin,openshift-enterprise[] -or a Dockerfile -endif::[] -for a build, you can also start a build by directly pushing your source, which could be the contents of a Git or SVN working directory, a set of pre-built binary artifacts you want to deploy, or a single file. This can be done by specifying one of the following options for the `start-build` command: - -[cols="1,2",options="header"] -|=== -|Option |Description - -|`--from-dir=` -|Specifies a directory that will be archived and used as a binary input for the build. - -|`--from-file=` -|Specifies a single file that will be the only file in the build source. The file is placed in the root of an empty directory with the same file name as the original file provided. - -|`--from-repo=` -|Specifies a path to a local repository to use as the binary input for a build. Add the `--commit` option to control which branch, tag, or commit is used for the build. -|=== - -When passing any of these options directly to the build, the contents are streamed to the build and override the current build source settings. - -[NOTE] -==== -Builds triggered from binary input will not preserve the source on the server, so rebuilds triggered by base image changes will use the source specified in the build configuration. 
-==== - -.Procedure - -* Start a build from a source using the following command to send the contents of a local Git repository as an archive from the tag `v2`: -+ -[source,terminal] ----- -$ oc start-build hello-world --from-repo=../hello-world --commit=v2 ----- diff --git a/modules/builds-basic-view-build-details.adoc b/modules/builds-basic-view-build-details.adoc deleted file mode 100644 index 8d32c5b7f846..000000000000 --- a/modules/builds-basic-view-build-details.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-view-build-details_{context}"] -= Viewing build details - -You can view build details with the web console or by using the `oc describe` CLI command. - -This displays information including: - -* The build source. -* The build strategy. -* The output destination. -* Digest of the image in the destination registry. -* How the build was created. - -If the build uses the -ifdef::openshift-origin,openshift-enterprise[] -`Docker` or -endif::[] -`Source` strategy, the `oc describe` output also includes information about the source revision used for the build, including the commit ID, author, committer, and message. - -.Procedure - -* To view build details, enter the following command: -+ -[source,terminal] ----- -$ oc describe build ----- diff --git a/modules/builds-binary-source.adoc b/modules/builds-binary-source.adoc deleted file mode 100644 index 25af1615c63b..000000000000 --- a/modules/builds-binary-source.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -[id="builds-binary-source_{context}"] -= Binary (local) source - -Streaming content from a local file system to the builder is called a `Binary` type build. The corresponding value of `BuildConfig.spec.source.type` is `Binary` for these builds. - -This source type is unique in that it is leveraged solely based on your use of the `oc start-build`. - -[NOTE] -==== -Binary type builds require content to be streamed from the local file system, so automatically triggering a binary type build, like an image change trigger, is not possible. This is because the binary files cannot be provided. Similarly, you cannot launch binary type builds from the web console. -==== - -To utilize binary builds, invoke `oc start-build` with one of these options: - -* `--from-file`: The contents of the file you specify are sent as a binary stream to the builder. You can also specify a URL to a file. Then, the builder stores the data in a file with the same name at the top of the build context. - -* `--from-dir` and `--from-repo`: The contents are archived and sent as a binary stream to the builder. Then, the builder extracts the contents of the archive within the build context directory. With `--from-dir`, you can also specify a URL to an archive, which is extracted. - -* `--from-archive`: The archive you specify is sent to the builder, where it is extracted within the build context directory. This option behaves the same as `--from-dir`; an archive is created on your host first, whenever the argument to these options is a directory. - -In each of the previously listed cases: - -* If your `BuildConfig` already has a `Binary` source type defined, it is effectively ignored and replaced by what the client sends. 
- -* If your `BuildConfig` has a `Git` source type defined, it is dynamically disabled, since `Binary` and `Git` are mutually exclusive, and the data in the binary stream provided to the builder takes precedence. - -Instead of a file name, you can pass a URL with HTTP or HTTPS schema to `--from-file` and `--from-archive`. When using `--from-file` with a URL, the name of the file in the builder image is determined by the `Content-Disposition` header sent by the web server, or the last component of the URL path if the header is not present. No form of authentication is supported and it is not possible to use custom TLS certificate or disable certificate validation. - -When using `oc new-build --binary=true`, the command ensures that the restrictions associated with binary builds are enforced. The resulting `BuildConfig` has a source type of `Binary`, meaning that the only valid way to run a build for this `BuildConfig` is to use `oc start-build` with one of the `--from` options to provide the requisite binary data. - -ifndef::openshift-online[] -The Dockerfile and `contextDir` source options have special meaning with binary builds. - -Dockerfile can be used with any binary build source. If Dockerfile is used and the binary stream is an archive, its contents serve as a replacement Dockerfile to any Dockerfile in the archive. If Dockerfile is used with the `--from-file` argument, and the file argument is named Dockerfile, the value from Dockerfile replaces the value from the binary stream. -endif::[] - -In the case of the binary stream encapsulating extracted archive content, the value of the `contextDir` field is interpreted as a subdirectory within the archive, and, if valid, the builder changes into that subdirectory before executing the build. diff --git a/modules/builds-build-custom-builder-image.adoc b/modules/builds-build-custom-builder-image.adoc deleted file mode 100644 index 811a96a47180..000000000000 --- a/modules/builds-build-custom-builder-image.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/custom-builds-buildah.adoc - - -:_content-type: PROCEDURE -[id="builds-build-custom-builder-image_{context}"] -= Build custom builder image - -You can use {product-title} to build and push custom builder images to use in a custom strategy. - -.Prerequisites - -* Define all the inputs that will go into creating your new custom builder image. - -.Procedure - -. Define a `BuildConfig` object that will build your custom builder image: -+ -[source,terminal] ----- -$ oc new-build --binary --strategy=docker --name custom-builder-image ----- - -. From the directory in which you created your custom build image, run the build: -+ -[source,terminal] ----- -$ oc start-build custom-builder-image --from-dir . -F ----- -+ -After the build completes, your new custom builder image is available in your project in an image stream tag that is named `custom-builder-image:latest`. diff --git a/modules/builds-build-environment.adoc b/modules/builds-build-environment.adoc deleted file mode 100644 index 9477f016a7c4..000000000000 --- a/modules/builds-build-environment.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -[id="builds-build-environment_{context}"] -= Build environments - -As with pod environment variables, build environment variables can be defined in terms of references to other resources or variables using the Downward API. There are some exceptions, which are noted. 
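For illustration, a build environment variable can pull a value from the build's own metadata through the Downward API. The following is a minimal sketch, assuming a source-strategy build; `BUILD_NAME` is an illustrative variable name:

[source,yaml]
----
spec:
  strategy:
    sourceStrategy:
      env:
      - name: BUILD_NAME
        valueFrom:
          fieldRef:
            fieldPath: metadata.name
----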
- -You can also manage environment variables defined in the `BuildConfig` with the `oc set env` command. - -[NOTE] -==== -Referencing container resources using `valueFrom` in build environment variables is not supported as the references are resolved before the container is created. -==== diff --git a/modules/builds-build-hooks.adoc b/modules/builds-build-hooks.adoc deleted file mode 100644 index f2aa289b7be6..000000000000 --- a/modules/builds-build-hooks.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -[id="builds-build-hooks_{context}"] -= Build hooks - -Build hooks allow behavior to be injected into the build process. - -The `postCommit` field of a `BuildConfig` object runs commands inside a temporary container that is running the build output image. The hook is run immediately after the last layer of the image has been committed and before the image is pushed to a registry. - -The current working directory is set to the image's `WORKDIR`, which is the default working directory of the container image. For most images, this is where the source code is located. - -The hook fails if the script or command returns a non-zero exit code or if starting the temporary container fails. When the hook fails it marks the build as failed and the image is not pushed to a registry. The reason for failing can be inspected by looking at the build logs. - -Build hooks can be used to run unit tests to verify the image before the build is marked complete and the image is made available in a registry. If all tests pass and the test runner returns with exit code `0`, the build is marked successful. In case of any test failure, the build is marked as failed. In all cases, the build log contains the output of the test runner, which can be used to identify failed tests. - -The `postCommit` hook is not only limited to running tests, but can be used for other commands as well. Since it runs in a temporary container, changes made by the hook do not persist, meaning that running the hook cannot affect the final image. This behavior allows for, among other uses, the installation and usage of test dependencies that are automatically discarded and are not present in the final image. diff --git a/modules/builds-build-pruning.adoc b/modules/builds-build-pruning.adoc deleted file mode 100644 index ceba6c9191a5..000000000000 --- a/modules/builds-build-pruning.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/advanced-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-build-pruning_{context}"] -= Pruning builds - -By default, builds that have completed their lifecycle are persisted indefinitely. You can limit the number of previous builds that are retained. - -.Procedure - -. Limit the number of previous builds that are retained by supplying a positive integer value for `successfulBuildsHistoryLimit` or `failedBuildsHistoryLimit` in your `BuildConfig`, for example: -+ -[source,yaml] ----- -apiVersion: "v1" -kind: "BuildConfig" -metadata: - name: "sample-build" -spec: - successfulBuildsHistoryLimit: 2 <1> - failedBuildsHistoryLimit: 2 <2> ----- -<1> `successfulBuildsHistoryLimit` will retain up to two builds with a status of `completed`. -<2> `failedBuildsHistoryLimit` will retain up to two builds with a status of `failed`, `canceled`, or `error`. - -. Trigger build pruning by one of the following actions: -+ -* Updating a build configuration. 
-* Waiting for a build to complete its lifecycle. - -Builds are sorted by their creation timestamp with the oldest builds being pruned first. - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -[NOTE] -==== -Administrators can manually prune builds using the 'oc adm' object pruning command. -==== -endif::[] diff --git a/modules/builds-build-run-policy.adoc b/modules/builds-build-run-policy.adoc deleted file mode 100644 index 216ce745586b..000000000000 --- a/modules/builds-build-run-policy.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/advanced-build-operations.adoc - -[id="builds-build-run-policy_{context}"] -= Build run policy - -The build run policy describes the order in which the builds created from the build configuration should run. This can be done by changing the value of the `runPolicy` field in the `spec` section of the `Build` specification. - -It is also possible to change the `runPolicy` value for existing build configurations, by: - -* Changing `Parallel` to `Serial` or `SerialLatestOnly` and triggering a new build from this configuration causes the new build to wait until all parallel builds complete as the serial build can only run alone. -* Changing `Serial` to `SerialLatestOnly` and triggering a new build causes cancellation of all existing builds in queue, except the currently running build and the most recently created build. The newest build runs next. diff --git a/modules/builds-buildconfig.adoc b/modules/builds-buildconfig.adoc deleted file mode 100644 index 86101dfb34d7..000000000000 --- a/modules/builds-buildconfig.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// * builds/understanding-builds.adoc - -:_content-type: REFERENCE -[id="builds-buildconfig_{context}"] -= BuildConfigs - -A build configuration describes a single build definition and a set of triggers for when a new build is created. Build configurations are defined by a `BuildConfig`, which is a REST object that can be used in a POST to the API server to create a new instance. - -A build configuration, or `BuildConfig`, is characterized by a build strategy -and one or more sources. The strategy determines the process, while the sources provide its input. - -Depending on how you choose to create your application using {product-title}, a `BuildConfig` is typically generated automatically for you if you use the web console or CLI, and it can be edited at any time. Understanding the parts that make up a `BuildConfig` and their available options can help if you choose to manually change your configuration later. - -The following example `BuildConfig` results in a new build every time a container image tag or the source code changes: - -.`BuildConfig` object definition -[source,yaml] ----- -kind: BuildConfig -apiVersion: build.openshift.io/v1 -metadata: - name: "ruby-sample-build" <1> -spec: - runPolicy: "Serial" <2> - triggers: <3> - - - type: "GitHub" - github: - secret: "secret101" - - type: "Generic" - generic: - secret: "secret101" - - - type: "ImageChange" - source: <4> - git: - uri: "https://github.com/openshift/ruby-hello-world" - strategy: <5> - sourceStrategy: - from: - kind: "ImageStreamTag" - name: "ruby-20-centos7:latest" - output: <6> - to: - kind: "ImageStreamTag" - name: "origin-ruby-sample:latest" - postCommit: <7> - script: "bundle exec rake test" ----- -<1> This specification creates a new `BuildConfig` named `ruby-sample-build`. 
-<2> The `runPolicy` field controls whether builds created from this build configuration can be run simultaneously. The default value is `Serial`, which means new builds run sequentially, not simultaneously. -<3> You can specify a list of triggers, which cause a new build to be created. -<4> The `source` section defines the source of the build. The source type determines the primary source of input, and can be either `Git`, to point to a code repository location, -ifndef::openshift-online[] -`Dockerfile`, to build from an inline Dockerfile, -endif::[] -or `Binary`, to accept binary payloads. It is possible to have multiple sources at once. For more information about each source type, see "Creating build inputs". -<5> The `strategy` section describes the build strategy used to execute the build. You can specify a `Source` -ifndef::openshift-online[] -, `Docker`, or `Custom` -endif::[] -strategy here. This example uses the `ruby-20-centos7` container image that Source-to-image (S2I) uses for the application build. -<6> After the container image is successfully built, it is pushed into the repository described in the `output` section. -<7> The `postCommit` section defines an optional build hook. diff --git a/modules/builds-chaining-builds.adoc b/modules/builds-chaining-builds.adoc deleted file mode 100644 index b8275716fd8f..000000000000 --- a/modules/builds-chaining-builds.adoc +++ /dev/null @@ -1,85 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/advanced-build-operations.adoc - -[id="builds-chaining-builds_{context}"] -= Chained builds - -For compiled languages such as Go, C, C++, and Java, including the dependencies necessary for compilation in the application image might increase the size of the image or introduce vulnerabilities that can be exploited. - -To avoid these problems, two builds can be chained together. One build that produces the compiled artifact, and a second build that places that artifact in a separate image that runs the artifact. - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -In the following example, a source-to-image (S2I) build is combined with a docker build to compile an artifact that is then placed in a separate runtime image. - -[NOTE] -==== -Although this example chains a S2I build and a docker build, the first build can use any strategy that produces an image containing the desired artifacts, and the second build can use any strategy that can consume input content from an image. -==== - -//image::chained-build.png[Chained Build] *Needs update* - -The first build takes the application source and produces an image containing a `WAR` file. The image is pushed to the `artifact-image` image stream. The path of the output artifact depends on the `assemble` script of the S2I builder used. In this case, it is output to `/wildfly/standalone/deployments/ROOT.war`. - -[source,yaml] ----- -apiVersion: build.openshift.io/v1 -kind: BuildConfig -metadata: - name: artifact-build -spec: - output: - to: - kind: ImageStreamTag - name: artifact-image:latest - source: - git: - uri: https://github.com/openshift/openshift-jee-sample.git - ref: "master" - strategy: - sourceStrategy: - from: - kind: ImageStreamTag - name: wildfly:10.1 - namespace: openshift ----- - -The second build uses image source with a path to the WAR file inside the output image from the first build. An inline `dockerfile` copies that `WAR` file into a runtime image. 
- -[source,yaml] ----- -apiVersion: build.openshift.io/v1 -kind: BuildConfig -metadata: - name: image-build -spec: - output: - to: - kind: ImageStreamTag - name: image-build:latest - source: - dockerfile: |- - FROM jee-runtime:latest - COPY ROOT.war /deployments/ROOT.war - images: - - from: <1> - kind: ImageStreamTag - name: artifact-image:latest - paths: <2> - - sourcePath: /wildfly/standalone/deployments/ROOT.war - destinationDir: "." - strategy: - dockerStrategy: - from: <3> - kind: ImageStreamTag - name: jee-runtime:latest - triggers: - - imageChange: {} - type: ImageChange ----- -<1> `from` specifies that the docker build should include the output of the image from the `artifact-image` image stream, which was the target of the previous build. -<2> `paths` specifies which paths from the target image to include in the current docker build. -<3> The runtime image is used as the source image for the docker build. - -The result of this setup is that the output image of the second build does not have to contain any of the build tools that are needed to create the `WAR` file. Also, because the second build contains an image change trigger, whenever the first build is run and produces a new image with the binary artifact, the second build is automatically triggered to produce a runtime image that contains that artifact. Therefore, both builds behave as a single build with two stages. -endif::[] diff --git a/modules/builds-configuration-change-triggers.adoc b/modules/builds-configuration-change-triggers.adoc deleted file mode 100644 index d9c548c19d66..000000000000 --- a/modules/builds-configuration-change-triggers.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -[id="builds-configuration-change-triggers_{context}"] -= Configuration change triggers - -A configuration change trigger allows a build to be automatically invoked as soon as a new `BuildConfig` is created. - -The following is an example trigger definition YAML within the `BuildConfig`: - -[source,yaml] ----- - type: "ConfigChange" ----- - -[NOTE] -==== -Configuration change triggers currently only work when creating a new `BuildConfig`. In a future release, configuration change triggers will also be able to launch a build whenever a `BuildConfig` is updated. -==== diff --git a/modules/builds-configuration-file.adoc b/modules/builds-configuration-file.adoc deleted file mode 100644 index 1ebe97f28a56..000000000000 --- a/modules/builds-configuration-file.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/build-configuration.adoc - -:_content-type: PROCEDURE -[id="builds-configuration-file_{context}"] -= Configuring build settings - -You can configure build settings by editing the `build.config.openshift.io/cluster` resource. 
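Before editing, it can help to review the current cluster-wide build settings. One non-destructive way to do this, shown here as a sketch, is to read the resource back as YAML:

[source,terminal]
----
$ oc get build.config.openshift.io/cluster -o yaml
----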
- -.Procedure - -* Edit the `build.config.openshift.io/cluster` resource: -+ -[source,terminal] ----- -$ oc edit build.config.openshift.io/cluster ----- -+ -The following is an example `build.config.openshift.io/cluster` resource: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Build<1> -metadata: - annotations: - release.openshift.io/create-only: "true" - creationTimestamp: "2019-05-17T13:44:26Z" - generation: 2 - name: cluster - resourceVersion: "107233" - selfLink: /apis/config.openshift.io/v1/builds/cluster - uid: e2e9cc14-78a9-11e9-b92b-06d6c7da38dc -spec: - buildDefaults:<2> - defaultProxy:<3> - httpProxy: http://proxy.com - httpsProxy: https://proxy.com - noProxy: internal.com - env:<4> - - name: envkey - value: envvalue - gitProxy:<5> - httpProxy: http://gitproxy.com - httpsProxy: https://gitproxy.com - noProxy: internalgit.com - imageLabels:<6> - - name: labelkey - value: labelvalue - resources:<7> - limits: - cpu: 100m - memory: 50Mi - requests: - cpu: 10m - memory: 10Mi - buildOverrides:<8> - imageLabels:<9> - - name: labelkey - value: labelvalue - nodeSelector:<10> - selectorkey: selectorvalue - tolerations:<11> - - effect: NoSchedule - key: node-role.kubernetes.io/builds -operator: Exists ----- -<1> `Build`: Holds cluster-wide information on how to handle builds. The canonical, and only valid name is `cluster`. -<2> `buildDefaults`: Controls the default information for builds. -<3> `defaultProxy`: Contains the default proxy settings for all build operations, including image pull or push and source download. -<4> `env`: A set of default environment variables that are applied to the build if the specified variables do not exist on the build. -<5> `gitProxy`: Contains the proxy settings for Git operations only. If set, this overrides any Proxy settings for all Git commands, such as `git clone`. -<6> `imageLabels`: A list of labels that are applied to the resulting image. -You can override a default label by providing a label with the same name in the `BuildConfig`. -<7> `resources`: Defines resource requirements to execute the build. -<8> `buildOverrides`: Controls override settings for builds. -<9> `imageLabels`: A list of labels that are applied to the resulting image. -If you provided a label in the `BuildConfig` with the same name as one in this table, your label will be overwritten. -<10> `nodeSelector`: A selector which must be true for the build pod to fit on a node. -<11> `tolerations`: A list of tolerations that overrides any existing tolerations set on a build pod. diff --git a/modules/builds-configuration-parameters.adoc b/modules/builds-configuration-parameters.adoc deleted file mode 100644 index 30581f31fff1..000000000000 --- a/modules/builds-configuration-parameters.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/build-configuration.adoc - -[id="builds-configuration-parameters_{context}"] -= Build controller configuration parameters - -The `build.config.openshift.io/cluster` resource offers the following configuration parameters. - -[cols="3a,8a",options="header"] -|=== -|Parameter |Description - -|`Build` -|Holds cluster-wide information on how to handle builds. The canonical, and only valid name is `cluster`. - -`spec`: Holds user-settable values for the build controller configuration. - -|`buildDefaults` -|Controls the default information for builds. - -`defaultProxy`: Contains the default proxy settings for all build operations, including image pull or push and source download. 
- -You can override values by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the `BuildConfig` strategy. - -`gitProxy`: Contains the proxy settings for Git operations only. If set, this overrides any proxy settings for all Git commands, such as `git clone`. - -Values that are not set here are inherited from DefaultProxy. - -`env`: A set of default environment variables that are applied to the build if the specified variables do not exist on the build. - -`imageLabels`: A list of labels that are applied to the resulting image. You can override a default label by providing a label with the same name in the `BuildConfig`. - -`resources`: Defines resource requirements to execute the build. - -|`ImageLabel` -|`name`: Defines the name of the label. It must have non-zero length. - -|`buildOverrides` -|Controls override settings for builds. - -`imageLabels`: A list of labels that are applied to the resulting image. If you provided a label in the `BuildConfig` with the same name as one in this table, your label will be overwritten. - -`nodeSelector`: A selector which must be true for the build pod to fit on a node. - -`tolerations`: A list of tolerations that overrides any existing tolerations set on a build pod. - -|`BuildList` -|`items`: Standard object's metadata. - -|=== diff --git a/modules/builds-configuring-post-commit-build-hooks.adoc b/modules/builds-configuring-post-commit-build-hooks.adoc deleted file mode 100644 index 4084f62e322b..000000000000 --- a/modules/builds-configuring-post-commit-build-hooks.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -:_content-type: PROCEDURE -[id="builds-configuring-post-commit-build-hooks_{context}"] -= Configuring post commit build hooks - -There are different ways to configure the post build hook. All forms in the following examples are equivalent and run `bundle exec rake test --verbose`. - -.Procedure - -* Shell script: -+ -[source,yaml] ----- -postCommit: - script: "bundle exec rake test --verbose" ----- -+ -The `script` value is a shell script to be run with `/bin/sh -ic`. Use this when a shell script is appropriate to execute the build hook. For example, for running unit tests as above. To control the image entry point, or if the image does not have `/bin/sh`, use `command` and/or `args`. -+ -[NOTE] -==== -The additional `-i` flag was introduced to improve the experience working with CentOS and RHEL images, and may be removed in a future release. -==== - -* Command as the image entry point: -+ -[source,yaml] ----- -postCommit: - command: ["/bin/bash", "-c", "bundle exec rake test --verbose"] ----- -+ -In this form, `command` is the command to run, which overrides the image -entry point in the exec form, as documented in the link:https://docs.docker.com/engine/reference/builder/#entrypoint[Dockerfile reference]. This is needed if the image does not have `/bin/sh`, or if you do not want to use a shell. In all other cases, using `script` might be more convenient. - -* Command with arguments: -+ -[source,yaml] ----- -postCommit: - command: ["bundle", "exec", "rake", "test"] - args: ["--verbose"] ----- -+ -This form is equivalent to appending the arguments to `command`. - -[NOTE] -==== -Providing both `script` and `command` simultaneously creates an invalid build hook. 
-==== diff --git a/modules/builds-create-custom-build-artifacts.adoc b/modules/builds-create-custom-build-artifacts.adoc deleted file mode 100644 index 8c3a4ccd74d1..000000000000 --- a/modules/builds-create-custom-build-artifacts.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/custom-builds-buildah.adoc - - -:_content-type: PROCEDURE -[id="builds-create-custom-build-artifacts_{context}"] -= Creating custom build artifacts - -You must create the image you want to use as your custom build image. - -.Procedure - -. Starting with an empty directory, create a file named `Dockerfile` with the following content: -+ -[source,terminal] ----- -FROM registry.redhat.io/rhel8/buildah -# In this example, `/tmp/build` contains the inputs that build when this -# custom builder image is run. Normally the custom builder image fetches -# this content from some location at build time, by using git clone as an example. -ADD dockerfile.sample /tmp/input/Dockerfile -ADD build.sh /usr/bin -RUN chmod a+x /usr/bin/build.sh -# /usr/bin/build.sh contains the actual custom build logic that will be run when -# this custom builder image is run. -ENTRYPOINT ["/usr/bin/build.sh"] ----- - -. In the same directory, create a file named `dockerfile.sample`. This file is included in the custom build image and defines the image that is produced by the custom build: -+ -[source,terminal] ----- -FROM registry.access.redhat.com/ubi9/ubi -RUN touch /tmp/build ----- - -. In the same directory, create a file named `build.sh`. This file contains the logic that is run when the custom build runs: -+ -[source,terminal] ----- -#!/bin/sh -# Note that in this case the build inputs are part of the custom builder image, but normally this -# is retrieved from an external source. -cd /tmp/input -# OUTPUT_REGISTRY and OUTPUT_IMAGE are env variables provided by the custom -# build framework -TAG="${OUTPUT_REGISTRY}/${OUTPUT_IMAGE}" - - -# performs the build of the new image defined by dockerfile.sample -buildah --storage-driver vfs bud --isolation chroot -t ${TAG} . - - -# buildah requires a slight modification to the push secret provided by the service -# account to use it for pushing the image -cp /var/run/secrets/openshift.io/push/.dockercfg /tmp -(echo "{ \"auths\": " ; cat /var/run/secrets/openshift.io/push/.dockercfg ; echo "}") > /tmp/.dockercfg - - -# push the new image to the target for the build -buildah --storage-driver vfs push --tls-verify=false --authfile /tmp/.dockercfg ${TAG} ----- diff --git a/modules/builds-create-imagestreamtag.adoc b/modules/builds-create-imagestreamtag.adoc deleted file mode 100644 index c424f221a576..000000000000 --- a/modules/builds-create-imagestreamtag.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/running-entitled-builds.adoc - -:_content-type: PROCEDURE -[id="builds-create-imagestreamtag_{context}"] -= Creating an image stream tag for the Red Hat Universal Base Image - -To use Red Hat subscriptions within a build, you create an image stream tag to reference the Universal Base Image (UBI). - -To make the UBI available *in every project* in the cluster, you add the image stream tag to the `openshift` namespace. Otherwise, to make it available *in a specific project*, you add the image stream tag to that project. 
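For context, once the tag exists, a build strategy can reference it like any other image stream tag. The following sketch assumes the `ubi:latest` tag created in the procedure below has been added to the `openshift` namespace:

[source,yaml]
----
strategy:
  sourceStrategy:
    from:
      kind: ImageStreamTag
      name: ubi:latest
      namespace: openshift
----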
- -The benefit of using image stream tags this way is that doing so grants access to the UBI based on the `registry.redhat.io` credentials in the install pull secret without exposing the pull secret to other users. This is more convenient than requiring each developer to install pull secrets with `registry.redhat.io` credentials in each project. - -.Procedure - -* To create an `ImageStreamTag` in the `openshift` namespace, so it is available to developers in all projects, enter: -+ -[source,terminal] ----- -$ oc tag --source=docker registry.redhat.io/ubi9/ubi:latest ubi:latest -n openshift ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to create an `ImageStreamTag` in the `openshift` namespace: -[source,yaml] ----- -apiVersion: image.openshift.io/v1 -kind: ImageStream -metadata: - name: ubi - namespace: openshift -spec: - tags: - - from: - kind: DockerImage - name: registry.redhat.io/ubi9/ubi:latest - name: latest - referencePolicy: - type: Source ----- -==== - -* To create an `ImageStreamTag` in a single project, enter: -+ -[source,terminal] ----- -$ oc tag --source=docker registry.redhat.io/ubi9/ubi:latest ubi:latest ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to create an `ImageStreamTag` in a single project: -[source,yaml] ----- -apiVersion: image.openshift.io/v1 -kind: ImageStream -metadata: - name: ubi -spec: - tags: - - from: - kind: DockerImage - name: registry.redhat.io/ubi9/ubi:latest - name: latest - referencePolicy: - type: Source ----- -==== diff --git a/modules/builds-creating-secrets.adoc b/modules/builds-creating-secrets.adoc deleted file mode 100644 index 6b71c60b414a..000000000000 --- a/modules/builds-creating-secrets.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-creating-secrets_{context}"] -= Creating secrets - -You must create a secret before creating the pods that depend on that secret. - -When creating secrets: - -* Create a secret object with secret data. -* Update the pod service account to allow the reference to the secret. -* Create a pod, which consumes the secret as an environment variable or as a file using a `secret` volume. - -.Procedure - -* Use the create command to create a secret object from a JSON or YAML file: -+ -[source,terminal] ----- -$ oc create -f ----- -+ -For example, you can create a secret from your local `.docker/config.json` file: -+ -[source,terminal] ----- -$ oc create secret generic dockerhub \ - --from-file=.dockerconfigjson= \ - --type=kubernetes.io/dockerconfigjson ----- -+ -This command generates a JSON specification of the secret named `dockerhub` and creates the object. -+ -.YAML Opaque Secret Object Definition -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: mysecret -type: Opaque <1> -data: - username: - password: ----- -+ -<1> Specifies an _opaque_ secret. -+ -.Docker Configuration JSON File Secret Object Definition -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: aregistrykey - namespace: myapps -type: kubernetes.io/dockerconfigjson <1> -data: - .dockerconfigjson:bm5ubm5ubm5ubm5ubm5ubm5ubm5ubmdnZ2dnZ2dnZ2dnZ2dnZ2dnZ2cgYXV0aCBrZXlzCg== <2> ----- -+ -<1> Specifies that the secret is using a docker configuration JSON file. 
-<2> The output of a base64-encoded the docker configuration JSON file diff --git a/modules/builds-custom-strategy.adoc b/modules/builds-custom-strategy.adoc deleted file mode 100644 index 76d6c22656d9..000000000000 --- a/modules/builds-custom-strategy.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -[id="builds-custom-strategy_{context}"] -= Custom strategy - -When using a Custom strategy, all the defined input secrets and config maps are available in the builder container in the `/var/run/secrets/openshift.io/build` directory. The custom build image must use these secrets and config maps appropriately. With the Custom strategy, you can define secrets as described in Custom strategy options. - -There is no technical difference between existing strategy secrets and the input secrets. However, your builder image can distinguish between them and use them differently, based on your build use case. - -The input secrets are always mounted into the `/var/run/secrets/openshift.io/build` directory, or your builder can parse the `$BUILD` environment variable, which includes the full build object. - -[IMPORTANT] -==== -If a pull secret for the registry exists in both the namespace and the node, builds default to using the pull secret in the namespace. -==== diff --git a/modules/builds-define-build-inputs.adoc b/modules/builds-define-build-inputs.adoc deleted file mode 100644 index 3177260cb4e6..000000000000 --- a/modules/builds-define-build-inputs.adoc +++ /dev/null @@ -1,77 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/creating-build-inputs.adoc - -[id="builds-define-build-inputs_{context}"] -= Build inputs - -A build input provides source content for builds to operate on. You can use the following build inputs to provide sources in {product-title}, listed in order of precedence: - -ifndef::openshift-online[] -* Inline Dockerfile definitions -endif::[] -* Content extracted from existing images -* Git repositories -* Binary (Local) inputs -* Input secrets -* External artifacts - -ifdef::openshift-online[] -[IMPORTANT] -==== -The docker build strategy is not supported in {product-title}. Therefore, inline Dockerfile definitions are not accepted. -==== -endif::[] - -You can combine multiple inputs in a single build. -ifndef::openshift-online[] -However, as the inline Dockerfile takes precedence, it can overwrite any other file named Dockerfile provided by another input. -endif::[] -Binary (local) input and Git repositories are mutually exclusive inputs. - -You can use input secrets when you do not want certain resources or credentials used during a build to be available in the final application image produced by the build, or want to consume a value that is defined in a secret resource. External artifacts can be used to pull in additional files that are not available as one of the other build input types. - -When you run a build: - -. A working directory is constructed and all input content is placed in the working directory. For example, the input Git repository is cloned into the working directory, and files specified from input images are copied into the working directory using the target path. - -. The build process changes directories into the `contextDir`, if one is defined. - -ifndef::openshift-online[] -. The inline Dockerfile, if any, is written to the current directory. -endif::[] - -. 
The content from the current directory is provided to the build process -for reference by the -ifndef::openshift-online[] -Dockerfile, custom builder logic, or -endif::[] -`assemble` script. This means any input content that resides outside the `contextDir` is ignored by the build. - -The following example of a source definition includes multiple input types and an explanation of how they are combined. For more details on how each input type is defined, see the specific sections for each input type. - -[source,yaml] ----- -source: - git: - uri: https://github.com/openshift/ruby-hello-world.git <1> - ref: "master" - images: - - from: - kind: ImageStreamTag - name: myinputimage:latest - namespace: mynamespace - paths: - - destinationDir: app/dir/injected/dir <2> - sourcePath: /usr/lib/somefile.jar - contextDir: "app/dir" <3> -ifndef::openshift-online[] - dockerfile: "FROM centos:7\nRUN yum install -y httpd" <4> -endif::[] ----- -<1> The repository to be cloned into the working directory for the build. -<2> `/usr/lib/somefile.jar` from `myinputimage` is stored in `/app/dir/injected/dir`. -<3> The working directory for the build becomes `/app/dir`. -ifndef::openshift-online[] -<4> A Dockerfile with this content is created in `/app/dir`, overwriting any existing file with that name. -endif::[] diff --git a/modules/builds-disabling-build-strategy-globally.adoc b/modules/builds-disabling-build-strategy-globally.adoc deleted file mode 100644 index 1e680725bd2e..000000000000 --- a/modules/builds-disabling-build-strategy-globally.adoc +++ /dev/null @@ -1,93 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/securing-builds-by-strategy.adoc - - -:_content-type: PROCEDURE -[id="builds-disabling-build-strategy-globally_{context}"] -= Disabling access to a build strategy globally - -To prevent access to a particular build strategy globally, log in as a user with cluster administrator privileges, remove the corresponding role from the `system:authenticated` group, and apply the annotation `rbac.authorization.kubernetes.io/autoupdate: "false"` to protect them from changes between the API restarts. The following example shows disabling the docker build strategy. - -.Procedure - -. Apply the `rbac.authorization.kubernetes.io/autoupdate` annotation: -+ -[source,terminal] ----- -$ oc edit clusterrolebinding system:build-strategy-docker-binding ----- -+ -.Example output -[source,yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - rbac.authorization.kubernetes.io/autoupdate: "false" <1> - creationTimestamp: 2018-08-10T01:24:14Z - name: system:build-strategy-docker-binding - resourceVersion: "225" - selfLink: /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system%3Abuild-strategy-docker-binding - uid: 17b1f3d4-9c3c-11e8-be62-0800277d20bf -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:build-strategy-docker -subjects: -- apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated ----- -<1> Change the `rbac.authorization.kubernetes.io/autoupdate` annotation's value to `"false"`. - -. Remove the role: -+ -[source,terminal] ----- -$ oc adm policy remove-cluster-role-from-group system:build-strategy-docker system:authenticated ----- - -. Ensure the build strategy subresources are also removed from these roles: -+ -[source,terminal] ----- -$ oc edit clusterrole admin ----- -+ -[source,terminal] ----- -$ oc edit clusterrole edit ----- - -. 
For each role, specify the subresources that correspond to the resource of the strategy to disable. - -.. Disable the docker Build Strategy for *admin*: -+ -[source,yaml] ----- -kind: ClusterRole -metadata: - name: admin -... -- apiGroups: - - "" - - build.openshift.io - resources: - - buildconfigs - - buildconfigs/webhooks - - builds/custom <1> - - builds/source - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch -... ----- -<1> Add `builds/custom` and `builds/source` to disable docker builds globally for users with the *admin* role. diff --git a/modules/builds-displaying-webhook-urls.adoc b/modules/builds-displaying-webhook-urls.adoc deleted file mode 100644 index 0243bfd84089..000000000000 --- a/modules/builds-displaying-webhook-urls.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -:_content-type: PROCEDURE -[id="builds-displaying-webhook-urls_{context}"] -= Displaying webhook URLs - -You can use the following command to display webhook URLs associated with a build configuration. If the command does not display any webhook URLs, then no webhook trigger is defined for that build configuration. - -.Procedure - -* To display any webhook URLs associated with a `BuildConfig`, run: - -[source,terminal] ----- -$ oc describe bc ----- diff --git a/modules/builds-docker-credentials-private-registries.adoc b/modules/builds-docker-credentials-private-registries.adoc deleted file mode 100644 index 8e7fc9a5b216..000000000000 --- a/modules/builds-docker-credentials-private-registries.adoc +++ /dev/null @@ -1,125 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-docker-credentials-private-registries_{context}"] -= Using docker credentials for private registries - -You can supply builds with a .`docker/config.json` file with valid credentials for private container registries. This allows you to push the output image into a private container image registry or pull a builder image from the private container image registry that requires authentication. - -You can supply credentials for multiple repositories within the same registry, each with credentials specific to that registry path. - -[NOTE] -==== -For the {product-title} container image registry, this is not required because secrets are generated automatically for you by {product-title}. -==== - -The `.docker/config.json` file is found in your home directory by default and -has the following format: - -[source,yaml] ----- -auths: - index.docker.io/v1/: <1> - auth: "YWRfbGzhcGU6R2labnRib21ifTE=" <2> - email: "user@example.com" <3> - docker.io/my-namespace/my-user/my-image: <4> - auth: "GzhYWRGU6R2fbclabnRgbkSp="" - email: "user@example.com" - docker.io/my-namespace: <5> - auth: "GzhYWRGU6R2deesfrRgbkSp="" - email: "user@example.com" ----- -<1> URL of the registry. -<2> Encrypted password. -<3> Email address for the login. -<4> URL and credentials for a specific image in a namespace. -<5> URL and credentials for a registry namespace. - -You can define multiple container image registries or define multiple repositories in the same registry. Alternatively, you can also add authentication entries to this file by running the `docker login` command. The file will be created if it does not exist. - -Kubernetes provides `Secret` objects, which can be used to store configuration and passwords. 
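For reference, the same kind of `kubernetes.io/dockerconfigjson` secret can also be generated directly from credentials with `oc create secret docker-registry`. The following is a sketch with placeholder values; the procedure below assumes that you already have a `.docker/config.json` file:

[source,terminal]
----
$ oc create secret docker-registry private-registry-credentials \
    --docker-server=private.registry.com \
    --docker-username=<username> \
    --docker-password=<password> \
    --docker-email=<email>
----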
- -.Prerequisites - -* You must have a `.docker/config.json` file. - -.Procedure - -. Create the secret from your local `.docker/config.json` file: -+ -[source,terminal] ----- -$ oc create secret generic dockerhub \ - --from-file=.dockerconfigjson= \ - --type=kubernetes.io/dockerconfigjson ----- -+ -This generates a JSON specification of the secret named `dockerhub` and creates the object. -+ -. Add a `pushSecret` field into the `output` section of the `BuildConfig` and set it to the name of the `secret` that you created, which in the previous example is `dockerhub`: -+ -[source,yaml] ----- -spec: - output: - to: - kind: "DockerImage" - name: "private.registry.com/org/private-image:latest" - pushSecret: - name: "dockerhub" ----- -+ -You can use the `oc set build-secret` command to set the push secret on the build configuration: -+ -[source,terminal] ----- -$ oc set build-secret --push bc/sample-build dockerhub ----- -+ -You can also link the push secret to the service account used by the build instead of specifying the `pushSecret` field. By default, builds use the `builder` service account. The push secret is automatically added to the build if the secret contains a credential that matches the repository hosting the build's output image. -+ -[source,terminal] ----- -$ oc secrets link builder dockerhub ----- -+ -. Pull the builder container image from a private container image registry by specifying the `pullSecret` field, which is part of the build strategy definition: -+ -[source,yaml] ----- -strategy: - sourceStrategy: - from: - kind: "DockerImage" - name: "docker.io/user/private_repository" - pullSecret: - name: "dockerhub" ----- -+ -You can use the `oc set build-secret` command to set the pull secret on the build configuration: -+ -[source,terminal] ----- -$ oc set build-secret --pull bc/sample-build dockerhub ----- -+ -ifndef::openshift-online[] -[NOTE] -==== -This example uses `pullSecret` in a Source build, but it is also applicable in Docker and Custom builds. -==== -endif::[] -+ -You can also link the pull secret to the service account used by the build instead of specifying the `pullSecret` field. By default, builds use the `builder` service account. The pull secret is automatically added to the build if the secret contains a credential that matches the repository hosting the build's input image. To link the pull secret to the service account used by the build instead of specifying the `pullSecret` field, run: -+ -[source,terminal] ----- -$ oc secrets link builder dockerhub ----- -+ -[NOTE] -==== -You must specify a `from` image in the `BuildConfig` spec to take advantage of this feature. Docker strategy builds generated by `oc new-build` or `oc new-app` may not do this in some situations. -==== diff --git a/modules/builds-docker-source-build-output.adoc b/modules/builds-docker-source-build-output.adoc deleted file mode 100644 index b6bc7851cbf1..000000000000 --- a/modules/builds-docker-source-build-output.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/managing-build-output.adoc - -[id="builds-docker-source-build-output_{context}"] -= Build output - -Builds that use the -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -docker or -endif::[] -source-to-image (S2I) strategy result in the creation of a new container image. The image is then pushed to the container image registry specified in the `output` section of the `Build` specification. 
- -If the output kind is `ImageStreamTag`, then the image will be pushed to the integrated {product-registry} and tagged in the specified imagestream. If the output is of type `DockerImage`, then the name of the output reference will be used as a docker push specification. The specification may contain a registry or will default to DockerHub if no registry is specified. If the output section of the build specification is empty, then the image will not be pushed at the end of the build. - -.Output to an ImageStreamTag -[source,yaml] ----- -spec: - output: - to: - kind: "ImageStreamTag" - name: "sample-image:latest" ----- - -.Output to a docker Push Specification -[source,yaml] ----- -spec: - output: - to: - kind: "DockerImage" - name: "my-registry.mycompany.com:5000/myimages/myimage:tag" ----- diff --git a/modules/builds-docker-strategy.adoc b/modules/builds-docker-strategy.adoc deleted file mode 100644 index d3fa463e1c5c..000000000000 --- a/modules/builds-docker-strategy.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -[id="builds-docker-strategy_{context}"] -= Docker strategy - -When using a docker strategy, you can add all defined input secrets into your container image using the link:https://docs.docker.com/engine/reference/builder/#add[`ADD`] and link:https://docs.docker.com/engine/reference/builder/#copy[`COPY` instructions] in your Dockerfile. - -If you do not specify the `destinationDir` for a secret, then the files are copied into the same directory in which the Dockerfile is located. If you specify a relative path as `destinationDir`, then the secrets are copied into that directory, relative to your Dockerfile location. This makes the secret files available to the Docker build operation as part of the context directory used during the build. - -.Example of a Dockerfile referencing secret and config map data ----- -FROM centos/ruby-22-centos7 - -USER root -COPY ./secret-dir /secrets -COPY ./config / - -# Create a shell script that will output secrets and ConfigMaps when the image is run -RUN echo '#!/bin/sh' > /input_report.sh -RUN echo '(test -f /secrets/secret1 && echo -n "secret1=" && cat /secrets/secret1)' >> /input_report.sh -RUN echo '(test -f /config && echo -n "relative-configMap=" && cat /config)' >> /input_report.sh -RUN chmod 755 /input_report.sh - -CMD ["/bin/sh", "-c", "/input_report.sh"] ----- - -[IMPORTANT] -==== -Users normally remove their input secrets from the final application image so that the secrets are not present in the container running from that image. However, the secrets still exist in the image itself in the layer where they were added. This removal is part of the Dockerfile itself. - -To prevent the contents of input secrets and config maps from appearing in the build output container images and avoid this removal process altogether, xref:../../cicd/builds/build-strategies.adoc#builds-using-build-volumes_build-strategies-docker[use build volumes] in your Docker build strategy instead. 
-==== diff --git a/modules/builds-dockerfile-source.adoc b/modules/builds-dockerfile-source.adoc deleted file mode 100644 index 74e58d40d5a7..000000000000 --- a/modules/builds-dockerfile-source.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -[id="builds-dockerfile-source_{context}"] -= Dockerfile source - -When you supply a `dockerfile` value, the content of this field is written to disk as a file named `dockerfile`. This is done after other input sources are processed, so if the input source repository contains a Dockerfile in the root directory, it is overwritten with this content. - -The source definition is part of the `spec` section in the `BuildConfig`: - -[source,yaml] ----- -source: - dockerfile: "FROM centos:7\nRUN yum install -y httpd" <1> ----- -<1> The `dockerfile` field contains an inline Dockerfile that is built. - -[role="_additional-resources"] -.Additional resources - -* The typical use for this field is to provide a Dockerfile to a docker strategy build. diff --git a/modules/builds-gitconfig-file-secured-git.adoc b/modules/builds-gitconfig-file-secured-git.adoc deleted file mode 100644 index 0bb18b8ad29c..000000000000 --- a/modules/builds-gitconfig-file-secured-git.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-gitconfig-file-secured-git_{context}"] -= Creating a secret from a .gitconfig file for secured Git - -If your Git server is secured with two-way SSL and user name with password, you must add the certificate files to your source build and add references to the certificate files in the `.gitconfig` file. - -.Prerequisites - -* You must have Git credentials. - -.Procedure - -Add the certificate files to your source build and add references to the certificate files in the `.gitconfig` file. - -. Add the `client.crt`, `cacert.crt`, and `client.key` files to the `/var/run/secrets/openshift.io/source/` folder in the application source code. - -. In the `.gitconfig` file for the server, add the `[http]` section shown in the following example: -+ -[source,terminal] ----- -# cat .gitconfig ----- -+ -.Example output -[source,terminal] ----- -[user] - name = - email = -[http] - sslVerify = false - sslCert = /var/run/secrets/openshift.io/source/client.crt - sslKey = /var/run/secrets/openshift.io/source/client.key - sslCaInfo = /var/run/secrets/openshift.io/source/cacert.crt ----- - -. Create the secret: -+ -[source,terminal] ----- -$ oc create secret generic \ ---from-literal=username= \ <1> ---from-literal=password= \ <2> ---from-file=.gitconfig=.gitconfig \ ---from-file=client.crt=/var/run/secrets/openshift.io/source/client.crt \ ---from-file=cacert.crt=/var/run/secrets/openshift.io/source/cacert.crt \ ---from-file=client.key=/var/run/secrets/openshift.io/source/client.key ----- -<1> The user's Git user name. -<2> The password for this user. - -[IMPORTANT] -==== -To avoid having to enter your password again, be sure to specify the source-to-image (S2I) image in your builds. However, if you cannot clone the repository, you must still specify your user name and password to promote the build. -==== - -[role="_additional-resources"] -.Additional resources - -* `/var/run/secrets/openshift.io/source/` folder in the application source code. 
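After you create the secret, it typically must be associated with the build configuration as its source clone secret before builds can use it. The following is a minimal sketch that assumes the secret is named `secured-git-secret` and the build configuration is named `sample-build`:

[source,terminal]
----
$ oc set build-secret --source bc/sample-build secured-git-secret
----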
diff --git a/modules/builds-gitconfig-file.adoc b/modules/builds-gitconfig-file.adoc deleted file mode 100644 index 111e5e11fe42..000000000000 --- a/modules/builds-gitconfig-file.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-gitconfig-file_{context}"] -= Creating a secret from a .gitconfig file - -If the cloning of your application is dependent on a `.gitconfig` file, then you can create a secret that contains it. Add it to the builder service account and then your `BuildConfig`. - -.Procedure - -* To create a secret from a `.gitconfig` file: - -[source,terminal] ----- -$ oc create secret generic --from-file= ----- - -[NOTE] -==== -SSL verification can be turned off if `sslVerify=false` is set for the `http` -section in your `.gitconfig` file: - -[source,text] ----- -[http] - sslVerify=false ----- -==== diff --git a/modules/builds-identifying-image-change-triggers.adoc b/modules/builds-identifying-image-change-triggers.adoc deleted file mode 100644 index 460a160b0a4d..000000000000 --- a/modules/builds-identifying-image-change-triggers.adoc +++ /dev/null @@ -1,98 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -:_content-type: PROCEDURE -[id="builds-image-change-trigger-identification_{context}"] -= Identifying the image change trigger of a build - -As a developer, if you have image change triggers, you can identify which image change initiated the last build. This can be useful for debugging or troubleshooting builds. - -.Example `BuildConfig` -[source,yaml] ----- -apiVersion: build.openshift.io/v1 -kind: BuildConfig -metadata: - name: bc-ict-example - namespace: bc-ict-example-namespace -spec: - -# ... - - triggers: - - imageChange: - from: - kind: ImageStreamTag - name: input:latest - namespace: bc-ict-example-namespace - - imageChange: - from: - kind: ImageStreamTag - name: input2:latest - namespace: bc-ict-example-namespace - type: ImageChange -status: - imageChangeTriggers: - - from: - name: input:latest - namespace: bc-ict-example-namespace - lastTriggerTime: "2021-06-30T13:47:53Z" - lastTriggeredImageID: image-registry.openshift-image-registry.svc:5000/bc-ict-example-namespace/input@sha256:0f88ffbeb9d25525720bfa3524cb1bf0908b7f791057cf1acfae917b11266a69 - - from: - name: input2:latest - namespace: bc-ict-example-namespace - lastTriggeredImageID: image-registry.openshift-image-registry.svc:5000/bc-ict-example-namespace/input2@sha256:0f88ffbeb9d25525720bfa3524cb2ce0908b7f791057cf1acfae917b11266a69 - - lastVersion: 1 ----- - -[NOTE] -==== -This example omits elements that are not related to image change triggers. -==== - -.Prerequisites - -* You have configured multiple image change triggers. These triggers have triggered one or more builds. - -.Procedure - -. In `buildConfig.status.imageChangeTriggers` to identify the `lastTriggerTime` that has the latest timestamp. -+ -This `ImageChangeTriggerStatus` - - - Then you use the `name` and `namespace` from that build to find the corresponding image change trigger in `buildConfig.spec.triggers`. - -. Under `imageChangeTriggers`, compare timestamps to identify the latest - -.Image change triggers - -In your build configuration, `buildConfig.spec.triggers` is an array of build trigger policies, `BuildTriggerPolicy`. - -Each `BuildTriggerPolicy` has a `type` field and set of pointers fields. 
Each pointer field corresponds to one of the allowed values for the `type` field. As such, you can only set `BuildTriggerPolicy` to only one pointer field. - -For image change triggers, the value of `type` is `ImageChange`. Then, the `imageChange` field is the pointer to an `ImageChangeTrigger` object, which has the following fields: - -* `lastTriggeredImageID`: This field, which is not shown in the example, is deprecated in {product-title} 4.8 and will be ignored in a future release. It contains the resolved image reference for the `ImageStreamTag` when the last build was triggered from this `BuildConfig`. -* `paused`: You can use this field, which is not shown in the example, to temporarily disable this particular image change trigger. -* `from`: You use this field to reference the `ImageStreamTag` that drives this image change trigger. Its type is the core Kubernetes type, `OwnerReference`. - -The `from` field has the following fields of note: -** `kind`: For image change triggers, the only supported value is `ImageStreamTag`. -** `namespace`: You use this field to specify the namespace of the `ImageStreamTag`. -** `name`: You use this field to specify the `ImageStreamTag`. - -.Image change trigger status - -In your build configuration, `buildConfig.status.imageChangeTriggers` is an array of `ImageChangeTriggerStatus` elements. Each `ImageChangeTriggerStatus` element includes the `from`, `lastTriggeredImageID`, and `lastTriggerTime` elements shown in the preceding example. - -The `ImageChangeTriggerStatus` that has the most recent `lastTriggerTime` triggered the most recent build. You use its `name` and `namespace` to identify the image change trigger in `buildConfig.spec.triggers` that triggered the build. - -The `lastTriggerTime` with the most recent timestamp signifies the `ImageChangeTriggerStatus` of the last build. This `ImageChangeTriggerStatus` has the same `name` and `namespace` as the image change trigger in `buildConfig.spec.triggers` that triggered the build. - -[role="_additional-resources"] -.Additional resources - -* link:http://docs.docker.com/v1.7/reference/api/hub_registry_spec/#docker-registry-1-0[v1 container registries] diff --git a/modules/builds-image-change-trigger-identification.adoc b/modules/builds-image-change-trigger-identification.adoc deleted file mode 100644 index b0fd3f449be2..000000000000 --- a/modules/builds-image-change-trigger-identification.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -[id="builds-image-change-trigger-identification_{context}"] -= Image change trigger identification - -As a developer, if you have configured image change triggers, you can identify which image change initiated the last build. - -To accomplish this, you must identify elements in your build configuration's specification and status that are related to image change triggers. - -This way, you can use the timestamp in `buildConfig.status.imageChangeTriggers` to identify the most recent build. Then you can use the name and namespace of the image stream that triggered this build to find the corresponding image change trigger in the `buildConfig.spec.triggers`. - - -== Image change trigger elements in the specification - -In your build configuration specification, `buildConfig.spec.triggers` is an array of build trigger policies, `BuildTriggerPolicy`. 
- -Each `BuildTriggerPolicy` has a `type` field and set of pointers fields, where each pointer field corresponds to one of the allowed values for the `type` field. As such, only one pointer field can be set for a given `BuildTriggerPolicy`. - -So for image change triggers, the value of `type` is `ImageChange`. - -Then, the `imageChange` field is the pointer to an `ImageChangeTrigger` object. So this will be set. It has the following fields: - -* `lastTriggeredImageID`: This field is deprecated in {product-title} 4.8, but is still being set. It will be ignored in a future release. It contains the resolved image reference for the `ImageStreamTag` when the last build was triggered from this `BuildConfig`. -* `paused`: This field is used to temporarily disable this particular image change trigger. -* `from`: This field is used to reference the `ImageStreamTag` that drives this image change trigger. Its type is the core Kubernetes type, `OwnerReference`. The `from` field has the following fields of note: - * `kind`: In this case, the only supported value is `ImageStreamTag`. - * `namespace`: The namespace where the `ImageStreamTag` lives. - * `name`: The name of the `ImageStreamTag`. - -The following example shows the relative location of the elements mentioned in the preceding list and omits unrelated elements, such as `name`, `source`, and `strategy`. - -.Example `BuildConfig.spec` -[source,yaml] ----- -kind: BuildConfig -apiVersion: build.openshift.io/v1 -spec: - triggers: - - imageChange: - from: - kind: ImageStreamTag - name: <1> - namespace: <2> - type: ImageChange ----- -<1> The name of an image stream, such as `input:latest`. -<2> A namespace, such as `my-namespace`. - -== Image change trigger elements in the status - -In your build configuration status, `buildConfig.status.imageChangeTriggers` is an array of `ImageChangeTriggerStatus` elements. Each `ImageChangeTriggerStatus` element includes the `from`, `lastTriggeredImageID`, and `lastTriggerTime` elements shown in the following example. This example omits elements that are not related to image change triggers. - -.Example `BuildConfig.status` -[source,yaml] ----- -kind: BuildConfig -apiVersion: build.openshift.io/v1 -status: - imageChangeTriggers: - - from: - name: <1> - namespace: <2> - lastTriggeredImageID: <3> - lastTriggerTime: <4> ----- -<1> The name of an image stream, such as `input:latest`. -<2> A namespace, such as `my-namespace`. -<3> The SHA or ID of the `ImageStreamTag` when a build started. Its value is updated each time a build is started, even if this `ImageStreamTag` is not the reason the build started. -<4> The last time this particular `ImageStreamTag` triggered a build to start. Its value is only updated when this trigger specifically started a Build. - -== Identification of image change triggers - -The `ImageChangeTriggerStatus` that has the most recent `lastTriggerTime` triggered the most recent build. You can use its `name` and `namespace` to correlate it with the `ImageStreamTag` of one of the image change triggers you defined in the `buildConfig.spec.triggers`. 
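For example, one possible way to list each trigger's image stream and last trigger time is a JSONPath query against the fields shown in the preceding example. This is only a sketch; the `BuildConfig` name `bc-ict-example` and its namespace are hypothetical placeholders, not required values:

[source,terminal]
----
$ oc get bc bc-ict-example -n bc-ict-example-namespace \
  -o jsonpath='{range .status.imageChangeTriggers[*]}{.from.namespace}/{.from.name}{"\t"}{.lastTriggerTime}{"\n"}{end}'
----

The entry with the most recent timestamp in the output identifies the `ImageStreamTag` to look up under `buildConfig.spec.triggers`.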
- -[role="_additional-resources"] -.Additional resources - -* link:http://docs.docker.com/v1.7/reference/api/hub_registry_spec/#docker-registry-1-0[v1 container registries] diff --git a/modules/builds-image-source.adoc b/modules/builds-image-source.adoc deleted file mode 100644 index 8d7c16c0ed97..000000000000 --- a/modules/builds-image-source.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: CONCEPT - -[id="builds-image-source_{context}"] -= Image source - -You can add additional files to the build process with images. Input images are referenced in the same way the `From` and `To` image targets are defined. This means both container images and image stream tags can be referenced. In conjunction with the image, you must provide one or more path pairs to indicate the path of the files or directories to copy the image and the destination to place them in the build context. - -The source path can be any absolute path within the image specified. The destination must be a relative directory path. At build time, the image is loaded and the indicated files and directories are copied into the context directory of the build process. This is the same directory into which the source repository content is cloned. If the source path ends in `/.` then the content of the directory is copied, but the directory itself is not created at the destination. - -Image inputs are specified in the `source` definition of the `BuildConfig`: - -[source,yaml] ----- -source: - git: - uri: https://github.com/openshift/ruby-hello-world.git - ref: "master" - images: <1> - - from: <2> - kind: ImageStreamTag - name: myinputimage:latest - namespace: mynamespace - paths: <3> - - destinationDir: injected/dir <4> - sourcePath: /usr/lib/somefile.jar <5> - - from: - kind: ImageStreamTag - name: myotherinputimage:latest - namespace: myothernamespace - pullSecret: mysecret <6> - paths: - - destinationDir: injected/dir - sourcePath: /usr/lib/somefile.jar ----- -<1> An array of one or more input images and files. -<2> A reference to the image containing the files to be copied. -<3> An array of source/destination paths. -<4> The directory relative to the build root where the build process can access the file. -<5> The location of the file to be copied out of the referenced image. -<6> An optional secret provided if credentials are needed to access the input image. -+ -[NOTE] -==== -If your cluster uses an `ImageDigestMirrorSet` or `ImageTagMirrorSet` object to configure repository mirroring, you can use only global pull secrets for mirrored registries. You cannot add a pull secret to a project. -==== - -.Images that require pull secrets - -When using an input image that requires a pull secret, you can link the pull secret to the service account used by the build. By default, builds use the `builder` service account. The pull secret is automatically added to the build if the secret contains a credential that matches the repository hosting the input image. To link a pull secret to the service account used by the build, run: - -[source,terminal] ----- -$ oc secrets link builder dockerhub ----- - -ifndef::openshift-online[] -[NOTE] -==== -This feature is not supported for builds using the custom strategy. 
-==== -endif::[] - -.Images on mirrored registries that require pull secrets - -When using an input image from a mirrored registry, if you get a `build error: failed to pull image` message, you can resolve the error by using either of the following methods: - -* Create an input secret that contains the authentication credentials for the builder image’s repository and all known mirrors. In this case, create a pull secret for credentials to the image registry and its mirrors. -* Use the input secret as the pull secret on the `BuildConfig` object. diff --git a/modules/builds-input-secrets-configmaps.adoc b/modules/builds-input-secrets-configmaps.adoc deleted file mode 100644 index 6550cb2704b1..000000000000 --- a/modules/builds-input-secrets-configmaps.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -[id="builds-input-secrets-configmaps_{context}"] -= Input secrets and config maps - -[IMPORTANT] -==== -To prevent the contents of input secrets and config maps from appearing in build output container images, use build volumes in your xref:../../cicd/builds/build-strategies.adoc#builds-using-build-volumes_build-strategies-docker[Docker build] and xref:../../cicd/builds/build-strategies.adoc#builds-using-build-volumes_build-strategies-s2i[source-to-image build] strategies. -==== - -In some scenarios, build operations require credentials or other configuration data to access dependent resources, but it is undesirable for that information to be placed in source control. You can define input secrets and input config maps for this purpose. - -For example, when building a Java application with Maven, you can set up a private mirror of Maven Central or JCenter that is accessed by private keys. To download libraries from that private mirror, you have to supply the -following: - -. A `settings.xml` file configured with the mirror's URL and connection settings. -. A private key referenced in the settings file, such as `~/.ssh/id_rsa`. - -For security reasons, you do not want to expose your credentials in the application image. - -This example describes a Java application, but you can use the same approach for adding SSL certificates into the `/etc/ssl/certs` directory, API keys or tokens, license files, and more. diff --git a/modules/builds-manually-add-source-clone-secrets.adoc b/modules/builds-manually-add-source-clone-secrets.adoc deleted file mode 100644 index 8284e05ce320..000000000000 --- a/modules/builds-manually-add-source-clone-secrets.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-manually-add-source-clone-secrets_{context}"] -= Manually adding a source clone secret - -Source clone secrets can be added manually to a build configuration by adding a `sourceSecret` field to the `source` section inside the `BuildConfig` and setting it to the name of the secret that you created. In this example, it is the `basicsecret`. 
- -[source,yaml] ----- -apiVersion: "v1" -kind: "BuildConfig" -metadata: - name: "sample-build" -spec: - output: - to: - kind: "ImageStreamTag" - name: "sample-image:latest" - source: - git: - uri: "https://github.com/user/app.git" - sourceSecret: - name: "basicsecret" - strategy: - sourceStrategy: - from: - kind: "ImageStreamTag" - name: "python-33-centos7:latest" ----- - -You can also use the `oc set build-secret` command to set the source clone secret on an existing build configuration. - -.Procedure - -* To set the source clone secret on an existing build configuration, enter the following command: -+ -[source,terminal] ----- -$ oc set build-secret --source bc/sample-build basicsecret ----- diff --git a/modules/builds-output-image-environment-variables.adoc b/modules/builds-output-image-environment-variables.adoc deleted file mode 100644 index 74c9ab57ab41..000000000000 --- a/modules/builds-output-image-environment-variables.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/managing-build-output.adoc - -[id="builds-output-image-environment-variables_{context}"] -= Output image environment variables - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -docker and -endif::[] -source-to-image (S2I) strategy builds set the following environment variables on output images: - -[options="header"] -|=== - -|Variable |Description - -|`OPENSHIFT_BUILD_NAME` -|Name of the build - -|`OPENSHIFT_BUILD_NAMESPACE` -|Namespace of the build - -|`OPENSHIFT_BUILD_SOURCE` -|The source URL of the build - -|`OPENSHIFT_BUILD_REFERENCE` -|The Git reference used in the build - -|`OPENSHIFT_BUILD_COMMIT` -|Source commit used in the build -|=== - -Additionally, any user-defined environment variable, for example, those configured with -S2I -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -or docker -endif::[] -strategy options, will also be part of the output image environment variable list. diff --git a/modules/builds-output-image-labels.adoc b/modules/builds-output-image-labels.adoc deleted file mode 100644 index 779c2380129e..000000000000 --- a/modules/builds-output-image-labels.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/managing-build-output.adoc - -[id="builds-output-image-labels_{context}"] -= Output image labels - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -docker and -endif::[] -source-to-image (S2I) builds set the following labels on output images: - -[options="header"] -|=== - -|Label |Description - -|`io.openshift.build.commit.author` -|Author of the source commit used in the build - -|`io.openshift.build.commit.date` -|Date of the source commit used in the build - -|`io.openshift.build.commit.id` -|Hash of the source commit used in the build - -|`io.openshift.build.commit.message` -|Message of the source commit used in the build - -|`io.openshift.build.commit.ref` -|Branch or reference specified in the source - -|`io.openshift.build.source-location` -|Source URL for the build -|=== - -You can also use the `BuildConfig.spec.output.imageLabels` field to specify a list of custom labels that will be applied to each image built from the build configuration.
- -.Custom Labels to be Applied to Built Images -[source,yaml] ----- -spec: - output: - to: - kind: "ImageStreamTag" - name: "my-image:latest" - imageLabels: - - name: "vendor" - value: "MyCompany" - - name: "authoritative-source-url" - value: "registry.mycompany.com" ----- diff --git a/modules/builds-restricting-build-strategy-globally.adoc b/modules/builds-restricting-build-strategy-globally.adoc deleted file mode 100644 index d069e219dce6..000000000000 --- a/modules/builds-restricting-build-strategy-globally.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/securing-builds-by-strategy.adoc - - -:_content-type: PROCEDURE -[id="builds-restricting-build-strategy-globally_{context}"] -= Restricting build strategies to users globally - -You can allow a set of specific users to create builds with a particular strategy. - -.Prerequisites - -* Disable global access to the build strategy. - -.Procedure - -* Assign the role that corresponds to the build strategy to a specific user. For -example, to add the `system:build-strategy-docker` cluster role to the user -`devuser`: -+ -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user system:build-strategy-docker devuser ----- -+ -[WARNING] -==== -Granting a user access at the cluster level to the `builds/docker` subresource means that the user can create builds with the docker strategy in any project in which they can create builds. -==== diff --git a/modules/builds-restricting-build-strategy-to-user.adoc b/modules/builds-restricting-build-strategy-to-user.adoc deleted file mode 100644 index e61500218bba..000000000000 --- a/modules/builds-restricting-build-strategy-to-user.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/securing-builds-by-strategy.adoc - - -:_content-type: PROCEDURE -[id="builds-restricting-build-strategy-to-user_{context}"] -= Restricting build strategies to a user within a project - -Similar to granting the build strategy role to a user globally, you can allow a set of specific users within a project to create builds with a particular strategy. - -.Prerequisites - -* Disable global access to the build strategy. - -.Procedure - -* Assign the role that corresponds to the build strategy to a specific user within a project. For example, to add the `system:build-strategy-docker` role within the project `devproject` to the user `devuser`: -+ -[source,terminal] ----- -$ oc adm policy add-role-to-user system:build-strategy-docker devuser -n devproject ----- diff --git a/modules/builds-running-entitled-builds-with-sharedsecret-objects.adoc b/modules/builds-running-entitled-builds-with-sharedsecret-objects.adoc deleted file mode 100644 index 09e696674da1..000000000000 --- a/modules/builds-running-entitled-builds-with-sharedsecret-objects.adoc +++ /dev/null @@ -1,202 +0,0 @@ -:_content-type: PROCEDURE -[id="builds-running-entitled-builds-with-sharedsecret-objects_{context}"] -= Running entitled builds using SharedSecret objects - -You can configure and perform a build in one namespace that securely uses RHEL entitlements from a `Secret` object in another namespace. - -You can still access RHEL entitlements from OpenShift Builds by creating a `Secret` object with your subscription credentials in the same namespace as your `Build` object. However, now, in {product-title} 4.10 and later, you can access your credentials and certificates from a `Secret` object in one of the {product-title} system namespaces. 
You run entitled builds with a CSI volume mount of a `SharedSecret` custom resource (CR) instance that references the `Secret` object. - -This procedure relies on the newly introduced Shared Resources CSI Driver feature, which you can use to declare CSI Volume mounts in {product-title} Builds. It also relies on the {product-title} Insights Operator. - -[IMPORTANT] -==== -[subs="attributes+"] -The Shared Resources CSI Driver and The Build CSI Volumes are both Technology Preview features, which are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. - -For more information about the support scope of Red Hat Technology Preview features, see link:https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Support Scope]. - -The Shared Resources CSI Driver and the Build CSI Volumes features also belong to the `TechPreviewNoUpgrade` feature set, which is a subset of the current Technology Preview features. You can enable the `TechPreviewNoUpgrade` feature set on test clusters, where you can fully test them while leaving the features disabled on production clusters. Enabling this feature set cannot be undone and prevents minor version updates. This feature set is not recommended on production clusters. See "Enabling Technology Preview features using feature gates" in the following "Additional resources" section. -==== - -.Prerequisites - -* You have enabled the `TechPreviewNoUpgrade` feature set by using the feature gates. -* You have a `SharedSecret` custom resource (CR) instance that references the `Secret` object where the Insights Operator stores the subscription credentials. -* You must have permission to perform the following actions: -** Create build configs and start builds. -** Discover which `SharedSecret` CR instances are available by entering the `oc get sharedsecrets` command and getting a non-empty list back. -** Determine if the `builder` service account available to you in your namespace is allowed to use the given `SharedSecret` CR instance. In other words, you can run `oc adm policy who-can use ` to see if the `builder` service account in your namespace is listed. - -[NOTE] -==== -If neither of the last two prerequisites in this list are met, establish, or ask someone to establish, the necessary role-based access control (RBAC) so that you can discover `SharedSecret` CR instances and enable service accounts to use `SharedSecret` CR instances. -==== - -.Procedure - -. Grant the `builder` service account RBAC permissions to use the `SharedSecret` CR instance by using `oc apply` with YAML content: -+ -[NOTE] -==== -Currently, `kubectl` and `oc` have hard-coded special case logic restricting the `use` verb to roles centered around pod security. Therefore, you cannot use `oc create role ...` to create the role needed for consuming `SharedSecret` CR instances. -==== -+ -.Example `oc apply -f` command with YAML `Role` object definition -[source,terminal] ----- -$ oc apply -f - < 1ef7c6d8c1a -STEP 3/9: RUN rm /etc/rhsm-host -time="2022-02-03T20:28:33Z" level=warning msg="Adding metacopy option, configured globally" ---> b1c61f88b39 -STEP 4/9: RUN yum repolist --disablerepo=* -Updating Subscription Management repositories. - - -... 
- ---> b067f1d63eb -STEP 5/9: RUN subscription-manager repos --enable rhocp-4.9-for-rhel-8-x86_64-rpms -Repository 'rhocp-4.9-for-rhel-8-x86_64-rpms' is enabled for this system. -time="2022-02-03T20:28:40Z" level=warning msg="Adding metacopy option, configured globally" ---> 03927607ebd -STEP 6/9: RUN yum -y update -Updating Subscription Management repositories. - -... - -Upgraded: - systemd-239-51.el8_5.3.x86_64 systemd-libs-239-51.el8_5.3.x86_64 - systemd-pam-239-51.el8_5.3.x86_64 -Installed: - diffutils-3.6-6.el8.x86_64 libxkbcommon-0.9.1-1.el8.x86_64 - xkeyboard-config-2.28-1.el8.noarch - -Complete! -time="2022-02-03T20:29:05Z" level=warning msg="Adding metacopy option, configured globally" ---> db57e92ff63 -STEP 7/9: RUN yum install -y openshift-clients.x86_64 -Updating Subscription Management repositories. - -... - -Installed: - bash-completion-1:2.7-5.el8.noarch - libpkgconf-1.4.2-1.el8.x86_64 - openshift-clients-4.9.0-202201211735.p0.g3f16530.assembly.stream.el8.x86_64 - pkgconf-1.4.2-1.el8.x86_64 - pkgconf-m4-1.4.2-1.el8.noarch - pkgconf-pkg-config-1.4.2-1.el8.x86_64 - -Complete! -time="2022-02-03T20:29:19Z" level=warning msg="Adding metacopy option, configured globally" ---> 609507b059e -STEP 8/9: ENV "OPENSHIFT_BUILD_NAME"="my-csi-bc-1" "OPENSHIFT_BUILD_NAMESPACE"="my-csi-app-namespace" ---> cab2da3efc4 -STEP 9/9: LABEL "io.openshift.build.name"="my-csi-bc-1" "io.openshift.build.namespace"="my-csi-app-namespace" -COMMIT temp.builder.openshift.io/my-csi-app-namespace/my-csi-bc-1:edfe12ca ---> 821b582320b -Successfully tagged temp.builder.openshift.io/my-csi-app-namespace/my-csi-bc-1:edfe12ca -821b582320b41f1d7bab4001395133f86fa9cc99cc0b2b64c5a53f2b6750db91 -Build complete, no image push requested ----- -==== diff --git a/modules/builds-secrets-overview.adoc b/modules/builds-secrets-overview.adoc deleted file mode 100644 index 5f6e8287f932..000000000000 --- a/modules/builds-secrets-overview.adoc +++ /dev/null @@ -1,74 +0,0 @@ -// Module included in the following assemblies: -// * builds/creating-build-inputs.adoc - -[id="builds-secrets-overview_{context}"] -= What is a secret? - -The `Secret` object type provides a mechanism to hold sensitive information such as passwords, {product-title} client configuration files, `dockercfg` files, private source repository credentials, and so on. Secrets decouple sensitive content from the pods. You can mount secrets into containers using a volume plugin, or the system can use secrets to perform actions on behalf of a pod. - -.YAML Secret Object Definition - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: test-secret - namespace: my-namespace -type: Opaque <1> -data: <2> - username: <3> - password: -stringData: <4> - hostname: myapp.mydomain.com <5> ----- -<1> Indicates the structure of the secret's key names and values. -<2> The allowable format for the keys in the `data` field must meet the guidelines in the `DNS_SUBDOMAIN` value in the Kubernetes identifiers glossary. -<3> The value associated with keys in the `data` map must be base64 encoded. -<4> Entries in the `stringData` map are converted to base64 and the entries are then moved to the `data` map automatically. This field is write-only. The value is only returned by the `data` field. -<5> The value associated with keys in the `stringData` map is made up of plain text strings. - -[id="builds-secrets-overview-properties_{context}"] -== Properties of secrets - -Key properties include: - -* Secret data can be referenced independently from its definition.
- -* Secret data volumes are backed by temporary file-storage facilities (tmpfs) and never come to rest on a node. -* Secret data can be shared within a namespace. - -[id="builds-secrets-overview-types_{context}"] -== Types of secrets - -The value in the `type` field indicates the structure of the secret's key names and values. The type can be used to enforce the presence of user names and keys in the secret object. If you do not want validation, use the `opaque` type, which is the default. - -Specify one of the following types to trigger minimal server-side validation to ensure the presence of specific key names in the secret data: - -* `kubernetes.io/service-account-token`. Uses a service account token. -* `kubernetes.io/dockercfg`. Uses the `.dockercfg` file for required Docker credentials. -* `kubernetes.io/dockerconfigjson`. Uses the `.docker/config.json` file for required Docker credentials. -* `kubernetes.io/basic-auth`. Use with basic authentication. -* `kubernetes.io/ssh-auth`. Use with SSH key authentication. -* `kubernetes.io/tls`. Use with TLS certificate authorities. - -Specify `type=Opaque` if you do not want validation, which means the secret does not claim to conform to any convention for key names or values. An `opaque` secret allows for unstructured `key:value` pairs that can contain arbitrary values. - -[NOTE] -==== -You can specify other arbitrary types, such as `example.com/my-secret-type`. These types are not enforced server-side, but indicate that the creator of the -secret intended to conform to the key/value requirements of that type. -==== - -[id="builds-secrets-overview-updates_{context}"] -== Updates to secrets - -When you modify the value of a secret, the value used by an already running pod does not dynamically change. To change a secret, you must delete the original pod and create a new pod, in some cases with an identical `PodSpec`. - -Updating a secret follows the same workflow as deploying a new container image. You can use the `kubectl rolling-update` command. - -The `resourceVersion` value in a secret is not specified when it is referenced. Therefore, if a secret is updated at the same time as pods are starting, the version of the secret that is used for the pod is not defined. - -[NOTE] -==== -Currently, it is not possible to check the resource version of a secret object that was used when a pod was created. It is planned that pods report this information, so that a controller could restart ones using an old `resourceVersion`. In the interim, do not update the data of existing secrets, but create new ones with distinct names. -==== diff --git a/modules/builds-secrets-restrictions.adoc b/modules/builds-secrets-restrictions.adoc deleted file mode 100644 index c26236140854..000000000000 --- a/modules/builds-secrets-restrictions.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// * builds/creating-build-inputs.adoc - -[id="builds-secrets-restrictions_{context}"] -= Secrets restrictions - -To use a secret, a pod needs to reference the secret. A secret can be used with a pod in three ways: - -* To populate environment variables for containers. -* As files in a volume mounted on one or more of its containers. -* By kubelet when pulling images for the pod. - -Volume type secrets write data into the container as a file using the volume mechanism. `imagePullSecrets` use service accounts for the automatic injection of the secret into all pods in a namespace.
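The first two methods can be illustrated with a minimal pod sketch. This example assumes the `test-secret` object shown in the preceding overview; the pod name, container image, mount path, and environment variable name are hypothetical choices, not required values:

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: secret-example-pod
  namespace: my-namespace
spec:
  containers:
  - name: app
    image: registry.access.redhat.com/ubi9/ubi-minimal
    command: ["sleep", "infinity"]
    env:
    - name: APP_USERNAME
      valueFrom:
        secretKeyRef:
          name: test-secret  # secret in the same namespace as the pod
          key: username
    volumeMounts:
    - name: secret-volume
      mountPath: /etc/secret-volume  # each secret key becomes a file here
      readOnly: true
  volumes:
  - name: secret-volume
    secret:
      secretName: test-secret
----

Each key in the secret becomes a file under the mount path, while `secretKeyRef` exposes a single key as an environment variable.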
- -When a template contains a secret definition, the only way for the template to use the provided secret is to ensure that the secret volume sources are validated and that the specified object reference actually points to an object of type `Secret`. Therefore, a secret needs to be created before any pods that depend on it. The most effective way to ensure this is to have it get injected automatically through the use of a service account. - -Secret API objects reside in a namespace. They can only be referenced by pods in that same namespace. - -Individual secrets are limited to 1MB in size. This is to discourage the creation of large secrets that would exhaust apiserver and kubelet memory. However, creation of a number of smaller secrets could also exhaust memory. diff --git a/modules/builds-service-serving-certificate-secrets.adoc b/modules/builds-service-serving-certificate-secrets.adoc deleted file mode 100644 index d1c3ca70611d..000000000000 --- a/modules/builds-service-serving-certificate-secrets.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-service-serving-certificate-secrets_{context}"] -= Service serving certificate secrets - -Service serving certificate secrets are intended to support complex middleware applications that need out-of-the-box certificates. It has the same settings as the server certificates generated by the administrator tooling for nodes and masters. - -.Procedure - -To secure communication to your service, have the cluster generate a signed serving certificate/key pair into a secret in your namespace. - -* Set the `service.beta.openshift.io/serving-cert-secret-name` annotation on your service with the value set to the name you want to use for your secret. -+ -Then, your `PodSpec` can mount that secret. When it is available, your pod runs. The certificate is good for the internal service DNS name, `..svc`. -+ -The certificate and key are in PEM format, stored in `tls.crt` and `tls.key` respectively. The certificate/key pair is automatically replaced when it gets close to expiration. View the expiration date in the `service.beta.openshift.io/expiry` annotation on the secret, which is in RFC3339 format. - -[NOTE] -==== -In most cases, the service DNS name `..svc` is not externally routable. The primary use of `..svc` is for intracluster or intraservice communication, and with re-encrypt routes. -==== - -Other pods can trust cluster-created certificates, which are only signed for -internal DNS names, by using the certificate authority (CA) bundle in the `/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt` file that is automatically mounted in their pod. - -The signature algorithm for this feature is `x509.SHA256WithRSA`. To manually rotate, delete the generated secret. A new certificate is created. diff --git a/modules/builds-setting-build-resources.adoc b/modules/builds-setting-build-resources.adoc deleted file mode 100644 index 65cb9157f799..000000000000 --- a/modules/builds-setting-build-resources.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/advanced-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-setting-build-resources_{context}"] -= Setting build resources - -By default, builds are completed by pods using unbound resources, such as memory and CPU. These resources can be limited. 
- -.Procedure - -You can limit resource use in two ways: - -* Limit resource use by specifying resource limits in the default container limits of a project. -* Limit resource use by specifying resource limits as part of the build configuration. ** In the following example, each of the `resources`, `cpu`, and `memory` parameters are optional: -+ -[source,yaml] ----- -apiVersion: "v1" -kind: "BuildConfig" -metadata: - name: "sample-build" -spec: - resources: - limits: - cpu: "100m" <1> - memory: "256Mi" <2> ----- -<1> `cpu` is in CPU units: `100m` represents 0.1 CPU units (100 * 1e-3). -<2> `memory` is in bytes: `256Mi` represents 268435456 bytes (256 * 2 ^ 20). -+ -However, if a quota has been defined for your project, one of the following two items is required: -+ -*** A `resources` section set with an explicit `requests`: -+ -[source,yaml] ----- -resources: - requests: <1> - cpu: "100m" - memory: "256Mi" ----- -<1> The `requests` object contains the list of resources that correspond to the list of resources in the quota. -+ -*** A limit range defined in your project, where the defaults from the `LimitRange` object apply to pods created during the build process. -+ -Otherwise, build pod creation will fail, citing a failure to satisfy quota. diff --git a/modules/builds-setting-maximum-duration.adoc b/modules/builds-setting-maximum-duration.adoc deleted file mode 100644 index 7bf9472791d3..000000000000 --- a/modules/builds-setting-maximum-duration.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/advanced-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-setting-maximum-duration_{context}"] -= Setting maximum duration - -When defining a `BuildConfig` object, you can define its maximum duration by setting the `completionDeadlineSeconds` field. It is specified in seconds and is not set by default. When not set, there is no maximum duration enforced. - -The maximum duration is counted from the time when a build pod gets scheduled in the system, and defines how long it can be active, including the time needed to pull the builder image. After reaching the specified timeout, the build is terminated by {product-title}. - -.Procedure - -* To set maximum duration, specify `completionDeadlineSeconds` in your `BuildConfig`. The following example shows the part of a `BuildConfig` specifying `completionDeadlineSeconds` field for 30 minutes: -+ -[source,yaml] ----- -spec: - completionDeadlineSeconds: 1800 ----- - -[NOTE] -==== -This setting is not supported with the Pipeline Strategy option. -==== diff --git a/modules/builds-setting-triggers-manually.adoc b/modules/builds-setting-triggers-manually.adoc deleted file mode 100644 index 39037726d40b..000000000000 --- a/modules/builds-setting-triggers-manually.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -:_content-type: PROCEDURE -[id="builds-setting-triggers-manually_{context}"] -= Setting triggers manually - -Triggers can be added to and removed from build configurations with `oc set triggers`. 
- -.Procedure - -* To set a GitHub webhook trigger on a build configuration, use: -+ -[source,terminal] ----- -$ oc set triggers bc --from-github ----- - -* To set an image change trigger, use: -+ -[source,terminal] ----- -$ oc set triggers bc --from-image='' ----- - -* To remove a trigger, add `--remove`: -+ -[source,terminal] ----- -$ oc set triggers bc --from-bitbucket --remove ----- - -[NOTE] -==== -When a webhook trigger already exists, adding it again regenerates the webhook secret. -==== - -For more information, consult the help documentation by running: - -[source,terminal] ----- -$ oc set triggers --help ----- diff --git a/modules/builds-source-code.adoc b/modules/builds-source-code.adoc deleted file mode 100644 index 41ec0a214aae..000000000000 --- a/modules/builds-source-code.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -//* builds/creating-build-inputs.adoc - -[id="builds-source-code_{context}"] -= Git source - -When specified, source code is fetched from the supplied location. - -ifndef::openshift-online[] -If you supply an inline Dockerfile, it overwrites the Dockerfile in the `contextDir` of the Git repository. -endif::[] - -The source definition is part of the `spec` section in the `BuildConfig`: - -[source,yaml] ----- -source: - git: <1> - uri: "https://github.com/openshift/ruby-hello-world" - ref: "master" - contextDir: "app/dir" <2> -ifndef::openshift-online[] - dockerfile: "FROM openshift/ruby-22-centos7\nUSER example" <3> -endif::[] ----- -<1> The `git` field contains the URI to the remote Git repository of the source code. Optionally, specify the `ref` field to check out a specific Git reference. A valid `ref` can be a SHA1 tag or a branch name. -<2> The `contextDir` field allows you to override the default location inside the source code repository where the build looks for the application source code. If your application exists inside a sub-directory, you can override the default location (the root folder) using this field. -ifndef::openshift-online[] -<3> If the optional `dockerfile` field is provided, it should be a string containing a Dockerfile that overwrites any Dockerfile that may exist in the source repository. -endif::[] - -If the `ref` field denotes a pull request, the system uses a `git fetch` operation and then checks out `FETCH_HEAD`. - -When no `ref` value is provided, {product-title} performs a shallow clone (`--depth=1`). In this case, only the files associated with the most recent commit on the default branch (typically `master`) are downloaded. This results in repositories downloading faster, but without the full commit history. To perform a full `git clone` of the default branch of a specified repository, set `ref` to the name of the default branch (for example, `master`). - - -[WARNING] -==== -Git clone operations that go through a proxy that is performing man-in-the-middle (MITM) TLS hijacking or reencrypting of the proxied connection do not work.
-==== diff --git a/modules/builds-source-input-satellite-config.adoc b/modules/builds-source-input-satellite-config.adoc deleted file mode 100644 index f386639992e6..000000000000 --- a/modules/builds-source-input-satellite-config.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/running-entitled-builds.adoc - -:_content-type: PROCEDURE -[id="builds-source-input-satellite-config_{context}"] -= Adding Red Hat Satellite configurations to builds - -Builds that use Red Hat Satellite to install content must provide appropriate configurations to obtain content from Satellite repositories. - -.Prerequisites - -* You must provide or create a `yum`-compatible repository configuration file that downloads content from your Satellite instance. -+ -.Sample repository configuration -+ -[source,terminal] ----- -[test-] -name=test- -baseurl = https://satellite.../content/dist/rhel/server/7/7Server/x86_64/os -enabled=1 -gpgcheck=0 -sslverify=0 -sslclientkey = /etc/pki/entitlement/...-key.pem -sslclientcert = /etc/pki/entitlement/....pem ----- - -.Procedure - -. Create a `ConfigMap` containing the Satellite repository configuration file: -+ -[source,terminal] ----- -$ oc create configmap yum-repos-d --from-file /path/to/satellite.repo ----- - -. Add the Satellite repository configuration and entitlement key as a build volumes: -+ -[source,yaml] ----- -strategy: - dockerStrategy: - from: - kind: ImageStreamTag - name: ubi:latest - volumes: - - name: yum-repos-d - mounts: - - destinationPath: /etc/yum.repos.d - source: - type: ConfigMap - configMap: - name: yum-repos-d - - name: etc-pki-entitlement - mounts: - - destinationPath: /etc/pki/entitlement - source: - type: Secret - secret: - secretName: etc-pki-entitlement ----- diff --git a/modules/builds-source-input-subman-config.adoc b/modules/builds-source-input-subman-config.adoc deleted file mode 100644 index 5f04670bd739..000000000000 --- a/modules/builds-source-input-subman-config.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/running-entitled-builds.adoc - -[id="builds-source-input-subman-config_{context}"] -= Adding Subscription Manager configurations to builds - -Builds that use the Subscription Manager to install content must provide appropriate configuration files and certificate authorities for subscribed repositories. - -.Prerequisites - -You must have access to the Subscription Manager's configuration and certificate authority files. - -.Procedure - -. Create a `ConfigMap` for the Subscription Manager configuration: -+ -[source,terminal] ----- -$ oc create configmap rhsm-conf --from-file /path/to/rhsm/rhsm.conf ----- - -. Create a `ConfigMap` for the certificate authority: -+ -[source,terminal] ----- -$ oc create configmap rhsm-ca --from-file /path/to/rhsm/ca/redhat-uep.pem ----- - -. 
Add the Subscription Manager configuration and certificate authority to the -`BuildConfig`: -+ -[source,yaml] ----- -source: - configMaps: - - configMap: - name: rhsm-conf - destinationDir: rhsm-conf - - configMap: - name: rhsm-ca - destinationDir: rhsm-ca ----- diff --git a/modules/builds-source-secret-basic-auth.adoc b/modules/builds-source-secret-basic-auth.adoc deleted file mode 100644 index 19d84dc40173..000000000000 --- a/modules/builds-source-secret-basic-auth.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-source-secret-basic-auth_{context}"] -= Creating a secret from source code basic authentication - -Basic authentication requires either a combination of `--username` and `--password`, or a token to authenticate against the software configuration management (SCM) server. - -.Prerequisites - -* User name and password to access the private repository. - -.Procedure - -. Create the secret first before using the `--username` and `--password` to access the private repository: -+ -[source,terminal] ----- -$ oc create secret generic \ - --from-literal=username= \ - --from-literal=password= \ - --type=kubernetes.io/basic-auth ----- -+ -. Create a basic authentication secret with a token: -+ -[source,terminal] ----- -$ oc create secret generic \ - --from-literal=password= \ - --type=kubernetes.io/basic-auth ----- diff --git a/modules/builds-source-secret-combinations-basic-auth-ca.adoc b/modules/builds-source-secret-combinations-basic-auth-ca.adoc deleted file mode 100644 index 6c4e755705ab..000000000000 --- a/modules/builds-source-secret-combinations-basic-auth-ca.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-source-secret-combinations-basic-auth-ca_{context}"] -= Creating a basic authentication secret with a CA certificate - -You can combine the different methods for creating source clone secrets for your specific needs, such as a secret that combines a basic authentication and certificate authority (CA) certificate. - -.Prerequisites - -* Basic authentication credentials -* CA certificate - -.Procedure - -* Create a basic authentication secret with a CA certificate, run: -+ -[source,terminal] ----- -$ oc create secret generic \ - --from-literal=username= \ - --from-literal=password= \ - --from-file=ca-cert= \ - --type=kubernetes.io/basic-auth ----- diff --git a/modules/builds-source-secret-combinations-basic-auth-gitconfig-ca.adoc b/modules/builds-source-secret-combinations-basic-auth-gitconfig-ca.adoc deleted file mode 100644 index a7236a9a7dab..000000000000 --- a/modules/builds-source-secret-combinations-basic-auth-gitconfig-ca.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-source-secret-combinations-basic-auth-gitconfig-ca_{context}"] -= Creating a basic authentication secret with a .gitconfig file and CA certificate - -You can combine the different methods for creating source clone secrets for your specific needs, such as a secret that combines a basic authentication, `.gitconfig` file, and certificate authority (CA) certificate. 
- -.Prerequisites - -* Basic authentication credentials -* `.gitconfig` file -* CA certificate - -.Procedure - -* To create a basic authentication secret with a `.gitconfig` file and CA certificate, run: -+ -[source,terminal] ----- -$ oc create secret generic \ - --from-literal=username= \ - --from-literal=password= \ - --from-file= \ - --from-file=ca-cert= \ - --type=kubernetes.io/basic-auth ----- diff --git a/modules/builds-source-secret-combinations-basic-auth-gitconfig.adoc b/modules/builds-source-secret-combinations-basic-auth-gitconfig.adoc deleted file mode 100644 index 8df61ec39b53..000000000000 --- a/modules/builds-source-secret-combinations-basic-auth-gitconfig.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-source-secret-combinations-basic-auth-gitconfig_{context}"] -= Creating a basic authentication secret with a .gitconfig file - -You can combine the different methods for creating source clone secrets for your specific needs, such as a secret that combines a basic authentication and `.gitconfig` file. - -.Prerequisites - -* Basic authentication credentials -* `.gitconfig` file - -.Procedure - -* To create a basic authentication secret with a `.gitconfig` file, run: -+ -[source,terminal] ----- -$ oc create secret generic \ - --from-literal=username= \ - --from-literal=password= \ - --from-file= \ - --type=kubernetes.io/basic-auth ----- diff --git a/modules/builds-source-secret-combinations-gitconfig-ca.adoc b/modules/builds-source-secret-combinations-gitconfig-ca.adoc deleted file mode 100644 index debed8750cf9..000000000000 --- a/modules/builds-source-secret-combinations-gitconfig-ca.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-source-secret-combinations-gitconfig-ca_{context}"] -= Creating a secret that combines a .gitconfig file and CA certificate - -You can combine the different methods for creating source clone secrets for your specific needs, such as a secret that combines a `.gitconfig` file and certificate authority (CA) certificate. - -.Prerequisites - -* .gitconfig file -* CA certificate - -.Procedure - -* To create a secret that combines a `.gitconfig` file and CA certificate, run: -+ -[source,terminal] ----- -$ oc create secret generic \ - --from-file=ca.crt= \ - --from-file= ----- diff --git a/modules/builds-source-secret-combinations-ssh-gitconfig.adoc b/modules/builds-source-secret-combinations-ssh-gitconfig.adoc deleted file mode 100644 index 8c82cc2f5c19..000000000000 --- a/modules/builds-source-secret-combinations-ssh-gitconfig.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-source-secret-combinations-ssh-gitconfig_{context}"] -= Creating a SSH-based authentication secret with a `.gitconfig` file - -You can combine the different methods for creating source clone secrets for your specific needs, such as a SSH-based authentication secret with a `.gitconfig` file. 
- -.Prerequisites - -* SSH authentication -* .gitconfig file - -.Procedure - -* To create a SSH-based authentication secret with a `.gitconfig` file, run: -+ -[source,terminal] ----- -$ oc create secret generic \ - --from-file=ssh-privatekey= \ - --from-file= \ - --type=kubernetes.io/ssh-auth ----- diff --git a/modules/builds-source-secret-combinations.adoc b/modules/builds-source-secret-combinations.adoc deleted file mode 100644 index 128dc9356a8a..000000000000 --- a/modules/builds-source-secret-combinations.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -[id="builds-source-secret-combinations_{context}"] -= Source secret combinations - -You can combine the different methods for creating source clone secrets for your specific needs. diff --git a/modules/builds-source-secret-ssh-key-auth.adoc b/modules/builds-source-secret-ssh-key-auth.adoc deleted file mode 100644 index f85a5927b7c5..000000000000 --- a/modules/builds-source-secret-ssh-key-auth.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-source-secret-ssh-key-auth_{context}"] -= Creating a secret from source code SSH key authentication - -SSH key based authentication requires a private SSH key. - -The repository keys are usually located in the `$HOME/.ssh/` directory, and are named `id_dsa.pub`, `id_ecdsa.pub`, `id_ed25519.pub`, or `id_rsa.pub` by default. - -.Procedure - -. Generate SSH key credentials: -+ -[source,terminal] ----- -$ ssh-keygen -t ed25519 -C "your_email@example.com" ----- -+ -[NOTE] -==== -Creating a passphrase for the SSH key prevents {product-title} from building. When prompted for a passphrase, leave it blank. -==== -+ -Two files are created: the public key and a corresponding private key (one of `id_dsa`, `id_ecdsa`, `id_ed25519`, or `id_rsa`). With both of these in place, consult your source control management (SCM) system's manual on how to upload -the public key. The private key is used to access your private repository. -+ -. Before using the SSH key to access the private repository, create the secret: -+ -[source,terminal] ----- -$ oc create secret generic \ - --from-file=ssh-privatekey= \ - --from-file= \ <1> - --type=kubernetes.io/ssh-auth ----- -<1> Optional: Adding this field enables strict server host key check. -+ -[WARNING] -==== -Skipping the `known_hosts` file while creating the secret makes the build vulnerable to a potential man-in-the-middle (MITM) attack. -==== -+ -[NOTE] -==== -Ensure that the `known_hosts` file includes an entry for the host of your source code. -==== diff --git a/modules/builds-source-secret-trusted-ca.adoc b/modules/builds-source-secret-trusted-ca.adoc deleted file mode 100644 index d4ab011b1739..000000000000 --- a/modules/builds-source-secret-trusted-ca.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-source-secret-trusted-ca_{context}"] -= Creating a secret from source code trusted certificate authorities - -The set of Transport Layer Security (TLS) certificate authorities (CA) that are trusted during a Git clone operation are built into the {product-title} infrastructure images. 
If your Git server uses a self-signed certificate or one signed by an authority not trusted by the image, you can create a secret that contains the certificate or disable TLS verification. - -If you create a secret for the CA certificate, {product-title} uses it to access your Git server during the Git clone operation. Using this method is significantly more secure than disabling Git SSL verification, which accepts any TLS certificate that is presented. - -.Procedure - -Create a secret with a CA certificate file. - -. If your CA uses Intermediate Certificate Authorities, combine the certificates for all CAs in a `ca.crt` file. Enter the following command: -+ -[source,terminal] ----- -$ cat intermediateCA.crt intermediateCA.crt rootCA.crt > ca.crt ----- - -.. Create the secret: -+ -[source,terminal] ----- -$ oc create secret generic mycert --from-file=ca.crt= <1> ----- -<1> You must use the key name `ca.crt`. diff --git a/modules/builds-source-secrets-entitlements.adoc b/modules/builds-source-secrets-entitlements.adoc deleted file mode 100644 index d96bbdaa2285..000000000000 --- a/modules/builds-source-secrets-entitlements.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/running-entitled-builds.adoc - -:_content-type: PROCEDURE -[id="builds-source-secrets-entitlements_{context}"] -= Adding subscription entitlements as a build secret - -Builds that use Red Hat subscriptions to install content must include the entitlement keys as a build secret. - -.Prerequisites - -You must have access to Red Hat entitlements through your subscription. The entitlement secret is automatically created by the Insights Operator. - - -[TIP] -==== -When you perform an Entitlement Build using {op-system-base-full} 7, you must have the following instructions in your Dockerfile before you run any `yum` commands: - -[source,terminal] ----- -RUN rm /etc/rhsm-host ----- -==== - -.Procedure - -. Add the etc-pki-entitlement secret as a build volume in the build configuration’s Docker strategy: -+ -[source,yaml] ----- -strategy: - dockerStrategy: - from: - kind: ImageStreamTag - name: ubi:latest - volumes: - - name: etc-pki-entitlement - mounts: - - destinationPath: /etc/pki/entitlement - source: - type: Secret - secret: - secretName: etc-pki-entitlement ----- diff --git a/modules/builds-source-to-image.adoc b/modules/builds-source-to-image.adoc deleted file mode 100644 index fa824fd6a69b..000000000000 --- a/modules/builds-source-to-image.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -[id="builds-source-to-image_{context}"] -= Source-to-image strategy - -When using a `Source` strategy, all defined input secrets are copied to their respective `destinationDir`. If you left `destinationDir` empty, then the secrets are placed in the working directory of the builder image. - -The same rule is used when a `destinationDir` is a relative path. The secrets are placed in the paths that are relative to the working directory of the image. The final directory in the `destinationDir` path is created if it does not exist in the builder image. All preceding directories in the `destinationDir` must exist, or an error will occur. - -[NOTE] -==== -Input secrets are added as world-writable, have `0666` permissions, and are truncated to size zero after executing the `assemble` script. This means that the secret files exist in the resulting image, but they are empty for security reasons. 
- -Input config maps are not truncated after the `assemble` script completes. -==== diff --git a/modules/builds-strategy-custom-build.adoc b/modules/builds-strategy-custom-build.adoc deleted file mode 100644 index e21f003af6d3..000000000000 --- a/modules/builds-strategy-custom-build.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/build-strategies.adoc - -[id="builds-strategy-custom-build_{context}"] -= Custom build - -The custom build strategy allows developers to define a specific builder image responsible for the entire build process. Using your own builder image allows you to customize your build process. - -A custom builder image is a plain container image embedded with build process logic, for example for building RPMs or base images. - -Custom builds run with a high level of privilege and are not available to users by default. Only users who can be trusted with cluster administration permissions should be granted access to run custom builds. diff --git a/modules/builds-strategy-custom-environment-variables.adoc b/modules/builds-strategy-custom-environment-variables.adoc deleted file mode 100644 index 0f34f2d3a76d..000000000000 --- a/modules/builds-strategy-custom-environment-variables.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-custom-environment-variables_{context}"] -= Using environment variables for custom builds - -To make environment variables available to the custom build process, you can add environment variables to the `customStrategy` definition of the build configuration. - -The environment variables defined there are passed to the pod that runs the -custom build. - -.Procedure - -. Define a custom HTTP proxy to be used during build: -+ -[source,yaml] ----- -customStrategy: -... - env: - - name: "HTTP_PROXY" - value: "http://myproxy.net:5187/" ----- -+ -. To manage environment variables defined in the build configuration, enter the following command: -+ -[source,terminal] ----- -$ oc set env ----- diff --git a/modules/builds-strategy-custom-from-image.adoc b/modules/builds-strategy-custom-from-image.adoc deleted file mode 100644 index 944868031aa8..000000000000 --- a/modules/builds-strategy-custom-from-image.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-custom-from-image_{context}"] -= Using FROM image for custom builds - -You can use the `customStrategy.from` section to indicate the image to use for the custom build - -.Procedure - -* Set the `customStrategy.from` section: -+ -[source,yaml] ----- -strategy: - customStrategy: - from: - kind: "DockerImage" - name: "openshift/sti-image-builder" ----- diff --git a/modules/builds-strategy-custom-secrets.adoc b/modules/builds-strategy-custom-secrets.adoc deleted file mode 100644 index 5689e522dcad..000000000000 --- a/modules/builds-strategy-custom-secrets.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-custom-secrets_{context}"] -= Using secrets in custom builds - -In addition to secrets for source and images that can be added to all build types, custom strategies allow adding an arbitrary list of secrets to the builder pod. 
- -.Procedure - -* To mount each secret at a specific location, edit the `secretSource` and `mountPath` fields of the `strategy` YAML file: -+ -[source,yaml] ----- -strategy: - customStrategy: - secrets: - - secretSource: <1> - name: "secret1" - mountPath: "/tmp/secret1" <2> - - secretSource: - name: "secret2" - mountPath: "/tmp/secret2" ----- -<1> `secretSource` is a reference to a secret in the same namespace as the build. -<2> `mountPath` is the path inside the custom builder where the secret should be mounted. diff --git a/modules/builds-strategy-docker-build-arguments.adoc b/modules/builds-strategy-docker-build-arguments.adoc deleted file mode 100644 index b43dbe77cee7..000000000000 --- a/modules/builds-strategy-docker-build-arguments.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-docker-build-arguments_{context}"] -= Adding docker build arguments - -You can set link:http://docs.docker.com/v1.7/reference/api/hub_registry_spec/#docker-registry-1-0[docker build arguments] using the `buildArgs` array. The build arguments are passed to docker when a build is started. - -[TIP] -==== -See link:https://docs.docker.com/engine/reference/builder/#understand-how-arg-and-from-interact[Understand how ARG and FROM interact] in the Dockerfile reference documentation. -==== - -.Procedure - -To set docker build arguments, add entries to the `buildArgs` array, which is located in the `dockerStrategy` definition of the `BuildConfig` object. For example: - -[source,yaml] ----- -dockerStrategy: -... - buildArgs: - - name: "foo" - value: "bar" ----- - -[NOTE] -==== -Only the `name` and `value` fields are supported. Any settings on the `valueFrom` field are ignored. -==== diff --git a/modules/builds-strategy-docker-entitled-satellite.adoc b/modules/builds-strategy-docker-entitled-satellite.adoc deleted file mode 100644 index 845c1c9b4861..000000000000 --- a/modules/builds-strategy-docker-entitled-satellite.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -//* builds/running-entitled-builds.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-docker-entitled-satellite_{context}"] -= Docker builds using Red Hat Satellite subscriptions - -Docker strategy builds can use Red Hat Satellite repositories to install subscription content. - -.Prerequisites - -* You have added the entitlement keys and Satellite repository configurations as build volumes. - -.Procedure - -Use the following as an example Dockerfile to install content with Satellite: - -[source,terminal] ----- -FROM registry.redhat.io/ubi9/ubi:latest -RUN dnf search kernel-devel --showduplicates && \ - dnf install -y kernel-devel ----- diff --git a/modules/builds-strategy-docker-entitled-subman.adoc b/modules/builds-strategy-docker-entitled-subman.adoc deleted file mode 100644 index 3a52967a0c7f..000000000000 --- a/modules/builds-strategy-docker-entitled-subman.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/running-entitled-builds.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-docker-entitled-subman_{context}"] -= Docker builds using Subscription Manager - -Docker strategy builds can use the Subscription Manager to install subscription content. - -.Prerequisites - -The entitlement keys must be added as build strategy volumes. 
- -.Procedure - -Use the following as an example Dockerfile to install content with the Subscription Manager: - -[source,terminal] ----- -FROM registry.redhat.io/ubi9/ubi:latest -RUN dnf search kernel-devel --showduplicates && \ - dnf install -y kernel-devel ----- diff --git a/modules/builds-strategy-docker-environment-variables.adoc b/modules/builds-strategy-docker-environment-variables.adoc deleted file mode 100644 index d5d023f8b213..000000000000 --- a/modules/builds-strategy-docker-environment-variables.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-docker-environment-variables_{context}"] -= Using docker environment variables - -To make environment variables available to the docker build process and resulting image, you can add environment variables to the `dockerStrategy` definition of the build configuration. - -The environment variables defined there are inserted as a single `ENV` Dockerfile instruction right after the `FROM` instruction, so that it can be referenced later on within the Dockerfile. - -.Procedure - -The variables are defined during build and stay in the output image, therefore they will be present in any container that runs that image as well. - -For example, defining a custom HTTP proxy to be used during build and runtime: - -[source,yaml] ----- -dockerStrategy: -... - env: - - name: "HTTP_PROXY" - value: "http://myproxy.net:5187/" ----- - -You can also manage environment variables defined in the build configuration with the `oc set env` command. diff --git a/modules/builds-strategy-docker-force-pull-example.adoc b/modules/builds-strategy-docker-force-pull-example.adoc deleted file mode 100644 index a6d63f81a576..000000000000 --- a/modules/builds-strategy-docker-force-pull-example.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -[id="builds-strategy-docker-force-pull-example_{context}"] -= Docker force pull flag example - -Set the following to use the `forcePull` flag with Docker: - -[source,yaml] ----- -strategy: - dockerStrategy: - forcePull: true <1> ----- -<1> This flag causes the local builder image to be ignored, and a fresh version to be pulled from the registry to which the imagestream points. Setting `forcePull` to `false` results in the default behavior of honoring the image stored locally. diff --git a/modules/builds-strategy-docker-from-image.adoc b/modules/builds-strategy-docker-from-image.adoc deleted file mode 100644 index 5a20f963a70b..000000000000 --- a/modules/builds-strategy-docker-from-image.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-docker-from-image_{context}"] -= Replacing Dockerfile FROM image - -You can replace the `FROM` instruction of the Dockerfile with the `from` of the `BuildConfig` object. If the Dockerfile uses multi-stage builds, the image in the last `FROM` instruction will be replaced. - -.Procedure - -To replace the `FROM` instruction of the Dockerfile with the `from` of the `BuildConfig`. 
- -[source,yaml] ----- -strategy: - dockerStrategy: - from: - kind: "ImageStreamTag" - name: "debian:latest" ----- diff --git a/modules/builds-strategy-docker-squash-layers.adoc b/modules/builds-strategy-docker-squash-layers.adoc deleted file mode 100644 index f4f5eeab55ae..000000000000 --- a/modules/builds-strategy-docker-squash-layers.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -//* builds/running-entitled-builds.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-docker-squash-layers_{context}"] -= Squashing layers with docker builds - -Docker builds normally create a layer representing each instruction in a Dockerfile. Setting the `imageOptimizationPolicy` to `SkipLayers` merges all instructions into a single layer on top of the base image. - -.Procedure - -* Set the `imageOptimizationPolicy` to `SkipLayers`: -+ -[source,yaml] ----- -strategy: - dockerStrategy: - imageOptimizationPolicy: SkipLayers ----- diff --git a/modules/builds-strategy-dockerfile-path.adoc b/modules/builds-strategy-dockerfile-path.adoc deleted file mode 100644 index 588e23dc482b..000000000000 --- a/modules/builds-strategy-dockerfile-path.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-dockerfile-path_{context}"] -= Using Dockerfile path - -By default, docker builds use a Dockerfile located at the root of the context specified in the `BuildConfig.spec.source.contextDir` field. - -The `dockerfilePath` field allows the build to use a different path to locate your Dockerfile, relative to the `BuildConfig.spec.source.contextDir` field. It can be a different file name than the default Dockerfile, such as `MyDockerfile`, or a path to a Dockerfile in a subdirectory, such as `dockerfiles/app1/Dockerfile`. - -.Procedure - -To use the `dockerfilePath` field for the build to use a different path to locate your Dockerfile, set: - -[source,yaml] ----- -strategy: - dockerStrategy: - dockerfilePath: dockerfiles/app1/Dockerfile ----- diff --git a/modules/builds-strategy-enable-pulling-pushing.adoc b/modules/builds-strategy-enable-pulling-pushing.adoc deleted file mode 100644 index 93e9d9b706d7..000000000000 --- a/modules/builds-strategy-enable-pulling-pushing.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/running-entitled-builds.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-enable-pulling-pushing_{context}"] -= Enabling pulling and pushing - -You can enable pulling to a private registry by setting the pull secret and pushing by setting the push secret in the build configuration. - -.Procedure - -To enable pulling to a private registry: - -* Set the pull secret in the build configuration. - -To enable pushing: - - * Set the push secret in the build configuration. - -//// -[NOTE] -==== -This module needs specific instructions and examples. And needs to be used for Docker and S2I. 
-==== -//// diff --git a/modules/builds-strategy-force-pull-procedure.adoc b/modules/builds-strategy-force-pull-procedure.adoc deleted file mode 100644 index bfc8efea84a1..000000000000 --- a/modules/builds-strategy-force-pull-procedure.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -//* builds/build-strategies.adoc - -[id="builds-strategy-force-pull-procedure_{context}"] -= Using the force pull flag - -By default, if the builder image specified in the build configuration is available locally on the node, that image will be used. However, you can use the `forcepull` flag to override the local image and refresh it from the registry. - -.Procedure - -To override the local image and refresh it from the registry to which the image stream points, create a `BuildConfig` with the `forcePull` flag set to `true`. diff --git a/modules/builds-strategy-pipeline-build.adoc b/modules/builds-strategy-pipeline-build.adoc deleted file mode 100644 index a1caef9af190..000000000000 --- a/modules/builds-strategy-pipeline-build.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -//*builds/build-strategies.adoc -//*builds/understanding-image-builds - -[id="builds-strategy-pipeline-build_{context}"] -= Pipeline build - -[IMPORTANT] -==== -The Pipeline build strategy is deprecated in {product-title} 4. Equivalent and improved functionality is present in the {product-title} Pipelines based on Tekton. - -Jenkins images on {product-title} are fully supported and users should follow Jenkins user documentation for defining their `jenkinsfile` in a job or store it in a Source Control Management system. -==== - -The Pipeline build strategy allows developers to define a Jenkins pipeline for use by the Jenkins pipeline plugin. The build can be started, monitored, and managed by {product-title} in the same way as any other build type. - -Pipeline workflows are defined in a `jenkinsfile`, either embedded directly in the build configuration, or supplied in a Git repository and referenced by the build configuration. - -//The first time a project defines a build configuration using a Pipeline -//strategy, {product-title} instantiates a Jenkins server to execute the -//pipeline. Subsequent Pipeline build configurations in the project share this -//Jenkins server. - -//[role="_additional-resources"] -//.Additional resources - -//* Pipeline build configurations require a Jenkins server to manage the -//pipeline execution. diff --git a/modules/builds-strategy-pipeline-environment-variables.adoc b/modules/builds-strategy-pipeline-environment-variables.adoc deleted file mode 100644 index b66b2f868efe..000000000000 --- a/modules/builds-strategy-pipeline-environment-variables.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-pipeline-environment-variables_{context}"] -= Using environment variables for pipeline builds - -[IMPORTANT] -==== -The Pipeline build strategy is deprecated in {product-title} 4. Equivalent and improved functionality is present in the {product-title} Pipelines based on Tekton. - -Jenkins images on {product-title} are fully supported and users should follow Jenkins user documentation for defining their `jenkinsfile` in a job or store it in a Source Control Management system. 
-==== - -To make environment variables available to the Pipeline build process, you can add environment variables to the `jenkinsPipelineStrategy` definition of the build configuration. - -Once defined, the environment variables will be set as parameters for any Jenkins job associated with the build configuration. - -.Procedure - -* To define environment variables to be used during build, edit the YAML file: -+ -[source,yaml] ----- -jenkinsPipelineStrategy: -... - env: - - name: "FOO" - value: "BAR" ----- - -You can also manage environment variables defined in the build configuration with the `oc set env` command. - - - -//[NOTE] -//==== -// This module needs specific instructions and examples. -// This is similar between Docker, S2I, and Custom. -//==== diff --git a/modules/builds-strategy-pipeline-mapping-buildconfig-jenkins.adoc b/modules/builds-strategy-pipeline-mapping-buildconfig-jenkins.adoc deleted file mode 100644 index 93912c1694d9..000000000000 --- a/modules/builds-strategy-pipeline-mapping-buildconfig-jenkins.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -[id="builds-strategy-pipeline-mapping-buildconfig-jenkins_{context}"] -= Mapping between BuildConfig environment variables and Jenkins job parameters - -When a Jenkins job is created or updated based on changes to a Pipeline strategy build configuration, any environment variables in the build configuration are mapped to Jenkins job parameters definitions, where the default values for the Jenkins job parameters definitions are the current values of the associated environment variables. - -After the Jenkins job's initial creation, you can still add additional parameters to the job from the Jenkins console. The parameter names differ from the names of the environment variables in the build configuration. The parameters are honored when builds are started for those Jenkins jobs. - -How you start builds for the Jenkins job dictates how the parameters are set. - -* If you start with `oc start-build`, the values of the environment variables in the build configuration are the parameters set for the corresponding job instance. Any changes you make to the parameters' default values from the Jenkins console are ignored. The build configuration values take precedence. - -* If you start with `oc start-build -e`, the values for the environment variables specified in the `-e` option take precedence. -** If you specify an environment variable not listed in the build configuration, they will be added as a Jenkins job parameter definitions. -** Any changes you make from the Jenkins console to the parameters corresponding to the environment variables are ignored. The build configuration and what you specify with `oc start-build -e` takes precedence. - -* If you start the Jenkins job with the Jenkins console, then you can control the setting of the parameters with the Jenkins console as part of starting a build for the job. - -[NOTE] -==== -It is recommended that you specify in the build configuration all possible environment variables to be associated with job parameters. Doing so reduces disk I/O and improves performance during Jenkins processing. 
-==== diff --git a/modules/builds-strategy-pipeline-providing-jenkinsfile.adoc b/modules/builds-strategy-pipeline-providing-jenkinsfile.adoc deleted file mode 100644 index e1dd011463bb..000000000000 --- a/modules/builds-strategy-pipeline-providing-jenkinsfile.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-pipeline-providing-jenkinsfile_{context}"] -= Providing the Jenkins file for pipeline builds - -[IMPORTANT] -==== -The Pipeline build strategy is deprecated in {product-title} 4. Equivalent and improved functionality is present in the {product-title} Pipelines based on Tekton. - -Jenkins images on {product-title} are fully supported and users should follow Jenkins user documentation for defining their `jenkinsfile` in a job or store it in a Source Control Management system. -==== - -The `jenkinsfile` uses the standard groovy language syntax to allow fine grained control over the configuration, build, and deployment of your application. - -You can supply the `jenkinsfile` in one of the following ways: - -* A file located within your source code repository. -* Embedded as part of your build configuration using the `jenkinsfile` field. - -When using the first option, the `jenkinsfile` must be included in your applications source code repository at one of the following locations: - -* A file named `jenkinsfile` at the root of your repository. -* A file named `jenkinsfile` at the root of the source `contextDir` of your repository. -* A file name specified via the `jenkinsfilePath` field of the `JenkinsPipelineStrategy` section of your BuildConfig, which is relative to the source `contextDir` if supplied, otherwise it defaults to the root of the repository. - -The `jenkinsfile` is run on the Jenkins agent pod, which must have the -{product-title} client binaries available if you intend to use the {product-title} DSL. - -.Procedure - -To provide the Jenkins file, you can either: - -* Embed the Jenkins file in the build configuration. -* Include in the build configuration a reference to the Git repository that contains the Jenkins file. - -.Embedded Definition -[source,yaml] ----- -kind: "BuildConfig" -apiVersion: "v1" -metadata: - name: "sample-pipeline" -spec: - strategy: - jenkinsPipelineStrategy: - jenkinsfile: |- - node('agent') { - stage 'build' - openshiftBuild(buildConfig: 'ruby-sample-build', showBuildLogs: 'true') - stage 'deploy' - openshiftDeploy(deploymentConfig: 'frontend') - } ----- - -.Reference to Git Repository -[source,yaml] ----- -kind: "BuildConfig" -apiVersion: "v1" -metadata: - name: "sample-pipeline" -spec: - source: - git: - uri: "https://github.com/openshift/ruby-hello-world" - strategy: - jenkinsPipelineStrategy: - jenkinsfilePath: some/repo/dir/filename <1> ----- -<1> The optional `jenkinsfilePath` field specifies the name of the file to use, relative to the source `contextDir`. If `contextDir` is omitted, it defaults to the root of the repository. If `jenkinsfilePath` is omitted, it defaults to `jenkinsfile`. 
diff --git a/modules/builds-strategy-s2i-build.adoc b/modules/builds-strategy-s2i-build.adoc deleted file mode 100644 index e8d51fde8f34..000000000000 --- a/modules/builds-strategy-s2i-build.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/build-strategies.adoc -//* builds/understanding-image-builds.adoc - -[id="builds-strategy-s2i-build_{context}"] -= Source-to-image build - -Source-to-image (S2I) is a tool for building reproducible container images. It produces ready-to-run images by injecting application source into a container image and assembling a new image. The new image incorporates the base image, the builder, and built source and is ready to use with the `buildah run` command. S2I supports incremental builds, which re-use previously downloaded dependencies, previously built artifacts, and so on. - - -//// -The advantages of S2I include the following: - -[horizontal] -Image flexibility:: S2I scripts can be written to inject application code into almost any existing Docker-formatted container image, taking advantage of the existing ecosystem. Note that, currently, S2I relies on `tar` to inject application source, so the image needs to be able to process tarred content. - -Speed:: With S2I, the assemble process can perform a large number of complex operations without creating a new layer at each step, resulting in a fast process. In addition, S2I scripts can be written to re-use artifacts stored in a previous version of the application image, rather than having to download or build them each time the build is run. - -Patchability:: S2I allows you to rebuild the application consistently if an underlying image needs a patch due to a security issue. - -Operational efficiency:: By restricting build operations instead of allowing arbitrary actions, as a Dockerfile would allow, the PaaS operator can avoid accidental or intentional abuses of the build system. - -Operational security:: Building an arbitrary Dockerfile exposes the host system to root privilege escalation. This can be exploited by a malicious user because the entire Docker build process is run as a user with Docker privileges. S2I restricts the operations performed as a root user and can run the scripts as a non-root user. - -User efficiency:: S2I prevents developers from performing arbitrary `yum install` type operations, which could slow down development iteration, during their application build. - -Ecosystem:: S2I encourages a shared ecosystem of images where you can leverage best practices for your applications. - -Reproducibility:: Produced images can include all inputs including specific versions of build tools and dependencies. This ensures that the image can be reproduced precisely. -//// diff --git a/modules/builds-strategy-s2i-buildconfig-environment.adoc b/modules/builds-strategy-s2i-buildconfig-environment.adoc deleted file mode 100644 index ac7626313953..000000000000 --- a/modules/builds-strategy-s2i-buildconfig-environment.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -//* * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-s2i-buildconfig-environment_{context}"] -= Using source-to-image build configuration environment - -You can add environment variables to the `sourceStrategy` definition of the build configuration. 
The environment variables defined there are visible during the `assemble` script execution and will be defined in the output image, making them also available to the `run` script and application code. - -.Procedure - -* For example, to disable assets compilation for your Rails application: -+ -[source,yaml] ----- -sourceStrategy: -... - env: - - name: "DISABLE_ASSET_COMPILATION" - value: "true" ----- - -[role="_additional-resources"] -.Additional resources - -* The build environment section provides more advanced instructions. -* You can also manage environment variables defined in the build configuration with the `oc set env` command. diff --git a/modules/builds-strategy-s2i-environment-files.adoc b/modules/builds-strategy-s2i-environment-files.adoc deleted file mode 100644 index e93d9ad7bc91..000000000000 --- a/modules/builds-strategy-s2i-environment-files.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-s2i-environment-files_{context}"] -= Using source-to-image environment files - -Source build enables you to set environment values, one per line, inside your application, by specifying them in a `.s2i/environment` file in the source repository. The environment variables specified in this file are present during the build process and in the output image. - -If you provide a `.s2i/environment` file in your source repository, source-to-image (S2I) reads this file during the build. This allows customization of the build behavior as the `assemble` script may use these variables. - -.Procedure - -For example, to disable assets compilation for your Rails application during the build: - -* Add `DISABLE_ASSET_COMPILATION=true` in the `.s2i/environment` file. - -In addition to builds, the specified environment variables are also available in the running application itself. For example, to cause the Rails application to start in `development` mode instead of `production`: - -* Add `RAILS_ENV=development` to the `.s2i/environment` file. - - -The complete list of supported environment variables is available in the using images section for each image. diff --git a/modules/builds-strategy-s2i-environment-variables.adoc b/modules/builds-strategy-s2i-environment-variables.adoc deleted file mode 100644 index 027e0fd17889..000000000000 --- a/modules/builds-strategy-s2i-environment-variables.adoc +++ /dev/null @@ -1,7 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -[id="builds-strategy-s2i-environment-variables_{context}"] -= Source-to-image environment variables - -There are two ways to make environment variables available to the source build process and resulting image. Environment files and BuildConfig environment values. Variables provided will be present during the build process and in the output image. 
diff --git a/modules/builds-strategy-s2i-force-pull-example.adoc b/modules/builds-strategy-s2i-force-pull-example.adoc deleted file mode 100644 index 85f659bdd1a2..000000000000 --- a/modules/builds-strategy-s2i-force-pull-example.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -[id="builds-strategy-s2i-force-pull-example_{context}"] -= Source-to-Image (S2I) force pull flag example - -Set the following to use the `forcePull` flag with S2I: - -[source,yaml] ----- -strategy: - sourceStrategy: - from: - kind: "ImageStreamTag" - name: "builder-image:latest" <1> - forcePull: true <2> ----- -<1> The builder image being used, where the local version on the node may not be up to date with the version in the registry to which the imagestream points. -<2> This flag causes the local builder image to be ignored and a fresh version to be pulled from the registry to which the imagestream points. Setting `forcePull` to `false` results in the default behavior of honoring the image stored locally. diff --git a/modules/builds-strategy-s2i-ignore-source-files.adoc b/modules/builds-strategy-s2i-ignore-source-files.adoc deleted file mode 100644 index c7d3d76d7d48..000000000000 --- a/modules/builds-strategy-s2i-ignore-source-files.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -[id="builds-strategy-s2i-ignore-source-files_{context}"] -= Ignoring source-to-image source files - -Source-to-image (S2I) supports a `.s2iignore` file, which contains a list of file patterns that should be ignored. Files in the build working directory, as provided by the various input sources, that match a pattern found in the `.s2iignore` file will not be made available to the `assemble` script. - -//For more details on the format of the `.s2iignore` file, see the S2I documentation. diff --git a/modules/builds-strategy-s2i-incremental-builds.adoc b/modules/builds-strategy-s2i-incremental-builds.adoc deleted file mode 100644 index 0cb82d974fec..000000000000 --- a/modules/builds-strategy-s2i-incremental-builds.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -//* builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-s2i-incremental-builds_{context}"] -= Performing source-to-image incremental builds - -Source-to-image (S2I) can perform incremental builds, which means it reuses artifacts from previously-built images. - -.Procedure - -* To create an incremental build, apply the following modification to the strategy definition: -+ -[source,yaml] ----- -strategy: - sourceStrategy: - from: - kind: "ImageStreamTag" - name: "incremental-image:latest" <1> - incremental: true <2> ----- -<1> Specify an image that supports incremental builds. Consult the documentation of the builder image to determine if it supports this behavior. -<2> This flag controls whether an incremental build is attempted. If the builder image does not support incremental builds, the build will still succeed, but you will get a log message stating the incremental build was not successful because of a missing `save-artifacts` script. - -[role="_additional-resources"] -.Additional resources - -* See S2I Requirements for information on how to create a builder image supporting incremental builds. 
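As background for the incremental build note above, a builder image advertises incremental support by shipping a `save-artifacts` script that streams reusable artifacts to standard output as a tar archive. A minimal sketch, assuming the artifacts to reuse live under `./node_modules` (the path is an illustrative assumption):

[source,terminal]
----
#!/bin/sh
# Stream previously built artifacts to stdout as a tar archive.
# S2I extracts this archive into the workspace of the next incremental build.
tar cf - ./node_modules
----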
diff --git a/modules/builds-strategy-s2i-override-builder-image-scripts.adoc b/modules/builds-strategy-s2i-override-builder-image-scripts.adoc deleted file mode 100644 index f0a81b4c49e2..000000000000 --- a/modules/builds-strategy-s2i-override-builder-image-scripts.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-s2i-override-builder-image-scripts_{context}"] -= Overriding source-to-image builder image scripts - -You can override the `assemble`, `run`, and `save-artifacts` source-to-image (S2I) scripts provided by the builder image. - -.Procedure - -To override the `assemble`, `run`, and `save-artifacts` S2I scripts provided by the builder image, either: - -* Provide an `assemble`, `run`, or `save-artifacts` script in the `.s2i/bin` directory of your application source repository. -* Provide a URL of a directory containing the scripts as part of the strategy definition. For example: -+ -[source,yaml] ----- -strategy: - sourceStrategy: - from: - kind: "ImageStreamTag" - name: "builder-image:latest" - scripts: "http://somehost.com/scripts_directory" <1> ----- -<1> This path will have `run`, `assemble`, and `save-artifacts` appended to it. If any or all scripts are found they will be used in place of the same named scripts provided in the image. - -[NOTE] -==== -Files located at the `scripts` URL take precedence over files located in `.s2i/bin` of the source repository. -==== diff --git a/modules/builds-strategy-secrets-web-console.adoc b/modules/builds-strategy-secrets-web-console.adoc deleted file mode 100644 index f1af2f37776e..000000000000 --- a/modules/builds-strategy-secrets-web-console.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-secrets-web-console_{context}"] -= Adding secrets with web console - -You can add a secret to your build configuration so that it can access a private repository. - -.Procedure - -To add a secret to your build configuration so that it can access a private -repository from the {product-title} web console: - -. Create a new {product-title} project. - -. Create a secret that contains credentials for accessing a private source code -repository. - -. Create a build configuration. - -. On the build configuration editor page or in the `create app from builder image` page of the web console, set the *Source Secret*. - -. Click *Save*. - - -//[NOTE] -//==== -// This module needs specific instructions and examples. -// This is applicable for Docker, S2I, and Custom. -//==== diff --git a/modules/builds-triggers.adoc b/modules/builds-triggers.adoc deleted file mode 100644 index a2ec77d47ca7..000000000000 --- a/modules/builds-triggers.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -[id="builds-triggers_{context}"] -= Build triggers - -When defining a `BuildConfig`, you can define triggers to control the circumstances in which the `BuildConfig` should be run. 
The following build triggers are available: - -* Webhook -* Image change -* Configuration change diff --git a/modules/builds-troubleshooting-access-resources.adoc b/modules/builds-troubleshooting-access-resources.adoc deleted file mode 100644 index 678a0656c2ad..000000000000 --- a/modules/builds-troubleshooting-access-resources.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/troubleshooting-builds.adoc - -[id="builds-troubleshooting-access-resources_{context}"] -= Resolving denial for access to resources - -If your request for access to resources is denied: - -Issue:: -A build fails with: - -[source,terminal] ----- -requested access to the resource is denied ----- - -Resolution:: -You have exceeded one of the image quotas set on your project. Check your current quota and verify the limits applied and storage in use: - -[source,terminal] ----- -$ oc describe quota ----- diff --git a/modules/builds-troubleshooting-service-certificate-generation.adoc b/modules/builds-troubleshooting-service-certificate-generation.adoc deleted file mode 100644 index 42f3a772cb49..000000000000 --- a/modules/builds-troubleshooting-service-certificate-generation.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// *builds/troubleshooting-builds.adoc - -[id="builds-troubleshooting-service-certificate-generation_{context}"] -= Service certificate generation failure - -If your request for access to resources is denied: - -Issue:: -If a service certificate generation fails with (service's `service.beta.openshift.io/serving-cert-generation-error` annotation contains): - -.Example output -[source,terminal] ----- -secret/ssl-key references serviceUID 62ad25ca-d703-11e6-9d6f-0e9c0057b608, which does not match 77b6dd80-d716-11e6-9d6f-0e9c0057b60 ----- - -Resolution:: -The service that generated the certificate no longer exists, or has a different `serviceUID`. You must force certificates regeneration by removing the old secret, and clearing the following annotations on the service: `service.beta.openshift.io/serving-cert-generation-error` and `service.beta.openshift.io/serving-cert-generation-error-num`: - -[source,terminal] ----- -$ oc delete secret ----- - -[source,terminal] ----- -$ oc annotate service service.beta.openshift.io/serving-cert-generation-error- ----- - -[source,terminal] ----- -$ oc annotate service service.beta.openshift.io/serving-cert-generation-error-num- ----- - -[NOTE] -==== -The command removing annotation has a `-` after the annotation name to be -removed. -==== diff --git a/modules/builds-tutorial-pipeline.adoc b/modules/builds-tutorial-pipeline.adoc deleted file mode 100644 index a47415c5ae5f..000000000000 --- a/modules/builds-tutorial-pipeline.adoc +++ /dev/null @@ -1,224 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-tutorial-pipeline_{context}"] -= Pipeline build tutorial - -[IMPORTANT] -==== -The Pipeline build strategy is deprecated in {product-title} 4. Equivalent and improved functionality is present in the {product-title} Pipelines based on Tekton. - -Jenkins images on {product-title} are fully supported and users should follow Jenkins user documentation for defining their `jenkinsfile` in a job or store it in a Source Control Management system. 
-==== - -This example demonstrates how to create an {product-title} Pipeline that will build, deploy, and verify a `Node.js/MongoDB` application using the `nodejs-mongodb.json` template. - -.Procedure - -. Create the Jenkins master: -+ -[source,terminal] ----- - $ oc project ----- -+ -Select the project that you want to use or create a new project with `oc new-project `. -+ -[source,terminal] ----- - $ oc new-app jenkins-ephemeral <2> ----- -+ -If you want to use persistent storage, use `jenkins-persistent` instead. -+ -. Create a file named `nodejs-sample-pipeline.yaml` with the following content: -+ -[NOTE] -==== -This creates a `BuildConfig` object that employs the Jenkins pipeline strategy to build, deploy, and scale the `Node.js/MongoDB` example application. -==== -+ -[source,yaml] -+ ----- -kind: "BuildConfig" -apiVersion: "v1" -metadata: - name: "nodejs-sample-pipeline" -spec: - strategy: - jenkinsPipelineStrategy: - jenkinsfile: - type: JenkinsPipeline ----- -+ -. After you create a `BuildConfig` object with a `jenkinsPipelineStrategy`, tell the -pipeline what to do by using an inline `jenkinsfile`: -+ -[NOTE] -==== -This example does not set up a Git repository for the application. - -The following `jenkinsfile` content is written in Groovy using the {product-title} DSL. For this example, include inline content in the `BuildConfig` object using the YAML Literal Style, though including a `jenkinsfile` in your source repository is the preferred method. -==== -+ -[source,groovy] ----- -def templatePath = 'https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb.json' <1> -def templateName = 'nodejs-mongodb-example' <2> -pipeline { - agent { - node { - label 'nodejs' <3> - } - } - options { - timeout(time: 20, unit: 'MINUTES') <4> - } - stages { - stage('preamble') { - steps { - script { - openshift.withCluster() { - openshift.withProject() { - echo "Using project: ${openshift.project()}" - } - } - } - } - } - stage('cleanup') { - steps { - script { - openshift.withCluster() { - openshift.withProject() { - openshift.selector("all", [ template : templateName ]).delete() <5> - if (openshift.selector("secrets", templateName).exists()) { <6> - openshift.selector("secrets", templateName).delete() - } - } - } - } - } - } - stage('create') { - steps { - script { - openshift.withCluster() { - openshift.withProject() { - openshift.newApp(templatePath) <7> - } - } - } - } - } - stage('build') { - steps { - script { - openshift.withCluster() { - openshift.withProject() { - def builds = openshift.selector("bc", templateName).related('builds') - timeout(5) { <8> - builds.untilEach(1) { - return (it.object().status.phase == "Complete") - } - } - } - } - } - } - } - stage('deploy') { - steps { - script { - openshift.withCluster() { - openshift.withProject() { - def rm = openshift.selector("dc", templateName).rollout() - timeout(5) { <9> - openshift.selector("dc", templateName).related('pods').untilEach(1) { - return (it.object().status.phase == "Running") - } - } - } - } - } - } - } - stage('tag') { - steps { - script { - openshift.withCluster() { - openshift.withProject() { - openshift.tag("${templateName}:latest", "${templateName}-staging:latest") <10> - } - } - } - } - } - } -} ----- -<1> Path of the template to use. -<2> Name of the template that will be created. -<3> Spin up a `node.js` agent pod on which to run this build. -<4> Set a timeout of 20 minutes for this pipeline. -<5> Delete everything with this template label. 
-<6> Delete any secrets with this template label. -<7> Create a new application from the `templatePath`. -<8> Wait up to five minutes for the build to complete. -<9> Wait up to five minutes for the deployment to complete. -<10> If everything else succeeded, tag the `$ {templateName}:latest` image as -`$ {templateName}-staging:latest`. A pipeline build configuration for the staging -environment can watch for the `$ {templateName}-staging:latest` image to change -and then deploy it to the staging environment. -+ -[NOTE] -==== -The previous example was written using the declarative pipeline style, but the older scripted pipeline style is also supported. -==== -+ -. Create the Pipeline `BuildConfig` in your {product-title} cluster: -+ -[source,terminal] ----- -$ oc create -f nodejs-sample-pipeline.yaml ----- -+ -.. If you do not want to create your own file, you can use the sample from the Origin repository by running: -+ -[source,terminal] ----- -$ oc create -f https://raw.githubusercontent.com/openshift/origin/master/examples/jenkins/pipeline/nodejs-sample-pipeline.yaml ----- -+ -. Start the Pipeline: -+ -[source,terminal] ----- -$ oc start-build nodejs-sample-pipeline ----- -+ -[NOTE] -==== -Alternatively, you can start your pipeline with the {product-title} web console by navigating to the Builds -> Pipeline section and clicking *Start Pipeline*, or by visiting the Jenkins Console, navigating to the Pipeline that you created, and clicking *Build Now*. -==== -+ -Once the pipeline is started, you should see the following actions performed within your project: -+ -* A job instance is created on the Jenkins server. -* An agent pod is launched, if your pipeline requires one. -* The pipeline runs on the agent pod, or the master if no agent is required. -** Any previously created resources with the `template=nodejs-mongodb-example` label will be deleted. -** A new application, and all of its associated resources, will be created from the `nodejs-mongodb-example` template. -** A build will be started using the `nodejs-mongodb-example` `BuildConfig`. -*** The pipeline will wait until the build has completed to trigger the next stage. -** A deployment will be started using the `nodejs-mongodb-example` deployment configuration. -*** The pipeline will wait until the deployment has completed to trigger the next stage. -** If the build and deploy are successful, the `nodejs-mongodb-example:latest` image will be tagged as `nodejs-mongodb-example:stage`. -* The agent pod is deleted, if one was required for the pipeline. -+ -[NOTE] -==== -The best way to visualize the pipeline execution is by viewing it in the {product-title} web console. You can view your pipelines by logging in to the web console and navigating to Builds -> Pipelines. -==== diff --git a/modules/builds-understanding-openshift-pipeline.adoc b/modules/builds-understanding-openshift-pipeline.adoc deleted file mode 100644 index af71e4a98ff0..000000000000 --- a/modules/builds-understanding-openshift-pipeline.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -//* builds/build-strategies.adoc - -:_content-type: CONCEPT -[id="builds-understanding-openshift-pipeline_{context}"] -= Understanding {product-title} pipelines - -[IMPORTANT] -==== -The Pipeline build strategy is deprecated in {product-title} 4. Equivalent and improved functionality is present in the {product-title} Pipelines based on Tekton. 
- -Jenkins images on {product-title} are fully supported and users should follow Jenkins user documentation for defining their `jenkinsfile` in a job or store it in a Source Control Management system. -==== - -Pipelines give you control over building, deploying, and promoting your applications on {product-title}. Using a combination of the Jenkins Pipeline build strategy, `jenkinsfiles`, and the {product-title} Domain Specific Language (DSL) provided by the Jenkins Client Plugin, you can create advanced build, test, deploy, and promote pipelines for any scenario. - -*{product-title} Jenkins Sync Plugin* - -The {product-title} Jenkins Sync Plugin keeps the build configuration and build objects in sync with Jenkins jobs and builds, and provides the following: - - * Dynamic job and run creation in Jenkins. - * Dynamic creation of agent pod templates from image streams, image stream tags, or config maps. - * Injection of environment variables. - * Pipeline visualization in the {product-title} web console. - * Integration with the Jenkins Git plugin, which passes commit information from {product-title} builds to the Jenkins Git plugin. - * Synchronization of secrets into Jenkins credential entries. - -*{product-title} Jenkins Client Plugin* - -The {product-title} Jenkins Client Plugin is a Jenkins plugin which aims to provide a readable, concise, comprehensive, and fluent Jenkins Pipeline syntax for rich interactions with an {product-title} API Server. The plugin uses the {product-title} command line tool, `oc`, which must be available on the nodes executing the script. - -The Jenkins Client Plugin must be installed on your Jenkins master so the {product-title} DSL will be available to use within the `jenkinsfile` for your application. This plugin is installed and enabled by default when using the {product-title} Jenkins image. - -For {product-title} Pipelines within your project, you will must use the Jenkins Pipeline Build Strategy. This strategy defaults to using a `jenkinsfile` at the root of your source repository, but also provides the following configuration options: - -* An inline `jenkinsfile` field within your build configuration. -* A `jenkinsfilePath` field within your build configuration that references the location of the `jenkinsfile` to use relative to the source `contextDir`. - -[NOTE] -==== -The optional `jenkinsfilePath` field specifies the name of the file to use, relative to the source `contextDir`. If `contextDir` is omitted, it defaults to the root of the repository. If `jenkinsfilePath` is omitted, it defaults to `jenkinsfile`. -==== diff --git a/modules/builds-use-custom-builder-image.adoc b/modules/builds-use-custom-builder-image.adoc deleted file mode 100644 index 06e7f899f50d..000000000000 --- a/modules/builds-use-custom-builder-image.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/custom-builds-buildah.adoc - - -:_content-type: PROCEDURE -[id="builds-use-custom-builder-image_{context}"] -= Use custom builder image - -You can define a `BuildConfig` object that uses the custom strategy in conjunction with your custom builder image to execute your custom build logic. - -.Prerequisites - -* Define all the required inputs for new custom builder image. -* Build your custom builder image. - -.Procedure - -. Create a file named `buildconfig.yaml`. 
This file defines the `BuildConfig` object that is created in your project and executed: -+ -[source,yaml] ----- -kind: BuildConfig -apiVersion: build.openshift.io/v1 -metadata: - name: sample-custom-build - labels: - name: sample-custom-build - annotations: - template.alpha.openshift.io/wait-for-ready: 'true' -spec: - strategy: - type: Custom - customStrategy: - forcePull: true - from: - kind: ImageStreamTag - name: custom-builder-image:latest - namespace: <1> - output: - to: - kind: ImageStreamTag - name: sample-custom:latest ----- -<1> Specify your project name. - -. Create the `BuildConfig`: -+ -[source,terminal] ----- -$ oc create -f buildconfig.yaml ----- - -. Create a file named `imagestream.yaml`. This file defines the image stream to which the build will push the image: -+ -[source,yaml] ----- -kind: ImageStream -apiVersion: image.openshift.io/v1 -metadata: - name: sample-custom -spec: {} ----- - -. Create the imagestream: -+ -[source,terminal] ----- -$ oc create -f imagestream.yaml ----- - -. Run your custom build: -+ -[source,terminal] ----- -$ oc start-build sample-custom-build -F ----- -+ -When the build runs, it launches a pod running the custom builder image that was built earlier. The pod runs the `build.sh` logic that is defined as the entrypoint for the custom builder image. The `build.sh` logic invokes Buildah to build the `dockerfile.sample` that was embedded in the custom builder image, and then uses Buildah to push the new image to the `sample-custom image stream`. diff --git a/modules/builds-using-bitbucket-webhooks.adoc b/modules/builds-using-bitbucket-webhooks.adoc deleted file mode 100644 index b10cac521aba..000000000000 --- a/modules/builds-using-bitbucket-webhooks.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -:_content-type: PROCEDURE -[id="builds-using-bitbucket-webhooks_{context}"] -= Using Bitbucket webhooks - -link:https://confluence.atlassian.com/bitbucket/manage-webhooks-735643732.html[Bitbucket webhooks] handle the call made by Bitbucket when a repository is updated. Similar to the previous triggers, you must specify a secret. The following example is a trigger definition YAML within the `BuildConfig`: - -[source,yaml] ----- -type: "Bitbucket" -bitbucket: - secretReference: - name: "mysecret" ----- - -The payload URL is returned as the Bitbucket Webhook URL by the `oc describe` command, and is structured as follows: - -.Example output -[source,terminal] ----- -https:///apis/build.openshift.io/v1/namespaces//buildconfigs//webhooks//bitbucket ----- - -.Procedure - -. To configure a Bitbucket Webhook: - -.. Describe the 'BuildConfig' to get the webhook URL: -+ -[source,terminal] ----- -$ oc describe bc ----- - -.. Copy the webhook URL, replacing `` with your secret value. - -.. Follow the link:https://confluence.atlassian.com/bitbucket/manage-webhooks-735643732.html[Bitbucket setup instructions] to paste the webhook URL into your Bitbucket repository settings. - -. Given a file containing a valid JSON payload, such as `payload.json`, you can -manually trigger the webhook with `curl`: -+ -[source,terminal] ----- -$ curl -H "X-Event-Key: repo:push" -H "Content-Type: application/json" -k -X POST --data-binary @payload.json https:///apis/build.openshift.io/v1/namespaces//buildconfigs//webhooks//bitbucket ----- -+ -The `-k` argument is only necessary if your API server does not have a properly signed certificate. 
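The secret named in `secretReference` must exist in the same namespace as the `BuildConfig`. A minimal sketch of creating it, assuming the conventional `WebHookSecretKey` key and an illustrative value:

[source,terminal]
----
$ oc create secret generic mysecret \
    --from-literal=WebHookSecretKey=<secret_value>
----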
diff --git a/modules/builds-using-build-fields-as-environment-variables.adoc b/modules/builds-using-build-fields-as-environment-variables.adoc deleted file mode 100644 index a5f5637df94e..000000000000 --- a/modules/builds-using-build-fields-as-environment-variables.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-using-build-fields-as-environment-variables_{context}"] -= Using build fields as environment variables - -You can inject information about the build object by setting the `fieldPath` environment variable source to the `JsonPath` of the field from which you are interested in obtaining the value. - -[NOTE] -==== -Jenkins Pipeline strategy does not support `valueFrom` syntax for environment variables. -==== - -.Procedure - -* Set the `fieldPath` environment variable source to the `JsonPath` of the field from which you are interested in obtaining the value: -+ -[source,yaml] ----- -env: - - name: FIELDREF_ENV - valueFrom: - fieldRef: - fieldPath: metadata.name ----- diff --git a/modules/builds-using-build-volumes.adoc b/modules/builds-using-build-volumes.adoc deleted file mode 100644 index 90a499f783b8..000000000000 --- a/modules/builds-using-build-volumes.adoc +++ /dev/null @@ -1,127 +0,0 @@ -ifeval::["{context}" == "build-strategies-docker"] -:dockerstrategy: -endif::[] -ifeval::["{context}" == "build-strategies-s2i"] -:sourcestrategy: -endif::[] - -:_content-type: PROCEDURE -[id="builds-using-build-volumes_{context}"] -= Using build volumes - -You can mount build volumes to give running builds access to information that you don't want to persist in the output container image. - -Build volumes provide sensitive information, such as repository credentials, that the build environment or configuration only needs at build time. Build volumes are different from xref:../../cicd/builds/creating-build-inputs.adoc#builds-define-build-inputs_creating-build-inputs[build inputs], whose data can persist in the output container image. - -The mount points of build volumes, from which the running build reads data, are functionally similar to link:https://kubernetes.io/docs/concepts/storage/volumes/[pod volume mounts]. - -.Prerequisites -* You have xref:../../cicd/builds/creating-build-inputs.adoc#builds-input-secrets-configmaps_creating-build-inputs[added an input secret, config map, or both to a BuildConfig object]. - -.Procedure - -ifdef::dockerstrategy[] - -* In the `dockerStrategy` definition of the `BuildConfig` object, add any build volumes to the `volumes` array. For example: -+ -[source,yaml] ----- -spec: - dockerStrategy: - volumes: - - name: secret-mvn <1> - mounts: - - destinationPath: /opt/app-root/src/.ssh <2> - source: - type: Secret <3> - secret: - secretName: my-secret <4> - - name: settings-mvn <1> - mounts: - - destinationPath: /opt/app-root/src/.m2 <2> - source: - type: ConfigMap <3> - configMap: - name: my-config <4> - - name: my-csi-volume <1> - mounts: - - destinationPath: /opt/app-root/src/some_path <2> - source: - type: CSI <3> - csi: - driver: csi.sharedresource.openshift.io <5> - readOnly: true <6> - volumeAttributes: <7> - attribute: value ----- -<1> Required. A unique name. -<2> Required. The absolute path of the mount point. It must not contain `..` or `:` and doesn't collide with the destination path generated by the builder. The `/opt/app-root/src` is the default home directory for many Red Hat S2I-enabled images. -<3> Required. 
The type of source, `ConfigMap`, `Secret`, or `CSI`. -<4> Required. The name of the source. -<5> Required. The driver that provides the ephemeral CSI volume. -<6> Required. This value must be set to `true`. Provides a read-only volume. -<7> Optional. The volume attributes of the ephemeral CSI volume. Consult the CSI driver's documentation for supported attribute keys and values. - -[NOTE] -==== -The Shared Resource CSI Driver is supported as a Technology Preview feature. -==== - -endif::dockerstrategy[] - -ifdef::sourcestrategy[] - -* In the `sourceStrategy` definition of the `BuildConfig` object, add any build volumes to the `volumes` array. For example: -+ -[source,yaml] ----- -spec: - sourceStrategy: - volumes: - - name: secret-mvn <1> - mounts: - - destinationPath: /opt/app-root/src/.ssh <2> - source: - type: Secret <3> - secret: - secretName: my-secret <4> - - name: settings-mvn <1> - mounts: - - destinationPath: /opt/app-root/src/.m2 <2> - source: - type: ConfigMap <3> - configMap: - name: my-config <4> - - name: my-csi-volume <1> - mounts: - - destinationPath: /opt/app-root/src/some_path <2> - source: - type: CSI <3> - csi: - driver: csi.sharedresource.openshift.io <5> - readOnly: true <6> - volumeAttributes: <7> - attribute: value ----- - -<1> Required. A unique name. -<2> Required. The absolute path of the mount point. It must not contain `..` or `:` and doesn't collide with the destination path generated by the builder. The `/opt/app-root/src` is the default home directory for many Red Hat S2I-enabled images. -<3> Required. The type of source, `ConfigMap`, `Secret`, or `CSI`. -<4> Required. The name of the source. -<5> Required. The driver that provides the ephemeral CSI volume. -<6> Required. This value must be set to `true`. Provides a read-only volume. -<7> Optional. The volume attributes of the ephemeral CSI volume. Consult the CSI driver's documentation for supported attribute keys and values. - -[NOTE] -==== -The Shared Resource CSI Driver is supported as a Technology Preview feature. -==== - -endif::sourcestrategy[] - -ifeval::["{context}" == "build-strategies-docker"] -:!dockerstrategy: -endif::[] -ifeval::["{context}" == "build-strategies-s2i"] -:!sourcestrategy: -endif::[] diff --git a/modules/builds-using-cli-post-commit-build-hooks.adoc b/modules/builds-using-cli-post-commit-build-hooks.adoc deleted file mode 100644 index 75fdddda29c8..000000000000 --- a/modules/builds-using-cli-post-commit-build-hooks.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -:_content-type: PROCEDURE -[id="builds-using-cli-post-commit-build-hooks_{context}"] -= Using the CLI to set post commit build hooks - -The `oc set build-hook` command can be used to set the build hook for a build configuration. - -.Procedure - -. To set a command as the post-commit build hook: -+ -[source,terminal] ----- -$ oc set build-hook bc/mybc \ - --post-commit \ - --command \ - -- bundle exec rake test --verbose ----- -+ -. 
To set a script as the post-commit build hook: -+ -[source,terminal] ----- -$ oc set build-hook bc/mybc --post-commit --script="bundle exec rake test --verbose" ----- diff --git a/modules/builds-using-external-artifacts.adoc b/modules/builds-using-external-artifacts.adoc deleted file mode 100644 index 60a39fea15fa..000000000000 --- a/modules/builds-using-external-artifacts.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/creating-build-inputs.adoc - -[id="builds-using-external-artifacts_{context}"] -= External artifacts - -It is not recommended to store binary files in a source repository. Therefore, you must define a build which pulls additional files, such as Java `.jar` dependencies, during the build process. How this is done depends on the build strategy you are using. - -For a Source build strategy, you must put appropriate shell commands into the `assemble` script: - -.`.s2i/bin/assemble` File -[source,terminal] ----- -#!/bin/sh -APP_VERSION=1.0 -wget http://repository.example.com/app/app-$APP_VERSION.jar -O app.jar ----- - -.`.s2i/bin/run` File -[source,terminal] ----- -#!/bin/sh -exec java -jar app.jar ----- - -ifndef::openshift-online[] -For a Docker build strategy, you must modify the Dockerfile and invoke -shell commands with the link:https://docs.docker.com/engine/reference/builder/#run[`RUN` instruction]: - -.Excerpt of Dockerfile -[source,terminal] ----- -FROM jboss/base-jdk:8 - -ENV APP_VERSION 1.0 -RUN wget http://repository.example.com/app/app-$APP_VERSION.jar -O app.jar - -EXPOSE 8080 -CMD [ "java", "-jar", "app.jar" ] ----- -endif::[] - -In practice, you may want to use an environment variable for the file location so that the specific file to be downloaded can be customized using an environment variable defined on the `BuildConfig`, rather than updating the -ifndef::openshift-online[] -Dockerfile or -endif::[] -`assemble` script. - -You can choose between different methods of defining environment variables: - -* Using the `.s2i/environment` file] (only for a Source build strategy) -* Setting in `BuildConfig` -* Providing explicitly using `oc start-build --env` (only for builds that are triggered manually) - -//[role="_additional-resources"] -//.Additional resources -//* For more information on how to control which *_assemble_* and *_run_* script is -//used by a Source build, see Overriding builder image scripts. diff --git a/modules/builds-using-generic-webhooks.adoc b/modules/builds-using-generic-webhooks.adoc deleted file mode 100644 index 22b4ed0b0bed..000000000000 --- a/modules/builds-using-generic-webhooks.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -:_content-type: PROCEDURE -[id="builds-using-generic-webhooks_{context}"] -= Using generic webhooks - -Generic webhooks are invoked from any system capable of making a web request. As with the other webhooks, you must specify a secret, which is part of the URL that the caller must use to trigger the build. The secret ensures the uniqueness of the URL, preventing others from triggering the build. The following is an example trigger definition YAML within the `BuildConfig`: - -[source,yaml] ----- -type: "Generic" -generic: - secretReference: - name: "mysecret" - allowEnv: true <1> ----- -<1> Set to `true` to allow a generic webhook to pass in environment variables. - -.Procedure - -. 
To set up the caller, supply the calling system with the URL of the generic -webhook endpoint for your build: -+ -.Example output -[source,terminal] ----- -https:///apis/build.openshift.io/v1/namespaces//buildconfigs//webhooks//generic ----- -+ -The caller must invoke the webhook as a `POST` operation. - -. To invoke the webhook manually you can use `curl`: -+ -[source,terminal] ----- -$ curl -X POST -k https:///apis/build.openshift.io/v1/namespaces//buildconfigs//webhooks//generic ----- -+ -The HTTP verb must be set to `POST`. The insecure `-k` flag is specified to ignore certificate validation. This second flag is not necessary if your cluster has properly signed certificates. -+ -The endpoint can accept an optional payload with the following format: -+ -[source,yaml] ----- -git: - uri: "" - ref: "" - commit: "" - author: - name: "" - email: "" - committer: - name: "" - email: "" - message: "" -env: <1> - - name: "" - value: "" ----- -<1> Similar to the `BuildConfig` environment variables, the environment variables defined here are made available to your build. If these variables collide with the `BuildConfig` environment variables, these variables take precedence. By default, environment variables passed by webhook are ignored. Set the `allowEnv` field to `true` on the webhook definition to enable this behavior. - -. To pass this payload using `curl`, define it in a file named `payload_file.yaml` and run: -+ -[source,terminal] ----- -$ curl -H "Content-Type: application/yaml" --data-binary @payload_file.yaml -X POST -k https:///apis/build.openshift.io/v1/namespaces//buildconfigs//webhooks//generic ----- -+ -The arguments are the same as the previous example with the addition of a header and a payload. The `-H` argument sets the `Content-Type` header to `application/yaml` or `application/json` depending on your payload format. The `--data-binary` argument is used to send a binary payload with newlines intact with the `POST` request. - -[NOTE] -==== -{product-title} permits builds to be triggered by the generic webhook even if an invalid request payload is presented, for example, invalid content type, unparsable or invalid content, and so on. This behavior is maintained for backwards compatibility. If an invalid request payload is presented, {product-title} returns a warning in JSON format as part of its `HTTP 200 OK` response. -==== diff --git a/modules/builds-using-github-webhooks.adoc b/modules/builds-using-github-webhooks.adoc deleted file mode 100644 index 104cd1fd173a..000000000000 --- a/modules/builds-using-github-webhooks.adoc +++ /dev/null @@ -1,91 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -:_content-type: PROCEDURE -[id="builds-using-github-webhooks_{context}"] -= Using GitHub webhooks - -GitHub webhooks handle the call made by GitHub when a repository is updated. When defining the trigger, you must specify a secret, which is part of the URL you supply to GitHub when configuring the webhook. - -Example GitHub webhook definition: - -[source,yaml] ----- -type: "GitHub" -github: - secretReference: - name: "mysecret" ----- - -[NOTE] -==== -The secret used in the webhook trigger configuration is not the same as `secret` field you encounter when configuring webhook in GitHub UI. The former is to make the webhook URL unique and hard to predict, the latter is an optional string field used to create HMAC hex digest of the body, which is sent as an `X-Hub-Signature` header. 
-==== - -The payload URL is returned as the GitHub Webhook URL by the `oc describe` -command (see Displaying Webhook URLs), and is structured as follows: - -.Example output -[source,terminal] ----- -https:///apis/build.openshift.io/v1/namespaces//buildconfigs//webhooks//github ----- - -.Prerequisites - -* Create a `BuildConfig` from a GitHub repository. - -.Procedure - -. To configure a GitHub Webhook: - -.. After creating a `BuildConfig` from a GitHub repository, run: -+ -[source,terminal] ----- -$ oc describe bc/ ----- -+ -This generates a webhook GitHub URL that looks like: -+ -.Example output -[source,terminal] ----- -/buildconfigs//webhooks//github ----- - -.. Cut and paste this URL into GitHub, from the GitHub web console. - -.. In your GitHub repository, select *Add Webhook* from *Settings -> Webhooks*. - -.. Paste the URL output into the *Payload URL* field. - -.. Change the *Content Type* from GitHub's default `application/x-www-form-urlencoded` to `application/json`. - -.. Click *Add webhook*. -+ -You should see a message from GitHub stating that your webhook was successfully configured. -+ -Now, when you push a change to your GitHub repository, a new build automatically starts, and upon a successful build a new deployment starts. -+ -[NOTE] -==== -link:https://gogs.io[Gogs] supports the same webhook payload format as GitHub. Therefore, if you are using a Gogs server, you can define a GitHub webhook trigger on your `BuildConfig` and trigger it by your Gogs server as well. -==== - -. Given a file containing a valid JSON payload, such as `payload.json`, you can manually trigger the webhook with `curl`: -+ -[source,terminal] ----- -$ curl -H "X-GitHub-Event: push" -H "Content-Type: application/json" -k -X POST --data-binary @payload.json https:///apis/build.openshift.io/v1/namespaces//buildconfigs//webhooks//github ----- -+ -The `-k` argument is only necessary if your API server does not have a properly -signed certificate. - -[role="_additional-resources"] -.Additional resources - -//* link:https://developer.github.com/webhooks/[GitHub] -* link:https://gogs.io[Gogs] diff --git a/modules/builds-using-gitlab-webhooks.adoc b/modules/builds-using-gitlab-webhooks.adoc deleted file mode 100644 index 6629fa6b2f2f..000000000000 --- a/modules/builds-using-gitlab-webhooks.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -:_content-type: PROCEDURE -[id="builds-using-gitlab-webhooks_{context}"] -= Using GitLab webhooks - -GitLab webhooks handle the call made by GitLab when a repository is updated. As with the GitHub triggers, you must specify a secret. The following example is a trigger definition YAML within the `BuildConfig`: - -[source,yaml] ----- -type: "GitLab" -gitlab: - secretReference: - name: "mysecret" ----- - -The payload URL is returned as the GitLab Webhook URL by the `oc describe` command, and is structured as follows: - -.Example output -[source,terminal] ----- -https:///apis/build.openshift.io/v1/namespaces//buildconfigs//webhooks//gitlab ----- - -.Procedure - -. To configure a GitLab Webhook: - -.. Describe the `BuildConfig` to get the webhook URL: -+ -[source,terminal] ----- -$ oc describe bc ----- - -.. Copy the webhook URL, replacing `` with your secret value. - -.. Follow the link:https://docs.gitlab.com/ce/user/project/integrations/webhooks.html#webhooks[GitLab setup instructions] -to paste the webhook URL into your GitLab repository settings. - -. 
Given a file containing a valid JSON payload, such as `payload.json`, you can -manually trigger the webhook with `curl`: -+ -[source,terminal] ----- -$ curl -H "X-GitLab-Event: Push Hook" -H "Content-Type: application/json" -k -X POST --data-binary @payload.json https:///apis/build.openshift.io/v1/namespaces//buildconfigs//webhooks//gitlab ----- -+ -The `-k` argument is only necessary if your API server does not have a properly -signed certificate. - -//// -[role="_additional-resources"] -.Additional resources -//// -//* link:https://docs.gitlab.com/ce/user/project/integrations/webhooks.html[GitLab] diff --git a/modules/builds-using-image-change-triggers.adoc b/modules/builds-using-image-change-triggers.adoc deleted file mode 100644 index d4af4c231e65..000000000000 --- a/modules/builds-using-image-change-triggers.adoc +++ /dev/null @@ -1,99 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -:_content-type: PROCEDURE -[id="builds-using-image-change-triggers_{context}"] -= Using image change triggers - -As a developer, you can configure your build to run automatically every time a base image changes. - -You can use image change triggers to automatically invoke your build when a new version of an upstream image is available. For example, if a build is based on a RHEL image, you can trigger that build to run any time the RHEL image changes. As a result, the application image is always running on the latest RHEL base image. - -[NOTE] -==== -Image streams that point to container images in link:http://docs.docker.com/v1.7/reference/api/hub_registry_spec/#docker-registry-1-0[v1 container registries] only trigger a build once when the image stream tag becomes available and not on subsequent image updates. This is due to the lack of uniquely identifiable images in v1 container registries. -==== - -.Procedure - -. Define an `ImageStream` that points to the upstream image you want to use as a trigger: -+ -[source,yaml] ----- -kind: "ImageStream" -apiVersion: "v1" -metadata: - name: "ruby-20-centos7" ----- -+ -This defines the image stream that is tied to a container image repository located at `__/__/ruby-20-centos7`. The `` is defined as a service with the name `docker-registry` running in {product-title}. - -. If an image stream is the base image for the build, set the `from` field in the build strategy to point to the `ImageStream`: -+ -[source,yaml] ----- -strategy: - sourceStrategy: - from: - kind: "ImageStreamTag" - name: "ruby-20-centos7:latest" ----- -+ -In this case, the `sourceStrategy` definition is consuming the `latest` tag of the image stream named `ruby-20-centos7` located within this namespace. - -. Define a build with one or more triggers that point to `ImageStreams`: -+ -[source,yaml] ----- -type: "ImageChange" <1> -imageChange: {} -type: "ImageChange" <2> -imageChange: - from: - kind: "ImageStreamTag" - name: "custom-image:latest" ----- -<1> An image change trigger that monitors the `ImageStream` and `Tag` as defined by the build strategy's `from` field. The `imageChange` object here must be empty. -<2> An image change trigger that monitors an arbitrary image stream. The `imageChange` part, in this case, must include a `from` field that references the `ImageStreamTag` to monitor. - -When using an image change trigger for the strategy image stream, the generated build is supplied with an immutable docker tag that points to the latest image corresponding to that tag. 
This new image reference is used by the strategy when it executes for the build. - -For other image change triggers that do not reference the strategy image stream, a new build is started, but the build strategy is not updated with a unique image reference. - -Since this example has an image change trigger for the strategy, the resulting build is: - -[source,yaml] ----- -strategy: - sourceStrategy: - from: - kind: "DockerImage" - name: "172.30.17.3:5001/mynamespace/ruby-20-centos7:" ----- - -This ensures that the triggered build uses the new image that was just pushed to the repository, and the build can be re-run any time with the same inputs. - -You can pause an image change trigger to allow multiple changes on the referenced image stream before a build is started. You can also set the `paused` attribute to true when initially adding an `ImageChangeTrigger` to a `BuildConfig` to prevent a build from being immediately triggered. - -[source,yaml] ----- -type: "ImageChange" -imageChange: - from: - kind: "ImageStreamTag" - name: "custom-image:latest" - paused: true ----- - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -In addition to setting the image field for all `Strategy` types, for custom builds, the `OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE` environment variable is checked. -If it does not exist, then it is created with the immutable image reference. If it does exist, then it is updated with the immutable image reference. -endif::[] - -If a build is triggered due to a webhook trigger or manual request, the build that is created uses the `` resolved from the `ImageStream` referenced by the `Strategy`. This ensures that builds are performed using consistent image tags for ease of reproduction. - -[role="_additional-resources"] -.Additional resources - -* link:http://docs.docker.com/v1.7/reference/api/hub_registry_spec/#docker-registry-1-0[v1 container registries] diff --git a/modules/builds-using-proxy-git-cloning.adoc b/modules/builds-using-proxy-git-cloning.adoc deleted file mode 100644 index 56c89d8be2da..000000000000 --- a/modules/builds-using-proxy-git-cloning.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -[id="builds-using-proxy-git-cloning_{context}"] -= Using a proxy - -If your Git repository can only be accessed using a proxy, you can define the proxy to use in the `source` section of the build configuration. You can configure both an HTTP and HTTPS proxy to use. Both fields are optional. Domains for which no proxying should be performed can also be specified in the `NoProxy` field. - -[NOTE] -==== -Your source URI must use the HTTP or HTTPS protocol for this to work. -==== - -[source,yaml] ----- -source: - git: - uri: "https://github.com/openshift/ruby-hello-world" - ref: "master" - httpProxy: http://proxy.example.com - httpsProxy: https://proxy.example.com - noProxy: somedomain.com, otherdomain.com ----- - -[NOTE] -==== -For Pipeline strategy builds, given the current restrictions with the Git plugin for Jenkins, any Git operations through the Git plugin do not leverage the HTTP or HTTPS proxy defined in the `BuildConfig`. The Git plugin only uses the proxy configured in the Jenkins UI at the Plugin Manager panel. This proxy is then used for all git interactions within Jenkins, across all jobs. 
-==== - -[role="_additional-resources"] -.Additional resources - -* You can find instructions on how to configure proxies through the Jenkins UI at link:https://wiki.jenkins-ci.org/display/JENKINS/JenkinsBehindProxy[JenkinsBehindProxy]. diff --git a/modules/builds-using-secrets-as-environment-variables.adoc b/modules/builds-using-secrets-as-environment-variables.adoc deleted file mode 100644 index 9ffb37083e5d..000000000000 --- a/modules/builds-using-secrets-as-environment-variables.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-using-secrets-as-environment-variables_{context}"] -= Using secrets as environment variables - -You can make key values from secrets available as environment variables using the `valueFrom` syntax. - -[IMPORTANT] -==== -This method shows the secrets as plain text in the output of the build pod console. To avoid this, use input secrets and config maps instead. -==== - -.Procedure - -* To use a secret as an environment variable, set the `valueFrom` syntax: -+ -[source,yaml] ----- -apiVersion: build.openshift.io/v1 -kind: BuildConfig -metadata: - name: secret-example-bc -spec: - strategy: - sourceStrategy: - env: - - name: MYVAL - valueFrom: - secretKeyRef: - key: myval - name: mysecret ----- diff --git a/modules/builds-using-secrets.adoc b/modules/builds-using-secrets.adoc deleted file mode 100644 index a864e28eb8e9..000000000000 --- a/modules/builds-using-secrets.adoc +++ /dev/null @@ -1,121 +0,0 @@ -// Module included in the following assemblies: -// * builds/creating-build-inputs.adoc - - -:_content-type: PROCEDURE -[id="builds-using-secrets_{context}"] -= Using secrets - -After creating secrets, you can create a pod to reference your secret, get logs, and delete the pod. - -.Procedure - -. Create the pod to reference your secret: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -. Get the logs: -+ -[source,terminal] ----- -$ oc logs secret-example-pod ----- - -. Delete the pod: -+ -[source,terminal] ----- -$ oc delete pod secret-example-pod ----- - -[role="_additional-resources"] -.Additional resources - -* Example YAML files with secret data: -+ -.YAML Secret That Will Create Four Files -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: test-secret -data: - username: <1> - password: <2> -stringData: - hostname: myapp.mydomain.com <3> - secret.properties: |- <4> - property1=valueA - property2=valueB ----- -<1> File contains decoded values. -<2> File contains decoded values. -<3> File contains the provided string. -<4> File contains the provided data. 
-+ -.YAML of a pod populating files in a volume with secret data -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: secret-example-pod -spec: - containers: - - name: secret-test-container - image: busybox - command: [ "/bin/sh", "-c", "cat /etc/secret-volume/*" ] - volumeMounts: - # name must match the volume name below - - name: secret-volume - mountPath: /etc/secret-volume - readOnly: true - volumes: - - name: secret-volume - secret: - secretName: test-secret - restartPolicy: Never ----- -+ -.YAML of a pod populating environment variables with secret data -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: secret-example-pod -spec: - containers: - - name: secret-test-container - image: busybox - command: [ "/bin/sh", "-c", "export" ] - env: - - name: TEST_SECRET_USERNAME_ENV_VAR - valueFrom: - secretKeyRef: - name: test-secret - key: username - restartPolicy: Never ----- -+ -.YAML of a Build Config Populating Environment Variables with Secret Data -[source,yaml] ----- -apiVersion: build.openshift.io/v1 -kind: BuildConfig -metadata: - name: secret-example-bc -spec: - strategy: - sourceStrategy: - env: - - name: TEST_SECRET_USERNAME_ENV_VAR - valueFrom: - secretKeyRef: - name: test-secret - key: username ----- diff --git a/modules/builds-webhook-triggers.adoc b/modules/builds-webhook-triggers.adoc deleted file mode 100644 index 38c80b0d3c5d..000000000000 --- a/modules/builds-webhook-triggers.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -[id="builds-webhook-triggers_{context}"] -= Webhook triggers - -Webhook triggers allow you to trigger a new build by sending a request to the {product-title} API endpoint. You can define these triggers using GitHub, GitLab, Bitbucket, or Generic webhooks. - -Currently, {product-title} webhooks only support the analogous versions of the push event for each of the Git-based Source Code Management (SCM) systems. All other event types are ignored. - -When the push events are processed, the {product-title} control plane host confirms if the branch reference inside the event matches the branch reference in the corresponding `BuildConfig`. If so, it then checks out the exact commit reference noted in the webhook event on the {product-title} build. If they do not match, no build is triggered. - -[NOTE] -==== -`oc new-app` and `oc new-build` create GitHub and Generic webhook triggers automatically, but any other needed webhook triggers must be added manually. You can manually add triggers by setting triggers. -==== - -For all webhooks, you must define a secret with a key named `WebHookSecretKey` and the value being the value to be supplied when invoking the webhook. The webhook definition must then reference the secret. The secret ensures the uniqueness of the URL, preventing others from triggering the build. The value of the key is compared to the secret provided during the webhook invocation. - -For example here is a GitHub webhook with a reference to a secret named `mysecret`: - -[source,yaml] ----- -type: "GitHub" -github: - secretReference: - name: "mysecret" ----- - -The secret is then defined as follows. Note that the value of the secret is base64 encoded as is required for any `data` field of a `Secret` object. 
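For example, the encoded value used in the following definition corresponds to the plain-text string `secretvalue1`. The following is a minimal sketch of producing such a value with a standard shell command; substitute your own secret value:

[source,terminal]
----
$ echo -n 'secretvalue1' | base64 <1>
----
<1> `secretvalue1` is only an example plain-text value used for illustration.

.Example output
[source,terminal]
----
c2VjcmV0dmFsdWUx
----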
- -[source,yaml] ----- -- kind: Secret - apiVersion: v1 - metadata: - name: mysecret - creationTimestamp: - data: - WebHookSecretKey: c2VjcmV0dmFsdWUx ----- diff --git a/modules/byoh-configuring.adoc b/modules/byoh-configuring.adoc deleted file mode 100644 index 0a80416ac66e..000000000000 --- a/modules/byoh-configuring.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * windows_containers/creating_windows_machinesets/byoh-windows-instance.adoc - -:_content-type: PROCEDURE -[id="configuring-byoh-windows-instance"] -= Configuring a BYOH Windows instance - -Creating a BYOH Windows instance requires creating a config map in the Windows Machine Config Operator (WMCO) namespace. - -.Prerequisites -Any Windows instances that are to be attached to the cluster as a node must fulfill the following requirements: - -* The instance must be on the same network as the Linux worker nodes in the cluster. -* Port 22 must be open and running an SSH server. -* The default shell for the SSH server must be the link:https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_server_configuration#configuring-the-default-shell-for-openssh-in-windows[Windows Command shell], or `cmd.exe`. -* Port 10250 must be open for log collection. -* An administrator user is present with the private key used in the secret set as an authorized SSH key. -* If you are creating a BYOH Windows instance for an installer-provisioned infrastructure (IPI) AWS cluster, you must add a tag to the AWS instance that matches the `spec.template.spec.value.tag` value in the compute machine set for your worker nodes. For example, `kubernetes.io/cluster/: owned` or `kubernetes.io/cluster/: shared`. -* If you are creating a BYOH Windows instance on vSphere, communication with the internal API server must be enabled. -* The hostname of the instance must follow the link:https://datatracker.ietf.org/doc/html/rfc1123[RFC 1123] DNS label requirements, which include the following standards: -** Contains only lowercase alphanumeric characters or '-'. -** Starts with an alphanumeric character. -** Ends with an alphanumeric character. - -[NOTE] -==== -Windows instances deployed by the WMCO are configured with the containerd container runtime. Because the WMCO installs and manages the runtime, it is recommended that you not manually install containerd on nodes. -==== - -.Procedure -. Create a ConfigMap named `windows-instances` in the WMCO namespace that describes the Windows instances to be added. -+ -[NOTE] -==== -Format each entry in the config map's data section by using the address as the key while formatting the value as `username=`. -==== -+ -.Example config map -[source,yaml] ----- -kind: ConfigMap -apiVersion: v1 -metadata: - name: windows-instances - namespace: openshift-windows-machine-config-operator -data: - 10.1.42.1: |- <1> - username=Administrator <2> - instance.example.com: |- - username=core ----- -<1> The address that the WMCO uses to reach the instance over SSH, either a DNS name or an IPv4 address. A DNS PTR record must exist for this address. It is recommended that you use a DNS name with your BYOH instance if your organization uses DHCP to assign IP addresses. If not, you need to update the `windows-instances` ConfigMap whenever the instance is assigned a new IP address. -<2> The name of the administrator user created in the prerequisites. 
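.Verification

* To confirm that the config map exists in the WMCO namespace, you can run a standard `oc get` command; this is a minimal sketch using the name and namespace from the example above:
+
[source,terminal]
----
$ oc get configmap windows-instances -n openshift-windows-machine-config-operator
----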
- diff --git a/modules/byoh-removal.adoc b/modules/byoh-removal.adoc deleted file mode 100644 index aa36b2487712..000000000000 --- a/modules/byoh-removal.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * windows_containers/creating_windows_machinesets/byoh-windows-instance.adoc - -[id="removing-byoh-windows-instance"] -= Removing BYOH Windows instances -You can remove BYOH instances attached to the cluster by deleting the instance's entry in the config map. Deleting an instance reverts that instance back to its state prior to adding to the cluster. Any logs and container runtime artifacts are not added to these instances. - -For an instance to be cleanly removed, it must be accessible with the current private key provided to WMCO. For example, to remove the `10.1.42.1` instance from the previous example, the config map would be changed to the following: - -[source,yaml] ----- -kind: ConfigMap -apiVersion: v1 -metadata: - name: windows-instances - namespace: openshift-windows-machine-config-operator -data: - instance.example.com: |- - username=core ----- - -Deleting `windows-instances` is viewed as a request to deconstruct all Windows instances added as nodes. diff --git a/modules/ca-bundle-replacing.adoc b/modules/ca-bundle-replacing.adoc deleted file mode 100644 index 1b96d175e8b9..000000000000 --- a/modules/ca-bundle-replacing.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/updating-ca-bundle.adoc - -:_content-type: PROCEDURE -[id="ca-bundle-replacing_{context}"] -= Replacing the CA Bundle certificate - -.Procedure - -. Create a config map that includes the root CA certificate used to sign the wildcard certificate: -+ -[source,terminal] ----- -$ oc create configmap custom-ca \ - --from-file=ca-bundle.crt= \//<1> - -n openshift-config ----- -<1> `` is the path to the CA certificate bundle on your local file system. - -. Update the cluster-wide proxy configuration with the newly created config map: -+ -[source,terminal] ----- -$ oc patch proxy/cluster \ - --type=merge \ - --patch='{"spec":{"trustedCA":{"name":"custom-ca"}}}' ----- diff --git a/modules/ca-bundle-understanding.adoc b/modules/ca-bundle-understanding.adoc deleted file mode 100644 index 949b7f2c768d..000000000000 --- a/modules/ca-bundle-understanding.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/updating-ca-bundle.adoc - -:_content-type: SNIPPET -[id="ca-bundle-understanding_{context}"] -= Understanding the CA Bundle certificate - -Proxy certificates allow users to specify one or more custom certificate authority (CA) used by platform components when making egress connections. - -The `trustedCA` field of the Proxy object is a reference to a config map that contains a user-provided trusted certificate authority (CA) bundle. This bundle is merged with the {op-system-first} trust bundle and injected into the trust store of platform components that make egress HTTPS calls. For example, `image-registry-operator` calls an external image registry to download images. If `trustedCA` is not specified, only the {op-system} trust bundle is used for proxied HTTPS connections. Provide custom CA certificates to the {op-system} trust bundle if you want to use your own certificate infrastructure. - -The `trustedCA` field should only be consumed by a proxy validator. 
The validator is responsible for reading the certificate bundle from required key `ca-bundle.crt` and copying it to a config map named `trusted-ca-bundle` in the `openshift-config-managed` namespace. The namespace for the config map referenced by `trustedCA` is `openshift-config`: - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-ca-bundle - namespace: openshift-config -data: - ca-bundle.crt: | - -----BEGIN CERTIFICATE----- - Custom CA certificate bundle. - -----END CERTIFICATE----- ----- diff --git a/modules/capi-machine-set-creating.adoc b/modules/capi-machine-set-creating.adoc deleted file mode 100644 index 262411e085aa..000000000000 --- a/modules/capi-machine-set-creating.adoc +++ /dev/null @@ -1,206 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/capi-machine-management.adoc - -:_content-type: PROCEDURE -[id="capi-machine-set-creating_{context}"] -= Creating a Cluster API compute machine set - -You can create compute machine sets that use the Cluster API to dynamically manage the machine compute resources for specific workloads of your choice. - -.Prerequisites - -* Deploy an {product-title} cluster. -* Enable the use of the Cluster API. -* Install the OpenShift CLI (`oc`). -* Log in to `oc` as a user with `cluster-admin` permission. - -.Procedure - -. Create a YAML file that contains the cluster custom resource (CR) and is named `.yaml`. -+ -If you are not sure which value to set for the `` parameter, you can check the value for an existing Machine API compute machine set in your cluster. - -.. To list the Machine API compute machine sets, run the following command: -+ -[source,terminal] ----- -$ oc get machinesets -n openshift-machine-api <1> ----- -<1> Specify the `openshift-machine-api` namespace. -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT READY AVAILABLE AGE -agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m -agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m -agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m -agl030519-vplxk-worker-us-east-1d 0 0 55m -agl030519-vplxk-worker-us-east-1e 0 0 55m -agl030519-vplxk-worker-us-east-1f 0 0 55m ----- - -.. To display the contents of a specific compute machine set CR, run the following command: -+ -[source,terminal] ----- -$ oc get machineset \ --n openshift-machine-api \ --o yaml ----- -+ -.Example output -[source,yaml] ----- -... -template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: agl030519-vplxk <1> - machine.openshift.io/cluster-api-machine-role: worker - machine.openshift.io/cluster-api-machine-type: worker - machine.openshift.io/cluster-api-machineset: agl030519-vplxk-worker-us-east-1a -... ----- -<1> The cluster ID, which you use for the `` parameter. - -. Create the cluster CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -.Verification -+ -To confirm that the cluster CR is created, run the following command: -+ -[source,terminal] ----- -$ oc get cluster ----- -+ -.Example output -[source,terminal] ----- -NAME PHASE AGE VERSION - Provisioning 4h6m ----- - -. Create a YAML file that contains the infrastructure CR and is named `.yaml`. - -. Create the infrastructure CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -.Verification -+ -To confirm that the infrastructure CR is created, run the following command: -+ -[source,terminal] ----- -$ oc get ----- -+ -where `` is the value that corresponds to your platform. 
-+ -.Example output -[source,terminal] ----- -NAME CLUSTER READY VPC BASTION IP - true ----- - -. Create a YAML file that contains the machine template CR and is named `.yaml`. - -. Create the machine template CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -.Verification -+ -To confirm that the machine template CR is created, run the following command: -+ -[source,terminal] ----- -$ oc get ----- -+ -where `` is the value that corresponds to your platform. -+ -.Example output -[source,terminal] ----- -NAME AGE - 77m ----- - -. Create a YAML file that contains the compute machine set CR and is named `.yaml`. - -. Create the compute machine set CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -.Verification -+ -To confirm that the compute machine set CR is created, run the following command: -+ -[source,terminal] ----- -$ oc get machineset -n openshift-cluster-api <1> ----- -<1> Specify the `openshift-cluster-api` namespace. -+ -.Example output -[source,terminal] ----- -NAME CLUSTER REPLICAS READY AVAILABLE AGE VERSION - 1 1 1 17m ----- -+ -When the new compute machine set is available, the `REPLICAS` and `AVAILABLE` values match. If the compute machine set is not available, wait a few minutes and run the command again. - -.Verification - -* To verify that the compute machine set is creating machines according to your desired configuration, you can review the lists of machines and nodes in the cluster. - -** To view the list of Cluster API machines, run the following command: -+ -[source,terminal] ----- -$ oc get machine -n openshift-cluster-api <1> ----- -<1> Specify the `openshift-cluster-api` namespace. -+ -.Example output -[source,terminal] ----- -NAME CLUSTER NODENAME PROVIDERID PHASE AGE VERSION -- ..compute.internal Running 8m23s ----- - -** To view the list of nodes, run the following command: -+ -[source,terminal] ----- -$ oc get node ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -..compute.internal Ready worker 5h14m v1.27.3 -..compute.internal Ready master 5h19m v1.27.3 -..compute.internal Ready worker 7m v1.27.3 ----- diff --git a/modules/capi-troubleshooting.adoc b/modules/capi-troubleshooting.adoc deleted file mode 100644 index d81354af80b0..000000000000 --- a/modules/capi-troubleshooting.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/capi-machine-management.adoc - -:_content-type: REFERENCE -[id="capi-troubleshooting_{context}"] -= Troubleshooting clusters that use the Cluster API - -Use the information in this section to understand and recover from issues you might encounter. Generally, troubleshooting steps for problems with the Cluster API are similar to those steps for problems with the Machine API. - -The Cluster CAPI Operator and its operands are provisioned in the `openshift-cluster-api` namespace, whereas the Machine API uses the `openshift-machine-api` namespace. When using `oc` commands that reference a namespace, be sure to reference the correct one. - -[id="ts-capi-cli_{context}"] -== CLI commands return Cluster API machines - -For clusters that use the Cluster API, `oc` commands such as `oc get machine` return results for Cluster API machines. Because the letter `c` precedes the letter `m` alphabetically, Cluster API machines appear in the return before Machine API machines do. 
- -* To list only Machine API machines, use the fully qualified name `machines.machine.openshift.io` when running the `oc get machine` command: -+ -[source,terminal] ----- -$ oc get machines.machine.openshift.io ----- - -* To list only Cluster API machines, use the fully qualified name `machines.cluster.x-k8s.io` when running the `oc get machine` command: -+ -[source,terminal] ----- -$ oc get machines.cluster.x-k8s.io ----- diff --git a/modules/capi-yaml-cluster.adoc b/modules/capi-yaml-cluster.adoc deleted file mode 100644 index 6ae11078445e..000000000000 --- a/modules/capi-yaml-cluster.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/capi-machine-management.adoc - -:_content-type: REFERENCE -[id="capi-yaml-cluster_{context}"] -= Sample YAML for a Cluster API cluster resource - -The cluster resource defines the name and infrastructure provider for the cluster and is managed by the Cluster API. This resource has the same structure for all providers. - -[source,yaml] ----- -apiVersion: cluster.x-k8s.io/v1beta1 -kind: Cluster -metadata: - name: <1> - namespace: openshift-cluster-api -spec: - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: <2> - name: <1> - namespace: openshift-cluster-api ----- -<1> Specify the name of the cluster. -<2> Specify the infrastructure kind for the cluster. Valid values are: -+ --- -* `AWSCluster`: The cluster is running on Amazon Web Services (AWS). -* `GCPCluster`: The cluster is running on Google Cloud Platform (GCP). --- \ No newline at end of file diff --git a/modules/capi-yaml-infrastructure-aws.adoc b/modules/capi-yaml-infrastructure-aws.adoc deleted file mode 100644 index b835fa3c3437..000000000000 --- a/modules/capi-yaml-infrastructure-aws.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/capi-machine-management.adoc - -:_content-type: REFERENCE -[id="capi-yaml-infrastructure-aws_{context}"] -= Sample YAML for a Cluster API infrastructure resource on Amazon Web Services - -The infrastructure resource is provider-specific and defines properties that are shared by all the compute machine sets in the cluster, such as the region and subnets. The compute machine set references this resource when creating machines. - -[source,yaml] ----- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: AWSCluster <1> -metadata: - name: <2> - namespace: openshift-cluster-api -spec: - region: <3> ----- -<1> Specify the infrastructure kind for the cluster. This value must match the value for your platform. -<2> Specify the name of the cluster. -<3> Specify the AWS region. \ No newline at end of file diff --git a/modules/capi-yaml-infrastructure-gcp.adoc b/modules/capi-yaml-infrastructure-gcp.adoc deleted file mode 100644 index bffc4d600b28..000000000000 --- a/modules/capi-yaml-infrastructure-gcp.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/capi-machine-management.adoc - -:_content-type: REFERENCE -[id="capi-yaml-infrastructure-gcp_{context}"] -= Sample YAML for a Cluster API infrastructure resource on Google Cloud Platform - -The infrastructure resource is provider-specific and defines properties that are shared by all the compute machine sets in the cluster, such as the region and subnets. The compute machine set references this resource when creating machines. 
- -[source,yaml] ----- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: GCPCluster <1> -metadata: - name: <2> -spec: - network: - name: -network <2> - project: <3> - region: <4> ----- -<1> Specify the infrastructure kind for the cluster. This value must match the value for your platform. -<2> Specify the name of the cluster. -<3> Specify the GCP project name. -<4> Specify the GCP region. \ No newline at end of file diff --git a/modules/capi-yaml-machine-set-aws.adoc b/modules/capi-yaml-machine-set-aws.adoc deleted file mode 100644 index a6bc4e18599a..000000000000 --- a/modules/capi-yaml-machine-set-aws.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/capi-machine-management.adoc - -:_content-type: REFERENCE -[id="capi-yaml-machine-set-aws_{context}"] -= Sample YAML for a Cluster API compute machine set resource on Amazon Web Services - -The compute machine set resource defines additional properties of the machines that it creates. The compute machine set also references the infrastructure resource and machine template when creating machines. - -[source,yaml] ----- -apiVersion: cluster.x-k8s.io/v1alpha4 -kind: MachineSet -metadata: - name: <1> - namespace: openshift-cluster-api -spec: - clusterName: <2> - replicas: 1 - selector: - matchLabels: - test: example - template: - metadata: - labels: - test: example - spec: - bootstrap: - dataSecretName: worker-user-data <3> - clusterName: <2> - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 - kind: AWSMachineTemplate <4> - name: <2> ----- -<1> Specify a name for the compute machine set. -<2> Specify the name of the cluster. -<3> For the Cluster API Technology Preview, the Operator can use the worker user data secret from `openshift-machine-api` namespace. -<4> Specify the machine template kind. This value must match the value for your platform. \ No newline at end of file diff --git a/modules/capi-yaml-machine-set-gcp.adoc b/modules/capi-yaml-machine-set-gcp.adoc deleted file mode 100644 index fa1b5076a334..000000000000 --- a/modules/capi-yaml-machine-set-gcp.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/capi-machine-management.adoc - -:_content-type: REFERENCE -[id="capi-yaml-machine-set-gcp_{context}"] -= Sample YAML for a Cluster API compute machine set resource on Google Cloud Platform - -The compute machine set resource defines additional properties of the machines that it creates. The compute machine set also references the infrastructure resource and machine template when creating machines. - -[source,yaml] ----- -apiVersion: cluster.x-k8s.io/v1beta1 -kind: MachineSet -metadata: - name: <1> - namespace: openshift-cluster-api -spec: - clusterName: <2> - replicas: 1 - selector: - matchLabels: - test: test - template: - metadata: - labels: - test: test - spec: - bootstrap: - dataSecretName: worker-user-data <3> - clusterName: <2> - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: GCPMachineTemplate <4> - name: <1> - failureDomain: <5> ----- -<1> Specify a name for the compute machine set. -<2> Specify the name of the cluster. -<3> For the Cluster API Technology Preview, the Operator can use the worker user data secret from `openshift-machine-api` namespace. -<4> Specify the machine template kind. This value must match the value for your platform. -<5> Specify the failure domain within the GCP region. 
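As with the other sample resources, the following is a minimal sketch of applying this manifest and confirming the result, assuming it is saved locally as `machineset-gcp.yaml` (a hypothetical file name):

[source,terminal]
----
$ oc create -f machineset-gcp.yaml <1>
----
<1> `machineset-gcp.yaml` is a hypothetical file name for the sample manifest above.

[source,terminal]
----
$ oc get machineset -n openshift-cluster-api
----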
\ No newline at end of file diff --git a/modules/capi-yaml-machine-template-aws.adoc b/modules/capi-yaml-machine-template-aws.adoc deleted file mode 100644 index 2e40623bc553..000000000000 --- a/modules/capi-yaml-machine-template-aws.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/capi-machine-management.adoc - -:_content-type: REFERENCE -[id="capi-yaml-machine-template-aws_{context}"] -= Sample YAML for a Cluster API machine template resource on Amazon Web Services - -The machine template resource is provider-specific and defines the basic properties of the machines that a compute machine set creates. The compute machine set references this template when creating machines. - -[source,yaml] ----- -apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 -kind: AWSMachineTemplate <1> -metadata: - name: <2> - namespace: openshift-cluster-api -spec: - template: - spec: <3> - uncompressedUserData: true - iamInstanceProfile: .... - instanceType: m5.large - cloudInit: - insecureSkipSecretsManager: true - ami: - id: .... - subnet: - filters: - - name: tag:Name - values: - - ... - additionalSecurityGroups: - - filters: - - name: tag:Name - values: - - ... ----- -<1> Specify the machine template kind. This value must match the value for your platform. -<2> Specify a name for the machine template. -<3> Specify the details for your environment. The values here are examples. \ No newline at end of file diff --git a/modules/capi-yaml-machine-template-gcp.adoc b/modules/capi-yaml-machine-template-gcp.adoc deleted file mode 100644 index 5a43a909cf51..000000000000 --- a/modules/capi-yaml-machine-template-gcp.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/capi-machine-management.adoc - -:_content-type: REFERENCE -[id="capi-yaml-machine-template-gcp_{context}"] -= Sample YAML for a Cluster API machine template resource on Google Cloud Platform - -The machine template resource is provider-specific and defines the basic properties of the machines that a compute machine set creates. The compute machine set references this template when creating machines. - -[source,yaml] ----- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: GCPMachineTemplate <1> -metadata: - name: <2> - namespace: openshift-cluster-api -spec: - template: - spec: <3> - rootDeviceType: pd-ssd - rootDeviceSize: 128 - instanceType: n1-standard-4 - image: projects/rhcos-cloud/global/images/rhcos-411-85-202203181601-0-gcp-x86-64 - subnet: -worker-subnet - serviceAccounts: - email: - scopes: - - https://www.googleapis.com/auth/cloud-platform - additionalLabels: - kubernetes-io-cluster-: owned - additionalNetworkTags: - - -worker - ipForwarding: Disabled ----- -<1> Specify the machine template kind. This value must match the value for your platform. -<2> Specify a name for the machine template. -<3> Specify the details for your environment. The values here are examples. 
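A minimal verification sketch, assuming the template has been applied to the cluster and that the resource can be queried by the lowercased form of the kind shown above:

[source,terminal]
----
$ oc get gcpmachinetemplate -n openshift-cluster-api <1>
----
<1> The resource name `gcpmachinetemplate` is an assumption derived from the `GCPMachineTemplate` kind; the creation procedure refers to it generically as the machine template kind for your platform.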
diff --git a/modules/cco-ccoctl-configuring.adoc b/modules/cco-ccoctl-configuring.adoc deleted file mode 100644 index 51397d06e482..000000000000 --- a/modules/cco-ccoctl-configuring.adoc +++ /dev/null @@ -1,232 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc -// * authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc -// * installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc -// * installing/installing_ibm_powervs/preparing-to-install-on-ibm-power-vs.doc -// * installing/installing_alibaba/manually-creating-alibaba-ram.adoc -// * installing/installing_nutanix/preparing-to-install-on-nutanix.adoc -// * updating/preparing_for_updates/preparing-manual-creds-update.adoc - -ifeval::["{context}" == "cco-mode-sts"] -:aws-sts: -endif::[] -ifeval::["{context}" == "cco-mode-gcp-workload-identity"] -:google-cloud-platform: -endif::[] -ifeval::["{context}" == "configuring-iam-ibm-cloud"] -:ibm-cloud: -endif::[] -ifeval::["{context}" == "manually-creating-alibaba-ram"] -:alibabacloud: -endif::[] -ifeval::["{context}" == "preparing-to-install-on-nutanix"] -:nutanix: -endif::[] -ifeval::["{context}" == "preparing-manual-creds-update"] -:update: -endif::[] -ifeval::["{context}" == "preparing-to-install-on-ibm-power-vs"] -:ibm-power-vs: -endif::[] - -:_content-type: PROCEDURE -[id="cco-ccoctl-configuring_{context}"] -ifndef::update[= Configuring the Cloud Credential Operator utility] -ifdef::update[= Configuring the Cloud Credential Operator utility for a cluster update] - -//This applies only to Alibaba Cloud. -ifdef::alibabacloud[] -To assign RAM users and policies that provide long-lived RAM AccessKeys (AKs) for each in-cluster component, extract and prepare the Cloud Credential Operator (CCO) utility (`ccoctl`) binary. -endif::alibabacloud[] - -//Nutanix-only intro because it needs context in its install procedure. -ifdef::nutanix[] -The Cloud Credential Operator (CCO) manages cloud provider credentials as Kubernetes custom resource definitions (CRDs). To install a cluster on Nutanix, you must set the CCO to `manual` mode as part of the installation process. -endif::nutanix[] -ifdef::ibm-power-vs[] -The Cloud Credential Operator (CCO) manages cloud provider credentials as Kubernetes custom resource definitions (CRDs). To install a cluster on {ibmpowerProductName} Virtual Server, you must set the CCO to `manual` mode as part of the installation process. -endif::ibm-power-vs[] - -//Alibaba Cloud uses ccoctl, but creates different kinds of resources than other clouds, so this applies to everyone else. The upgrade procs also have a different intro, so they are excluded here. -ifndef::alibabacloud,update[] -To create and manage cloud credentials from outside of the cluster when the Cloud Credential Operator (CCO) is operating in manual mode, extract and prepare the CCO utility (`ccoctl`) binary. -endif::alibabacloud,update[] - -//Intro for the upgrade procs. -ifdef::update[] -To upgrade a cluster that uses the Cloud Credential Operator (CCO) in manual mode to create and manage cloud credentials from outside of the cluster, extract and prepare the CCO utility (`ccoctl`) binary. -endif::update[] - -[NOTE] -==== -The `ccoctl` utility is a Linux binary that must run in a Linux environment. -==== - -.Prerequisites - -* You have access to an {product-title} account with cluster administrator access. -* You have installed the OpenShift CLI (`oc`). 
- -//Upgrade prereqs -ifdef::update[] -* Your cluster was configured using the `ccoctl` utility to create and manage cloud credentials from outside of the cluster. -endif::update[] - -//AWS permissions needed when running ccoctl during install (I think we can omit from upgrade, since they already have an appropriate AWS account if they are upgrading). -ifdef::aws-sts[] -* You have created an AWS account for the `ccoctl` utility to use with the following permissions: -+ -.Required AWS permissions -[cols="a,a"] -|==== -|Permission type |Required permissions - -|`iam` permissions -|* `iam:CreateOpenIDConnectProvider` -* `iam:CreateRole` -* `iam:DeleteOpenIDConnectProvider` -* `iam:DeleteRole` -* `iam:DeleteRolePolicy` -* `iam:GetOpenIDConnectProvider` -* `iam:GetRole` -* `iam:GetUser` -* `iam:ListOpenIDConnectProviders` -* `iam:ListRolePolicies` -* `iam:ListRoles` -* `iam:PutRolePolicy` -* `iam:TagOpenIDConnectProvider` -* `iam:TagRole` - -|`s3` permissions -|* `s3:CreateBucket` -* `s3:DeleteBucket` -* `s3:DeleteObject` -* `s3:GetBucketAcl` -* `s3:GetBucketTagging` -* `s3:GetObject` -* `s3:GetObjectAcl` -* `s3:GetObjectTagging` -* `s3:ListBucket` -* `s3:PutBucketAcl` -* `s3:PutBucketPolicy` -* `s3:PutBucketPublicAccessBlock` -* `s3:PutBucketTagging` -* `s3:PutObject` -* `s3:PutObjectAcl` -* `s3:PutObjectTagging` - -|`cloudfront` permissions -|* `cloudfront:ListCloudFrontOriginAccessIdentities` -* `cloudfront:ListDistributions` -* `cloudfront:ListTagsForResource` - -|==== -+ -If you plan to store the OIDC configuration in a private S3 bucket that is accessed by the IAM identity provider through a public CloudFront distribution URL, the AWS account that runs the `ccoctl` utility requires the following additional permissions: -+ --- -* `cloudfront:CreateCloudFrontOriginAccessIdentity` -* `cloudfront:CreateDistribution` -* `cloudfront:DeleteCloudFrontOriginAccessIdentity` -* `cloudfront:DeleteDistribution` -* `cloudfront:GetCloudFrontOriginAccessIdentity` -* `cloudfront:GetCloudFrontOriginAccessIdentityConfig` -* `cloudfront:GetDistribution` -* `cloudfront:TagResource` -* `cloudfront:UpdateDistribution` --- -+ -[NOTE] -==== -These additional permissions support the use of the `--create-private-s3-bucket` option when processing credentials requests with the `ccoctl aws create-all` command. -==== -endif::aws-sts[] - -.Procedure - -. Obtain the {product-title} release image by running the following command: -+ -[source,terminal] ----- -$ RELEASE_IMAGE=$(./openshift-install version | awk '/release image/ {print $3}') ----- - -. Obtain the CCO container image from the {product-title} release image by running the following command: -+ -[source,terminal] ----- -$ CCO_IMAGE=$(oc adm release info --image-for='cloud-credential-operator' $RELEASE_IMAGE -a ~/.pull-secret) ----- -+ -[NOTE] -==== -Ensure that the architecture of the `$RELEASE_IMAGE` matches the architecture of the environment in which you will use the `ccoctl` tool. -==== - -. Extract the `ccoctl` binary from the CCO container image within the {product-title} release image by running the following command: -+ -[source,terminal] ----- -$ oc image extract $CCO_IMAGE --file="/usr/bin/ccoctl" -a ~/.pull-secret ----- - -. 
Change the permissions to make `ccoctl` executable by running the following command: -+ -[source,terminal] ----- -$ chmod 775 ccoctl ----- - -.Verification - -* To verify that `ccoctl` is ready to use, display the help file by running the following command: -+ -[source,terminal] ----- -$ ccoctl --help ----- -+ -.Output of `ccoctl --help` -[source,terminal] ----- -OpenShift credentials provisioning tool - -Usage: - ccoctl [command] - -Available Commands: - alibabacloud Manage credentials objects for alibaba cloud - aws Manage credentials objects for AWS cloud - gcp Manage credentials objects for Google cloud - help Help about any command - ibmcloud Manage credentials objects for IBM Cloud - nutanix Manage credentials objects for Nutanix - -Flags: - -h, --help help for ccoctl - -Use "ccoctl [command] --help" for more information about a command. ----- - -ifeval::["{context}" == "cco-mode-sts"] -:!aws-sts: -endif::[] -ifeval::["{context}" == "cco-mode-gcp-workload-identity"] -:!google-cloud-platform: -endif::[] -ifeval::["{context}" == "configuring-iam-ibm-cloud"] -:!ibm-cloud: -endif::[] -ifeval::["{context}" == "manually-creating-alibaba-ram"] -:!alibabacloud: -endif::[] -ifeval::["{context}" == "preparing-to-install-on-nutanix"] -:!nutanix: -endif::[] -ifeval::["{context}" == "preparing-manual-creds-update"] -:!update: -endif::[] -ifeval::["{context}" == "preparing-to-install-on-ibm-power-vs"] -:!ibm-power-vs: -endif::[] diff --git a/modules/cco-ccoctl-creating-at-once.adoc b/modules/cco-ccoctl-creating-at-once.adoc deleted file mode 100644 index 757d474040a1..000000000000 --- a/modules/cco-ccoctl-creating-at-once.adoc +++ /dev/null @@ -1,351 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc -// * authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc -// * installing/installing_alibaba/manually-creating-alibaba-ram.adoc -// * installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_alibaba/installing-alibaba-vpc.adoc - -ifeval::["{context}" == "cco-mode-sts"] -:aws-sts: -endif::[] -ifeval::["{context}" == "cco-mode-gcp-workload-identity"] -:google-cloud-platform: -endif::[] -ifeval::["{context}" == "installing-alibaba-default"] -:alibabacloud-default: -endif::[] -ifeval::["{context}" == "installing-alibaba-customizations"] -:alibabacloud-customizations: -endif::[] -ifeval::["{context}" == "installing-alibaba-vpc"] -:alibabacloud-vpc: -endif::[] - -:_content-type: PROCEDURE -[id="cco-ccoctl-creating-at-once_{context}"] -ifdef::aws-sts[] -= Creating AWS resources with a single command - -If you do not need to review the JSON files that the `ccoctl` tool creates before modifying AWS resources, and if the process the `ccoctl` tool uses to create AWS resources automatically meets the requirements of your organization, you can use the `ccoctl aws create-all` command to automate the creation of AWS resources. - -Otherwise, you can create the AWS resources individually. - -//to-do if possible: xref to modules/cco-ccoctl-creating-individually.adoc for `create the AWS resources individually` -endif::aws-sts[] -ifdef::google-cloud-platform[] -= Creating GCP resources with the Cloud Credential Operator utility - -You can use the `ccoctl gcp create-all` command to automate the creation of GCP resources. 
-endif::google-cloud-platform[] -ifdef::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] -[id="cco-ccoctl-creating-at-once_{context}"] -= Creating credentials for {product-title} components with the ccoctl tool - -You can use the {product-title} Cloud Credential Operator (CCO) utility to automate the creation of Alibaba Cloud RAM users and policies for each in-cluster component. -endif::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] - -[NOTE] -==== -By default, `ccoctl` creates objects in the directory in which the commands are run. To create the objects in a different directory, use the `--output-dir` flag. This procedure uses `` to refer to this directory. -==== - -.Prerequisites - -You must have: - -* Extracted and prepared the `ccoctl` binary. -ifdef::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] -* Created a RAM user with sufficient permission to create the {product-title} cluster. -* Added the AccessKeyID (`access_key_id`) and AccessKeySecret (`access_key_secret`) of that RAM user into the link:https://www.alibabacloud.com/help/en/doc-detail/311667.htm#h2-sls-mfm-3p3[`~/.alibabacloud/credentials` file] on your local computer. -endif::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] - -.Procedure - -ifdef::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] -. Set the `$RELEASE_IMAGE` variable by running the following command: -+ -[source,terminal] ----- -$ RELEASE_IMAGE=$(./openshift-install version | awk '/release image/ {print $3}') ----- -endif::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] - -. Extract the list of `CredentialsRequest` objects from the {product-title} release image by running the following command: -+ -[source,terminal] -ifdef::aws-sts[] ----- -$ oc adm release extract \ ---credentials-requests \ ---cloud=aws \ ---to=/credrequests \ <1> ---from=quay.io//ocp-release: ----- -endif::aws-sts[] -ifdef::google-cloud-platform[] ----- -$ oc adm release extract \ ---credentials-requests \ ---cloud=gcp \ ---to=/credrequests \ <1> -quay.io//ocp-release: ----- -endif::google-cloud-platform[] -ifdef::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] ----- -$ oc adm release extract \ ---credentials-requests \ ---cloud=alibabacloud \ ---to=/credrequests \ <1> -$RELEASE_IMAGE ----- -endif::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] -+ -<1> `credrequests` is the directory where the list of `CredentialsRequest` objects is stored. This command creates the directory if it does not exist. -+ -[NOTE] -==== -This command can take a few moments to run. -==== - -ifdef::aws-sts[] -. If your cluster uses cluster capabilities to disable one or more optional components, delete the `CredentialsRequest` custom resources for any disabled components. -+ -.Example `credrequests` directory contents for {product-title} 4.12 on AWS -+ -[source,terminal] ----- -0000_30_machine-api-operator_00_credentials-request.yaml <1> -0000_50_cloud-credential-operator_05-iam-ro-credentialsrequest.yaml <2> -0000_50_cluster-image-registry-operator_01-registry-credentials-request.yaml <3> -0000_50_cluster-ingress-operator_00-ingress-credentials-request.yaml <4> -0000_50_cluster-network-operator_02-cncc-credentials.yaml <5> -0000_50_cluster-storage-operator_03_credentials_request_aws.yaml <6> ----- -+ -<1> The Machine API Operator CR is required. -<2> The Cloud Credential Operator CR is required. -<3> The Image Registry Operator CR is required. 
-<4> The Ingress Operator CR is required. -<5> The Network Operator CR is required. -<6> The Storage Operator CR is an optional component and might be disabled in your cluster. -endif::aws-sts[] -ifdef::google-cloud-platform[] -. If your cluster uses cluster capabilities to disable one or more optional components, delete the `CredentialsRequest` custom resources for any disabled components. -+ -.Example `credrequests` directory contents for {product-title} 4.12 on GCP -+ -[source,terminal] ----- -0000_26_cloud-controller-manager-operator_16_credentialsrequest-gcp.yaml <1> -0000_30_machine-api-operator_00_credentials-request.yaml <2> -0000_50_cloud-credential-operator_05-gcp-ro-credentialsrequest.yaml <3> -0000_50_cluster-image-registry-operator_01-registry-credentials-request-gcs.yaml <4> -0000_50_cluster-ingress-operator_00-ingress-credentials-request.yaml <5> -0000_50_cluster-network-operator_02-cncc-credentials.yaml <6> -0000_50_cluster-storage-operator_03_credentials_request_gcp.yaml <7> ----- -+ -<1> The Cloud Controller Manager Operator CR is required. -<2> The Machine API Operator CR is required. -<3> The Cloud Credential Operator CR is required. -<4> The Image Registry Operator CR is required. -<5> The Ingress Operator CR is required. -<6> The Network Operator CR is required. -<7> The Storage Operator CR is an optional component and might be disabled in your cluster. -endif::google-cloud-platform[] -ifdef::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] -. If your cluster uses cluster capabilities to disable one or more optional components, delete the `CredentialsRequest` custom resources for any disabled components. -+ -.Example `credrequests` directory contents for {product-title} 4.12 on Alibaba Cloud -+ -[source,terminal] ----- -0000_30_machine-api-operator_00_credentials-request.yaml <1> -0000_50_cluster-image-registry-operator_01-registry-credentials-request-alibaba.yaml <2> -0000_50_cluster-ingress-operator_00-ingress-credentials-request.yaml <3> -0000_50_cluster-storage-operator_03_credentials_request_alibaba.yaml <4> ----- -+ -<1> The Machine API Operator CR is required. -<2> The Image Registry Operator CR is required. -<3> The Ingress Operator CR is required. -<4> The Storage Operator CR is an optional component and might be disabled in your cluster. -endif::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] - -ifdef::aws-sts,google-cloud-platform[] -. Use the `ccoctl` tool to process all `CredentialsRequest` objects in the `credrequests` directory: -+ -endif::aws-sts,google-cloud-platform[] -ifdef::aws-sts[] -[source,terminal] ----- -$ ccoctl aws create-all \ - --name= \// <1> - --region= \// <2> - --credentials-requests-dir=/credrequests \// <3> - --output-dir= \// <4> - --create-private-s3-bucket <5> ----- -<1> Specify the name used to tag any cloud resources that are created for tracking. -<2> Specify the AWS region in which cloud resources will be created. -<3> Specify the directory containing the files for the component `CredentialsRequest` objects. -<4> Optional: Specify the directory in which you want the `ccoctl` utility to create objects. By default, the utility creates objects in the directory in which the commands are run. -<5> Optional: By default, the `ccoctl` utility stores the OpenID Connect (OIDC) configuration files in a public S3 bucket and uses the S3 URL as the public OIDC endpoint. 
To store the OIDC configuration in a private S3 bucket that is accessed by the IAM identity provider through a public CloudFront distribution URL instead, use the `--create-private-s3-bucket` parameter. -+ -[NOTE] -==== -If your cluster uses Technology Preview features that are enabled by the `TechPreviewNoUpgrade` feature set, you must include the `--enable-tech-preview` parameter. -==== -endif::aws-sts[] -ifdef::google-cloud-platform[] -[source,terminal] ----- -$ ccoctl gcp create-all \ ---name= \ ---region= \ ---project= \ ---credentials-requests-dir=/credrequests ----- -+ -where: -+ --- -** `` is the user-defined name for all created GCP resources used for tracking. -** `` is the GCP region in which cloud resources will be created. -** `` is the GCP project ID in which cloud resources will be created. -** `/credrequests` is the directory containing the files of `CredentialsRequest` manifests to create GCP service accounts. --- -+ -[NOTE] -==== -If your cluster uses Technology Preview features that are enabled by the `TechPreviewNoUpgrade` feature set, you must include the `--enable-tech-preview` parameter. -==== -endif::google-cloud-platform[] - -ifdef::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] -. Use the `ccoctl` tool to process all `CredentialsRequest` objects in the `credrequests` directory: - -.. Run the following command to use the tool: -+ -[source,terminal] ----- -$ ccoctl alibabacloud create-ram-users \ ---name \ ---region= \ ---credentials-requests-dir=/credrequests \ ---output-dir= ----- -+ -where: -+ --- -** `` is the name used to tag any cloud resources that are created for tracking. -** `` is the Alibaba Cloud region in which cloud resources will be created. -** `/credrequests` is the directory containing the files for the component `CredentialsRequest` objects. -** `` is the directory where the generated component credentials secrets will be placed. --- -+ -[NOTE] -==== -If your cluster uses Technology Preview features that are enabled by the `TechPreviewNoUpgrade` feature set, you must include the `--enable-tech-preview` parameter. -==== -+ -.Example output -+ -[source,terminal] ----- -2022/02/11 16:18:26 Created RAM User: user1-alicloud-openshift-machine-api-alibabacloud-credentials -2022/02/11 16:18:27 Ready for creating new ram policy user1-alicloud-openshift-machine-api-alibabacloud-credentials-policy-policy -2022/02/11 16:18:27 RAM policy user1-alicloud-openshift-machine-api-alibabacloud-credentials-policy-policy has created -2022/02/11 16:18:28 Policy user1-alicloud-openshift-machine-api-alibabacloud-credentials-policy-policy has attached on user user1-alicloud-openshift-machine-api-alibabacloud-credentials -2022/02/11 16:18:29 Created access keys for RAM User: user1-alicloud-openshift-machine-api-alibabacloud-credentials -2022/02/11 16:18:29 Saved credentials configuration to: user1-alicloud/manifests/openshift-machine-api-alibabacloud-credentials-credentials.yaml -... ----- -+ -[NOTE] -==== -A RAM user can have up to two AccessKeys at the same time. If you run `ccoctl alibabacloud create-ram-users` more than twice, the previous generated manifests secret becomes stale and you must reapply the newly generated secrets. -==== -// Above output was in AWS area but I believe belongs here. - -.. 
Verify that the {product-title} secrets are created: -+ -[source,terminal] ----- -$ ls /manifests ----- -+ -.Example output: -+ -[source,terminal] ----- -openshift-cluster-csi-drivers-alibaba-disk-credentials-credentials.yaml -openshift-image-registry-installer-cloud-credentials-credentials.yaml -openshift-ingress-operator-cloud-credentials-credentials.yaml -openshift-machine-api-alibabacloud-credentials-credentials.yaml ----- -+ -You can verify that the RAM users and policies are created by querying Alibaba Cloud. For more information, refer to Alibaba Cloud documentation on listing RAM users and policies. - -. Copy the generated credential files to the target manifests directory: -+ -[source,terminal] ----- -$ cp .//manifests/*credentials.yaml ./dir>/manifests/ ----- -+ -where: - -``:: Specifies the directory created by the `ccoctl alibabacloud create-ram-users` command. -``:: Specifies the directory in which the installation program creates files. -endif::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] - -ifdef::aws-sts,google-cloud-platform[] -.Verification - -* To verify that the {product-title} secrets are created, list the files in the `/manifests` directory: -+ -[source,terminal] ----- -$ ls /manifests ----- -endif::aws-sts,google-cloud-platform[] -ifdef::aws-sts[] -+ -.Example output: -+ -[source,terminal] ----- -cluster-authentication-02-config.yaml -openshift-cloud-credential-operator-cloud-credential-operator-iam-ro-creds-credentials.yaml -openshift-cluster-csi-drivers-ebs-cloud-credentials-credentials.yaml -openshift-image-registry-installer-cloud-credentials-credentials.yaml -openshift-ingress-operator-cloud-credentials-credentials.yaml -openshift-machine-api-aws-cloud-credentials-credentials.yaml ----- -//Would love a GCP version of the above output. - -You can verify that the IAM roles are created by querying AWS. For more information, refer to AWS documentation on listing IAM roles. -endif::aws-sts[] -ifdef::google-cloud-platform[] -You can verify that the IAM service accounts are created by querying GCP. For more information, refer to GCP documentation on listing IAM service accounts. -endif::google-cloud-platform[] - -ifeval::["{context}" == "cco-mode-sts"] -:!aws-sts: -endif::[] -ifeval::["{context}" == "cco-mode-gcp-workload-identity"] -:!google-cloud-platform: -endif::[] -ifeval::["{context}" == "installing-alibaba-default"] -:!alibabacloud-default: -endif::[] -ifeval::["{context}" == "installing-alibaba-customizations"] -:!alibabacloud-customizations: -endif::[] -ifeval::["{context}" == "installing-alibaba-vpc"] -:!alibabacloud-vpc: -endif::[] diff --git a/modules/cco-ccoctl-creating-individually.adoc b/modules/cco-ccoctl-creating-individually.adoc deleted file mode 100644 index d2404785a72d..000000000000 --- a/modules/cco-ccoctl-creating-individually.adoc +++ /dev/null @@ -1,159 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc - -:_content-type: PROCEDURE -[id="cco-ccoctl-creating-individually_{context}"] -= Creating AWS resources individually - -If you need to review the JSON files that the `ccoctl` tool creates before modifying AWS resources, or if the process the `ccoctl` tool uses to create AWS resources automatically does not meet the requirements of your organization, you can create the AWS resources individually. 
For example, this option might be useful for an organization that shares the responsibility for creating these resources among different users or departments. - -Otherwise, you can use the `ccoctl aws create-all` command to create the AWS resources automatically. - -//to-do if possible: xref to modules/cco-ccoctl-creating-at-once.adoc for `create the AWS resources automatically` - -[NOTE] -==== -By default, `ccoctl` creates objects in the directory in which the commands are run. To create the objects in a different directory, use the `--output-dir` flag. This procedure uses `` to refer to this directory. - -Some `ccoctl` commands make AWS API calls to create or modify AWS resources. You can use the `--dry-run` flag to avoid making API calls. Using this flag creates JSON files on the local file system instead. You can review and modify the JSON files and then apply them with the AWS CLI tool using the `--cli-input-json` parameters. -==== - -.Prerequisites - -* Extract and prepare the `ccoctl` binary. - -.Procedure - -. Generate the public and private RSA key files that are used to set up the OpenID Connect provider for the cluster: -+ -[source,terminal] ----- -$ ccoctl aws create-key-pair ----- -+ -.Example output: -+ -[source,terminal] ----- -2021/04/13 11:01:02 Generating RSA keypair -2021/04/13 11:01:03 Writing private key to //serviceaccount-signer.private -2021/04/13 11:01:03 Writing public key to //serviceaccount-signer.public -2021/04/13 11:01:03 Copying signing key for use by installer ----- -+ -where `serviceaccount-signer.private` and `serviceaccount-signer.public` are the generated key files. -+ -This command also creates a private key that the cluster requires during installation in `//tls/bound-service-account-signing-key.key`. - -. Create an OpenID Connect identity provider and S3 bucket on AWS: -+ -[source,terminal] ----- -$ ccoctl aws create-identity-provider \ ---name= \ ---region= \ ---public-key-file=/serviceaccount-signer.public ----- -+ -where: -+ --- -** `` is the name used to tag any cloud resources that are created for tracking. -** `` is the AWS region in which cloud resources will be created. -** `` is the path to the public key file that the `ccoctl aws create-key-pair` command generated. --- -+ -.Example output: -+ -[source,terminal] ----- -2021/04/13 11:16:09 Bucket -oidc created -2021/04/13 11:16:10 OpenID Connect discovery document in the S3 bucket -oidc at .well-known/openid-configuration updated -2021/04/13 11:16:10 Reading public key -2021/04/13 11:16:10 JSON web key set (JWKS) in the S3 bucket -oidc at keys.json updated -2021/04/13 11:16:18 Identity Provider created with ARN: arn:aws:iam:::oidc-provider/-oidc.s3..amazonaws.com ----- -+ -where `openid-configuration` is a discovery document and `keys.json` is a JSON web key set file. -+ -This command also creates a YAML configuration file in `//manifests/cluster-authentication-02-config.yaml`. This file sets the issuer URL field for the service account tokens that the cluster generates, so that the AWS IAM identity provider trusts the tokens. - -. Create IAM roles for each component in the cluster. - -.. Extract the list of `CredentialsRequest` objects from the {product-title} release image: -+ -[source,terminal] ----- -$ oc adm release extract --credentials-requests \ ---cloud=aws \ ---to=/credrequests <1> ---from=quay.io//ocp-release: ----- -+ -<1> `credrequests` is the directory where the list of `CredentialsRequest` objects is stored. This command creates the directory if it does not exist. - -.. 
If your cluster uses cluster capabilities to disable one or more optional components, delete the `CredentialsRequest` custom resources for any disabled components. -+ -.Example `credrequests` directory contents for {product-title} 4.12 on AWS -+ -[source,terminal] ----- -0000_30_machine-api-operator_00_credentials-request.yaml <1> -0000_50_cloud-credential-operator_05-iam-ro-credentialsrequest.yaml <2> -0000_50_cluster-image-registry-operator_01-registry-credentials-request.yaml <3> -0000_50_cluster-ingress-operator_00-ingress-credentials-request.yaml <4> -0000_50_cluster-network-operator_02-cncc-credentials.yaml <5> -0000_50_cluster-storage-operator_03_credentials_request_aws.yaml <6> ----- -+ -<1> The Machine API Operator CR is required. -<2> The Cloud Credential Operator CR is required. -<3> The Image Registry Operator CR is required. -<4> The Ingress Operator CR is required. -<5> The Network Operator CR is required. -<6> The Storage Operator CR is an optional component and might be disabled in your cluster. - -.. Use the `ccoctl` tool to process all `CredentialsRequest` objects in the `credrequests` directory: -+ -[source,terminal] ----- -$ ccoctl aws create-iam-roles \ ---name= \ ---region= \ ---credentials-requests-dir=/credrequests \ ---identity-provider-arn=arn:aws:iam:::oidc-provider/-oidc.s3..amazonaws.com ----- -+ -[NOTE] -==== -For AWS environments that use alternative IAM API endpoints, such as GovCloud, you must also specify your region with the `--region` parameter. - -If your cluster uses Technology Preview features that are enabled by the `TechPreviewNoUpgrade` feature set, you must include the `--enable-tech-preview` parameter. -==== -+ -For each `CredentialsRequest` object, `ccoctl` creates an IAM role with a trust policy that is tied to the specified OIDC identity provider, and a permissions policy as defined in each `CredentialsRequest` object from the {product-title} release image. - -.Verification - -* To verify that the {product-title} secrets are created, list the files in the `/manifests` directory: -+ -[source,terminal] ----- -$ ll /manifests ----- -+ -.Example output: -+ -[source,terminal] ----- -total 24 --rw-------. 1 161 Apr 13 11:42 cluster-authentication-02-config.yaml --rw-------. 1 379 Apr 13 11:59 openshift-cloud-credential-operator-cloud-credential-operator-iam-ro-creds-credentials.yaml --rw-------. 1 353 Apr 13 11:59 openshift-cluster-csi-drivers-ebs-cloud-credentials-credentials.yaml --rw-------. 1 355 Apr 13 11:59 openshift-image-registry-installer-cloud-credentials-credentials.yaml --rw-------. 1 339 Apr 13 11:59 openshift-ingress-operator-cloud-credentials-credentials.yaml --rw-------. 1 337 Apr 13 11:59 openshift-machine-api-aws-cloud-credentials-credentials.yaml ----- - -You can verify that the IAM roles are created by querying AWS. For more information, refer to AWS documentation on listing IAM roles. 
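-
-For example, a spot check similar to the following lists any IAM roles whose names begin with the `<name>` prefix that was passed to `ccoctl aws create-iam-roles`. This is a hypothetical query, not part of the `ccoctl` workflow, and it assumes that the AWS CLI is installed and configured for the account that hosts the cluster:
-
-[source,terminal]
----
-# Hypothetical example: replace <name> with the value that was passed to --name
-$ aws iam list-roles \
-  --query "Roles[?starts_with(RoleName, '<name>')].RoleName" \
-  --output text
----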
diff --git a/modules/cco-ccoctl-deleting-sts-resources.adoc b/modules/cco-ccoctl-deleting-sts-resources.adoc deleted file mode 100644 index 3c3ceb12b8c8..000000000000 --- a/modules/cco-ccoctl-deleting-sts-resources.adoc +++ /dev/null @@ -1,122 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/uninstalling-cluster-aws.adoc -// * installing/installing_gcp/uninstalling-cluster-gcp.adoc - -ifeval::["{context}" == "uninstall-cluster-aws"] -:aws-sts: -endif::[] -ifeval::["{context}" == "uninstalling-cluster-gcp"] -:google-cloud-platform: -endif::[] - -:_content-type: PROCEDURE -[id="cco-ccoctl-deleting-sts-resources_{context}"] -ifdef::aws-sts[] -= Deleting AWS resources with the Cloud Credential Operator utility - -To clean up resources after uninstalling an {product-title} cluster with the Cloud Credential Operator (CCO) in manual mode with STS, you can use the CCO utility (`ccoctl`) to remove the AWS resources that `ccoctl` created during installation. -endif::aws-sts[] - -ifdef::google-cloud-platform[] -= Deleting GCP resources with the Cloud Credential Operator utility - -To clean up resources after uninstalling an {product-title} cluster with the Cloud Credential Operator (CCO) in manual mode with GCP Workload Identity, you can use the CCO utility (`ccoctl`) to remove the GCP resources that `ccoctl` created during installation. -endif::google-cloud-platform[] - -.Prerequisites - -* Extract and prepare the `ccoctl` binary. -ifdef::aws-sts[] -* Install an {product-title} cluster with the CCO in manual mode with STS. -endif::aws-sts[] -ifdef::google-cloud-platform[] -* Install an {product-title} cluster with the CCO in manual mode with GCP Workload Identity. -endif::google-cloud-platform[] - -.Procedure - -ifdef::aws-sts[] -* Delete the AWS resources that `ccoctl` created: -+ -[source,terminal] ----- -$ ccoctl aws delete \ - --name= \ <1> - --region= <2> ----- -+ -<1> `` matches the name that was originally used to create and tag the cloud resources. -<2> `` is the AWS region in which to delete cloud resources. 
-+ -.Example output: -+ -[source,terminal] ----- -2021/04/08 17:50:41 Identity Provider object .well-known/openid-configuration deleted from the bucket -oidc -2021/04/08 17:50:42 Identity Provider object keys.json deleted from the bucket -oidc -2021/04/08 17:50:43 Identity Provider bucket -oidc deleted -2021/04/08 17:51:05 Policy -openshift-cloud-credential-operator-cloud-credential-o associated with IAM Role -openshift-cloud-credential-operator-cloud-credential-o deleted -2021/04/08 17:51:05 IAM Role -openshift-cloud-credential-operator-cloud-credential-o deleted -2021/04/08 17:51:07 Policy -openshift-cluster-csi-drivers-ebs-cloud-credentials associated with IAM Role -openshift-cluster-csi-drivers-ebs-cloud-credentials deleted -2021/04/08 17:51:07 IAM Role -openshift-cluster-csi-drivers-ebs-cloud-credentials deleted -2021/04/08 17:51:08 Policy -openshift-image-registry-installer-cloud-credentials associated with IAM Role -openshift-image-registry-installer-cloud-credentials deleted -2021/04/08 17:51:08 IAM Role -openshift-image-registry-installer-cloud-credentials deleted -2021/04/08 17:51:09 Policy -openshift-ingress-operator-cloud-credentials associated with IAM Role -openshift-ingress-operator-cloud-credentials deleted -2021/04/08 17:51:10 IAM Role -openshift-ingress-operator-cloud-credentials deleted -2021/04/08 17:51:11 Policy -openshift-machine-api-aws-cloud-credentials associated with IAM Role -openshift-machine-api-aws-cloud-credentials deleted -2021/04/08 17:51:11 IAM Role -openshift-machine-api-aws-cloud-credentials deleted -2021/04/08 17:51:39 Identity Provider with ARN arn:aws:iam:::oidc-provider/-oidc.s3..amazonaws.com deleted ----- -//Would love a GCP version of the above output. -endif::aws-sts[] -ifdef::google-cloud-platform[] -. Obtain the {product-title} release image by running the following command: -+ -[source,terminal] ----- -$ RELEASE_IMAGE=$(./openshift-install version | awk '/release image/ {print $3}') ----- - -. Extract the list of `CredentialsRequest` custom resources (CRs) from the {product-title} release image by running the following command: -+ -[source,terminal] ----- -$ oc adm release extract --credentials-requests \ - --cloud=gcp \ - --to=/credrequests \ <1> - $RELEASE_IMAGE ----- -+ -<1> `credrequests` is the directory where the list of `CredentialsRequest` objects is stored. This command creates the directory if it does not exist. - -. Delete the GCP resources that `ccoctl` created: -+ -[source,terminal] ----- -$ ccoctl gcp delete \ - --name= \ <1> - --project= \ <2> - --credentials-requests-dir=/credrequests ----- -+ -<1> `` matches the name that was originally used to create and tag the cloud resources. -<2> `` is the GCP project ID in which to delete cloud resources. -endif::google-cloud-platform[] - -.Verification - -ifdef::aws-sts[] -* To verify that the resources are deleted, query AWS. For more information, refer to AWS documentation. -endif::aws-sts[] - -ifdef::google-cloud-platform[] -* To verify that the resources are deleted, query GCP. For more information, refer to GCP documentation. 
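-+
-For example, a hypothetical check, which assumes that the `gcloud` CLI is installed and that `<name>` is the prefix that was originally passed to `ccoctl`, is to list the service accounts that remain in the project and confirm that none with that prefix are returned:
-+
-[source,terminal]
----
-# Hypothetical example: replace <project_id> and <name> with your values
-$ gcloud iam service-accounts list \
-  --project=<project_id> \
-  --format="value(email)" | grep <name>
----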
-endif::google-cloud-platform[] - -ifeval::["{context}" == "uninstall-cluster-aws"] -:!aws-sts: -endif::[] -ifeval::["{context}" == "uninstalling-cluster-gcp"] -:!google-cloud-platform: -endif::[] diff --git a/modules/cco-ccoctl-upgrading.adoc b/modules/cco-ccoctl-upgrading.adoc deleted file mode 100644 index 6117c189f980..000000000000 --- a/modules/cco-ccoctl-upgrading.adoc +++ /dev/null @@ -1,110 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/preparing_for_updates/preparing-manual-creds-update.adoc - - -:_content-type: PROCEDURE -[id="cco-ccoctl-upgrading_{context}"] -= Updating cloud provider resources with the Cloud Credential Operator utility - -The process for upgrading an {product-title} cluster that was configured using the CCO utility (`ccoctl`) is similar to creating the cloud provider resources during installation. - -[NOTE] -==== -By default, `ccoctl` creates objects in the directory in which the commands are run. To create the objects in a different directory, use the `--output-dir` flag. This procedure uses `` to refer to this directory. - -On AWS clusters, some `ccoctl` commands make AWS API calls to create or modify AWS resources. You can use the `--dry-run` flag to avoid making API calls. Using this flag creates JSON files on the local file system instead. You can review and modify the JSON files and then apply them with the AWS CLI tool using the `--cli-input-json` parameters. -==== - -.Prerequisites - -* Obtain the {product-title} release image for the version that you are upgrading to. - -* Extract and prepare the `ccoctl` binary from the release image. - -.Procedure - -. Extract the list of `CredentialsRequest` custom resources (CRs) from the {product-title} release image by running the following command: -+ -[source,terminal] ----- -$ oc adm release extract --credentials-requests \ - --cloud= \ - --to=/credrequests \ - quay.io//ocp-release: ----- -+ -where: -+ --- -* `` is the value for your cloud provider. Valid values are `alibabacloud`, `aws`, `gcp`, `ibmcloud`, and `nutanix`. -* `credrequests` is the directory where the list of `CredentialsRequest` objects is stored. This command creates the directory if it does not exist. --- - -. For each `CredentialsRequest` CR in the release image, ensure that a namespace that matches the text in the `spec.secretRef.namespace` field exists in the cluster. This field is where the generated secrets that hold the credentials configuration are stored. -+ -.Sample AWS `CredentialsRequest` object -[source,yaml] ----- -apiVersion: cloudcredential.openshift.io/v1 -kind: CredentialsRequest -metadata: - name: cloud-credential-operator-iam-ro - namespace: openshift-cloud-credential-operator -spec: - providerSpec: - apiVersion: cloudcredential.openshift.io/v1 - kind: AWSProviderSpec - statementEntries: - - effect: Allow - action: - - iam:GetUser - - iam:GetUserPolicy - - iam:ListAccessKeys - resource: "*" - secretRef: - name: cloud-credential-operator-iam-ro-creds - namespace: openshift-cloud-credential-operator <1> ----- -<1> This field indicates the namespace which needs to exist to hold the generated secret. -+ -The `CredentialsRequest` CRs for other platforms have a similar format with different platform-specific values. - -. For any `CredentialsRequest` CR for which the cluster does not already have a namespace with the name specified in `spec.secretRef.namespace`, create the namespace by running the following command: -+ -[source,terminal] ----- -$ oc create namespace ----- - -. 
Use the `ccoctl` tool to process all `CredentialsRequest` objects in the `credrequests` directory by running the command for your cloud provider. The following commands process `CredentialsRequest` objects: -+ --- -* {alibaba}: `ccoctl alibabacloud create-ram-users` -* Amazon Web Services (AWS): `ccoctl aws create-iam-roles` -* Google Cloud Platform (GCP): `ccoctl gcp create-all` -* IBM Cloud: `ccoctl ibmcloud create-service-id` -* Nutanix: `ccoctl nutanix create-shared-secrets` --- -+ -[IMPORTANT] -==== -Refer to the `ccoctl` utility instructions in the installation content for your cloud provider for important platform-specific details about the required arguments and special considerations. -==== -+ -For each `CredentialsRequest` object, `ccoctl` creates the required provider resources and a permissions policy as defined in each `CredentialsRequest` object from the {product-title} release image. - -. Apply the secrets to your cluster by running the following command: -+ -[source,terminal] ----- -$ ls /manifests/*-credentials.yaml | xargs -I{} oc apply -f {} ----- - -.Verification - -You can verify that the required provider resources and permissions policies are created by querying the cloud provider. For more information, refer to your cloud provider documentation on listing roles or service accounts. - -.Next steps - -* Update the `upgradeable-to` annotation to indicate that the cluster is ready to upgrade. diff --git a/modules/cco-determine-mode-cli.adoc b/modules/cco-determine-mode-cli.adoc deleted file mode 100644 index 818b0666394a..000000000000 --- a/modules/cco-determine-mode-cli.adoc +++ /dev/null @@ -1,140 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/preparing_for_updates/preparing-manual-creds-update.adoc -// * authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc - -:_content-type: PROCEDURE - -ifeval::["{context}" == "preparing-manual-creds-update"] -:update: -endif::[] -ifeval::["{context}" == "about-cloud-credential-operator"] -:about-cco: -endif::[] - -[id="cco-determine-mode-cli_{context}"] -= Determining the Cloud Credential Operator mode by using the CLI - -You can determine what mode the Cloud Credential Operator (CCO) is configured to use by using the CLI. - -[NOTE] -==== -Only Amazon Web Services (AWS), global Microsoft Azure, and Google Cloud Platform (GCP) clusters support multiple CCO modes. -==== - -.Prerequisites - -* You have access to an {product-title} account with cluster administrator permissions. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Log in to `oc` on the cluster as a user with the `cluster-admin` role. - -. To determine the mode that the CCO is configured to use, enter the following command: -+ -[source,terminal] ----- -$ oc get cloudcredentials cluster \ - -o=jsonpath={.spec.credentialsMode} ----- -+ -The following output values are possible, though not all are supported on all platforms: -+ --- -* `''`: The CCO is operating in the default mode. In this configuration, the CCO operates in mint or passthrough mode, depending on the credentials provided during installation. -* `Mint`: The CCO is operating in mint mode. -* `Passthrough`: The CCO is operating in passthrough mode. -* `Manual`: The CCO is operating in manual mode. --- -+ -[IMPORTANT] -==== -To determine the specific configuration of an AWS or GCP cluster that has a `spec.credentialsMode` of `''`, `Mint`, or `Manual`, you must investigate further. 
- -AWS and GCP clusters support using mint mode with the root secret deleted. -ifdef::update[] -If the cluster is specifically configured to use mint mode or uses mint mode by default, you must determine if the root secret is present on the cluster before updating. -endif::update[] - -An AWS or GCP cluster that uses manual mode might be configured to create and manage cloud credentials from outside of the cluster using the AWS Security Token Service (STS) or GCP Workload Identity. You can determine whether your cluster uses this strategy by examining the cluster `Authentication` object. -==== - -ifdef::about-cco[] -. AWS or GCP clusters that use the default (`''`) only: To determine whether the cluster is operating in mint or passthrough mode, run the following command: -+ -[source,terminal] ----- -$ oc get secret \ - -n kube-system \ - -o jsonpath \ - --template '{ .metadata.annotations }' ----- -+ -where `` is `aws-creds` for AWS or `gcp-credentials` for GCP. -+ -This command displays the value of the `.metadata.annotations` parameter in the cluster root secret object. The following output values are possible: -+ --- -* `Mint`: The CCO is operating in mint mode. -* `Passthrough`: The CCO is operating in passthrough mode. --- -+ -If your cluster uses mint mode, you can also determine whether the cluster is operating without the root secret. -endif::about-cco[] - -. AWS or GCP clusters that use mint mode only: To determine whether the cluster is operating without the root secret, run the following command: -+ -[source,terminal] ----- -$ oc get secret \ - -n=kube-system ----- -+ -where `` is `aws-creds` for AWS or `gcp-credentials` for GCP. -+ -If the root secret is present, the output of this command returns information about the secret. An error indicates that the root secret is not present on the cluster. - -. AWS or GCP clusters that use manual mode only: To determine whether the cluster is configured to create and manage cloud credentials from outside of the cluster, run the following command: -+ -[source,terminal] ----- -$ oc get authentication cluster \ - -o jsonpath \ - --template='{ .spec.serviceAccountIssuer }' ----- -+ -This command displays the value of the `.spec.serviceAccountIssuer` parameter in the cluster `Authentication` object. -+ --- -* An output of a URL that is associated with your cloud provider indicates that the CCO is using manual mode with AWS STS or GCP Workload Identity to create and manage cloud credentials from outside of the cluster. These clusters are configured using the `ccoctl` utility. - -* An empty output indicates that the cluster is using the CCO in manual mode but was not configured using the `ccoctl` utility. --- - -ifdef::update[] -.Next steps - -* If you are updating a cluster that has the CCO operating in mint or passthrough mode and the root secret is present, you do not need to update any cloud provider resources and can continue to the next part of the update process. - -* If your cluster is using the CCO in mint mode with the root secret removed, you must reinstate the credential secret with the administrator-level credential before continuing to the next part of the update process. - -* If your cluster was configured using the CCO utility (`ccoctl`), you must take the following actions: - -.. Configure the `ccoctl` utility for the new release and use it to update the cloud provider resources. - -.. Update the `upgradeable-to` annotation to indicate that the cluster is ready to update. 
- -* If your cluster is using the CCO in manual mode but was not configured using the `ccoctl` utility, you must take the following actions: - -.. Manually update the cloud provider resources for the new release. - -.. Update the `upgradeable-to` annotation to indicate that the cluster is ready to update. -endif::update[] - -ifeval::["{context}" == "preparing-manual-creds-update"] -:!update: -endif::[] -ifeval::["{context}" == "about-cloud-credential-operator"] -:!about-cco: -endif::[] diff --git a/modules/cco-determine-mode-gui.adoc b/modules/cco-determine-mode-gui.adoc deleted file mode 100644 index d81b66a463db..000000000000 --- a/modules/cco-determine-mode-gui.adoc +++ /dev/null @@ -1,163 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/preparing_for_updates/preparing-manual-creds-update.adoc -// * authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc - -:_content-type: PROCEDURE - -ifeval::["{context}" == "preparing-manual-creds-update"] -:update: -endif::[] -ifeval::["{context}" == "about-cloud-credential-operator"] -:about-cco: -endif::[] - -[id="cco-determine-mode-gui_{context}"] -= Determining the Cloud Credential Operator mode by using the web console - -You can determine what mode the Cloud Credential Operator (CCO) is configured to use by using the web console. - -[NOTE] -==== -Only Amazon Web Services (AWS), global Microsoft Azure, and Google Cloud Platform (GCP) clusters support multiple CCO modes. -==== - -.Prerequisites - -* You have access to an {product-title} account with cluster administrator permissions. - -.Procedure - -. Log in to the {product-title} web console as a user with the `cluster-admin` role. - -. Navigate to *Administration* -> *Cluster Settings*. - -. On the *Cluster Settings* page, select the *Configuration* tab. - -. Under *Configuration resource*, select *CloudCredential*. - -. On the *CloudCredential details* page, select the *YAML* tab. - -. In the YAML block, check the value of `spec.credentialsMode`. The following values are possible, though not all are supported on all platforms: -+ --- -* `''`: The CCO is operating in the default mode. In this configuration, the CCO operates in mint or passthrough mode, depending on the credentials provided during installation. -* `Mint`: The CCO is operating in mint mode. -* `Passthrough`: The CCO is operating in passthrough mode. -* `Manual`: The CCO is operating in manual mode. --- -+ -[IMPORTANT] -==== -To determine the specific configuration of an AWS or GCP cluster that has a `spec.credentialsMode` of `''`, `Mint`, or `Manual`, you must investigate further. - -AWS and GCP clusters support using mint mode with the root secret deleted. -ifdef::update[] -If the cluster is specifically configured to use mint mode or uses mint mode by default, you must determine if the root secret is present on the cluster before updating. -endif::update[] - -An AWS or GCP cluster that uses manual mode might be configured to create and manage cloud credentials from outside of the cluster using the AWS Security Token Service (STS) or GCP Workload Identity. You can determine whether your cluster uses this strategy by examining the cluster `Authentication` object. -==== - -ifdef::about-cco[] -. AWS or GCP clusters that use the default (`''`) only: To determine whether the cluster is operating in mint or passthrough mode, inspect the annotations on the cluster root secret: - -.. Navigate to *Workloads* -> *Secrets* and look for the root secret for your cloud provider. 
-+ -[NOTE] -==== -Ensure that the *Project* dropdown is set to *All Projects*. -==== -+ -[cols=2,options=header] -|=== -|Platform -|Secret name - -|AWS -|`aws-creds` - -|GCP -|`gcp-credentials` - -|=== - -.. To view the CCO mode that the cluster is using, click `1 annotation` under *Annotations*, and check the value field. The following values are possible: -+ --- -* `Mint`: The CCO is operating in mint mode. -* `Passthrough`: The CCO is operating in passthrough mode. --- -+ -If your cluster uses mint mode, you can also determine whether the cluster is operating without the root secret. -endif::about-cco[] - -. AWS or GCP clusters that use mint mode only: To determine whether the cluster is operating without the root secret, navigate to *Workloads* -> *Secrets* and look for the root secret for your cloud provider. -+ -[NOTE] -==== -Ensure that the *Project* dropdown is set to *All Projects*. -==== -+ -[cols=2,options=header] -|=== -|Platform -|Secret name - -|AWS -|`aws-creds` - -|GCP -|`gcp-credentials` - -|=== -+ --- -* If you see one of these values, your cluster is using mint or passthrough mode with the root secret present. -* If you do not see these values, your cluster is using the CCO in mint mode with the root secret removed. --- - -. AWS or GCP clusters that use manual mode only: To determine whether the cluster is configured to create and manage cloud credentials from outside of the cluster, you must check the cluster `Authentication` object YAML values. - -.. Navigate to *Administration* -> *Cluster Settings*. - -.. On the *Cluster Settings* page, select the *Configuration* tab. - -.. Under *Configuration resource*, select *Authentication*. - -.. On the *Authentication details* page, select the *YAML* tab. - -.. In the YAML block, check the value of the `.spec.serviceAccountIssuer` parameter. -+ --- -* A value that contains a URL that is associated with your cloud provider indicates that the CCO is using manual mode with AWS STS or GCP Workload Identity to create and manage cloud credentials from outside of the cluster. These clusters are configured using the `ccoctl` utility. - -* An empty value (`''`) indicates that the cluster is using the CCO in manual mode but was not configured using the `ccoctl` utility. --- - -ifdef::update[] -.Next steps - -* If you are updating a cluster that has the CCO operating in mint or passthrough mode and the root secret is present, you do not need to update any cloud provider resources and can continue to the next part of the update process. - -* If your cluster is using the CCO in mint mode with the root secret removed, you must reinstate the credential secret with the administrator-level credential before continuing to the next part of the update process. - -* If your cluster was configured using the CCO utility (`ccoctl`), you must take the following actions: - -.. Configure the `ccoctl` utility for the new release and use it to update the cloud provider resources. - -.. Update the `upgradeable-to` annotation to indicate that the cluster is ready to update. - -* If your cluster is using the CCO in manual mode but was not configured using the `ccoctl` utility, you must take the following actions: - -.. Manually update the cloud provider resources for the new release. - -.. Update the `upgradeable-to` annotation to indicate that the cluster is ready to update. 
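-
-For example, one hypothetical way to set the `upgradeable-to` annotation mentioned in the preceding steps, instead of editing the `CloudCredential` resource interactively, is to apply a merge patch:
-
-[source,terminal]
----
-# Hypothetical example: replace <version_number> with the version you are updating to, such as 4.12.2
-$ oc patch cloudcredential cluster --type=merge \
-  -p '{"metadata":{"annotations":{"cloudcredential.openshift.io/upgradeable-to":"<version_number>"}}}'
----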
-endif::update[] - -ifeval::["{context}" == "preparing-manual-creds-update"] -:!update: -endif::[] -ifeval::["{context}" == "about-cloud-credential-operator"] -:!about-cco: -endif::[] \ No newline at end of file diff --git a/modules/cco-manual-upgrade-annotation.adoc b/modules/cco-manual-upgrade-annotation.adoc deleted file mode 100644 index 37e3974ada89..000000000000 --- a/modules/cco-manual-upgrade-annotation.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc -// * updating/preparing_for_updates/preparing-manual-creds-update.adoc - -:_content-type: PROCEDURE - -[id="cco-manual-upgrade-annotation_{context}"] -= Indicating that the cluster is ready to upgrade - -The Cloud Credential Operator (CCO) `Upgradable` status for a cluster with manually maintained credentials is `False` by default. - -.Prerequisites - -* For the release image that you are upgrading to, you have processed any new credentials manually or by using the Cloud Credential Operator utility (`ccoctl`). -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Log in to `oc` on the cluster as a user with the `cluster-admin` role. - -. Edit the `CloudCredential` resource to add an `upgradeable-to` annotation within the `metadata` field by running the following command: -+ -[source,terminal] ----- -$ oc edit cloudcredential cluster ----- -+ -.Text to add -+ -[source,yaml] ----- -... - metadata: - annotations: - cloudcredential.openshift.io/upgradeable-to: -... ----- -+ -Where `` is the version that you are upgrading to, in the format `x.y.z`. For example, use `4.12.2` for {product-title} 4.12.2. -+ -It may take several minutes after adding the annotation for the upgradeable status to change. - -.Verification - -//Would like to add CLI steps for same -. In the *Administrator* perspective of the web console, navigate to *Administration* -> *Cluster Settings*. - -. To view the CCO status details, click *cloud-credential* in the *Cluster Operators* list. -+ --- -* If the *Upgradeable* status in the *Conditions* section is *False*, verify that the `upgradeable-to` annotation is free of typographical errors. --- - -. When the *Upgradeable* status in the *Conditions* section is *True*, begin the {product-title} upgrade. \ No newline at end of file diff --git a/modules/ccs-aws-customer-procedure.adoc b/modules/ccs-aws-customer-procedure.adoc deleted file mode 100644 index d2e64d20d051..000000000000 --- a/modules/ccs-aws-customer-procedure.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/aws-ccs.adoc - -:_content-type: PROCEDURE -[id="ccs-aws-customer-procedure_{context}"] -= Required customer procedure -// TODO: Better procedure heading that tells you what this is doing - -The Customer Cloud Subscription (CCS) model allows Red Hat to deploy and manage {product-title} into a customer’s Amazon Web Services (AWS) account. Red Hat requires several prerequisites in order to provide these services. - -.Procedure - -. If the customer is using AWS Organizations, you must either use an AWS account within your organization or link:https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html#orgs_manage_accounts_create-new[create a new one]. - -. To ensure that Red Hat can perform necessary actions, you must either create a service control policy (SCP) or ensure that none is applied to the AWS account. - -. 
link:https://docs.aws.amazon.com/organizations/latest/userguide/orgs_introduction.html[Attach] the SCP to the AWS account. - -. Within the AWS account, you must link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html[create] an `osdCcsAdmin` IAM user with the following requirements: -** This user needs at least *Programmatic access* enabled. -** This user must have the `AdministratorAccess` policy attached to it. - -. Provide the IAM user credentials to Red Hat. -** You must provide the *access key ID* and *secret access key* in {cluster-manager-url}. diff --git a/modules/ccs-aws-customer-requirements.adoc b/modules/ccs-aws-customer-requirements.adoc deleted file mode 100644 index 16f2ed31ab09..000000000000 --- a/modules/ccs-aws-customer-requirements.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/aws-ccs.adoc - -[id="ccs-aws-customer-requirements_{context}"] -= Customer requirements - - -{product-title} clusters using a Customer Cloud Subscription (CCS) model on Amazon Web Services (AWS) must meet several prerequisites before they can be deployed. - -[id="ccs-requirements-account_{context}"] -== Account - -* The customer ensures that link:https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html[AWS limits] are sufficient to support {product-title} provisioned within the customer-provided AWS account. - -* The customer-provided AWS account should be in the customer's AWS Organization with the applicable service control policy (SCP) applied. -+ -[NOTE] -==== -It is not a requirement that the customer-provided account be within an AWS Organization or for the SCP to be applied, however Red Hat must be able to perform all the actions listed in the SCP without restriction. -==== - -* The customer-provided AWS account must not be transferable to Red Hat. - -* The customer may not impose AWS usage restrictions on Red Hat activities. Imposing restrictions severely hinders Red Hat's ability to respond to incidents. - -* Red Hat deploys monitoring into AWS to alert Red Hat when a highly privileged account, such as a root account, logs into the customer-provided AWS account. - -* The customer can deploy native AWS services within the same customer-provided AWS account. -+ -[NOTE] -==== -Customers are encouraged, but not mandated, to deploy resources in a Virtual Private Cloud (VPC) separate from the VPC hosting {product-title} and other Red Hat supported services. -==== - -[id="ccs-requirements-access_{context}"] -== Access requirements - -* To appropriately manage the {product-title} service, Red Hat must have the `AdministratorAccess` policy applied to the administrator role at all times. -+ -[NOTE] -==== -This policy only provides Red Hat with permissions and capabilities to change resources in the customer-provided AWS account. -==== - -* Red Hat must have AWS console access to the customer-provided AWS account. This access is protected and managed by Red Hat. - -* The customer must not utilize the AWS account to elevate their permissions within the {product-title} cluster. - -* Actions available in {cluster-manager-url} must not be directly performed in the customer-provided AWS account. - -[id="ccs-requirements-support_{context}"] -== Support requirements - -* Red Hat recommends that the customer have at least link:https://aws.amazon.com/premiumsupport/plans/[Business Support] from AWS. - -* Red Hat has authority from the customer to request AWS support on their behalf. 
- -* Red Hat has authority from the customer to request AWS resource limit increases on the customer-provided account. - -* Red Hat manages the restrictions, limitations, expectations, and defaults for all {product-title} clusters in the same manner, unless otherwise specified in this requirements section. - -[id="ccs-requirements-security_{context}"] -== Security requirements - -* The customer-provided IAM credentials must be unique to the customer-provided AWS account and must not be stored anywhere in the customer-provided AWS account. - -* Volume snapshots will remain within the customer-provided AWS account and customer-specified region. - -* Red Hat must have ingress access to EC2 hosts and the API server through white-listed Red Hat machines. - -* Red Hat must have egress allowed to forward system and audit logs to a Red Hat managed central logging stack. diff --git a/modules/ccs-aws-iam.adoc b/modules/ccs-aws-iam.adoc deleted file mode 100644 index 09699c1f5372..000000000000 --- a/modules/ccs-aws-iam.adoc +++ /dev/null @@ -1,117 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/aws-ccs.adoc - -[id="ccs-aws-iam_{context}"] -= Red Hat managed IAM references for AWS - -Red Hat is responsible for creating and managing the following Amazon Web Services (AWS) resources: IAM policies, IAM users, and IAM roles. - -[id="aws-policy-iam-policies_{context}"] -== IAM policies - -[NOTE] -==== -IAM policies are subject to modification as the capabilities of {product-title} change. -==== - -* The `AdministratorAccess` policy is used by the administration role. This policy provides Red Hat the access necessary to administer the {product-title} cluster in the customer-provided AWS account. -+ -[source,json] ----- -{ - "Version": "2012-10-17", - "Statement": [ - { - "Action": "*", - "Resource": "*", - "Effect": "Allow" - } - ] -} ----- - -* The `CustomerAdministratorAccess` role provides the customer access to administer a subset of services within the AWS account. At this time, the following are allowed: - -** VPC Peering -** VPN Setup -** Direct Connect (only available if granted through the service control policy) -+ -[source,json] ----- -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:AttachVpnGateway", - "ec2:DescribeVpnConnections", - "ec2:AcceptVpcPeeringConnection", - "ec2:DeleteVpcPeeringConnection", - "ec2:DescribeVpcPeeringConnections", - "ec2:CreateVpnConnectionRoute", - "ec2:RejectVpcPeeringConnection", - "ec2:DetachVpnGateway", - "ec2:DeleteVpnConnectionRoute", - "ec2:DeleteVpnGateway", - "ec2:DescribeVpcs", - "ec2:CreateVpnGateway", - "ec2:ModifyVpcPeeringConnectionOptions", - "ec2:DeleteVpnConnection", - "ec2:CreateVpcPeeringConnection", - "ec2:DescribeVpnGateways", - "ec2:CreateVpnConnection", - "ec2:DescribeRouteTables", - "ec2:CreateTags", - "ec2:CreateRoute", - "directconnect:*" - ], - "Resource": "*" - } - ] -} ----- - - -* If enabled, the `BillingReadOnlyAccess` role provides read-only access to view billing and usage information for the account. -+ -Billing and usage access is only granted if the root account in the AWS Organization has it enabled. This is an optional step the customer must perform to enable read-only billing and usage access and does not impact the creation of this profile and the role that uses it. If this role is not enabled, users will not see billing and usage information. 
See this tutorial on link:https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_billing.html#tutorial-billing-step1[how to enable access to billing data]. -+ -[source,json] ----- -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "aws-portal:ViewAccount", - "aws-portal:ViewBilling" - ], - "Resource": "*" - } - ] -} ----- - -[id="aws-policy-iam-users_{context}"] -== IAM users - -The `osdManagedAdmin` user is created immediately after taking control of the customer-provided AWS account. This is the user that will perform the {product-title} cluster installation. - -[id="aws-policy-iam-roles_{context}"] -== IAM roles - -* The `network-mgmt` role provides customer-federated administrative access to the AWS account through a separate AWS account. It also has the same access as a read-only role. The `network-mgmt` role only applies to non-Customer Cloud Subscription (CCS) clusters. The following policies are attached to the role: - -** AmazonEC2ReadOnlyAccess -** CustomerAdministratorAccess - -* The `read-only` role provides customer-federated read-only access to the AWS account through a separate AWS account. The following policies are attached to the role: - -** AWSAccountUsageReportAccess -** AmazonEC2ReadOnlyAccess -** AmazonS3ReadOnlyAccess -** IAMReadOnlyAccess -** BillingReadOnlyAccess diff --git a/modules/ccs-aws-provisioned.adoc b/modules/ccs-aws-provisioned.adoc deleted file mode 100644 index b17d3f0205cd..000000000000 --- a/modules/ccs-aws-provisioned.adoc +++ /dev/null @@ -1,86 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/aws-ccs.adoc - -[id="ccs-aws-provisioned_{context}"] -= Provisioned AWS Infrastructure - - -This is an overview of the provisioned Amazon Web Services (AWS) components on a deployed {product-title} cluster. For a more detailed listing of all provisioned AWS components, see the link:https://access.redhat.com/documentation/en-us/openshift_container_platform/[{OCP} documentation]. - -[id="aws-policy-ec2_{context}"] -== AWS Elastic Computing (EC2) instances - -AWS EC2 instances are required to deploy the control plane and data plane functions of {product-title} in the AWS public cloud. Instance types might vary for control plane and infrastructure nodes depending on worker node count. - -* Single availability zone -** 3 m5.2xlarge minimum (control plane nodes) -** 2 r5.xlarge minimum (infrastructure nodes) -** 2 m5.xlarge minimum but highly variable (worker nodes) - -* Multiple availability zones -** 3 m5.2xlarge minimum (control plane nodes) -** 3 r5.xlarge minimum (infrastructure nodes) -** 3 m5.xlarge minimum but highly variable (worker nodes) - -[id="aws-policy-ebs-storage_{context}"] -== AWS Elastic Block Store (EBS) storage - -Amazon EBS block storage is used for both local node storage and persistent volume storage. - -Volume requirements for each EC2 instance: - -- Control plane volumes -* Size: 350 GB -* Type: io1 -* Input/output operations per second: 1000 - -- Infrastructure volumes -* Size: 300 GB -* Type: gp2 -* Input/output operations per second: 900 - -- Worker volumes -* Size: 300 GB -* Type: gp2 -* Input/output operations per second: 900 - -[id="aws-policy-elastic-load-balancers_{context}"] -== Elastic Load Balancing (ELB) load balancers - -Up to two Network Load Balancers for API and up to two Classic Load Balancers for application router. 
For more information, see the link:https://aws.amazon.com/elasticloadbalancing/features/#Details_for_Elastic_Load_Balancing_Products[ELB documentation for AWS]. - -[id="aws-policy-s3-storage_{context}"] -== S3 storage -The image registry and Elastic Block Store (EBS) volume snapshots are backed by AWS S3 storage. Pruning of resources is performed regularly to optimize S3 usage and cluster performance. - -[NOTE] -==== -Two buckets are required with a typical size of 2 TB each. -==== - -[id="aws-policy-vpc_{context}"] -== VPC -Customers should expect to see one VPC per cluster. Additionally, the VPC needs the following configurations: - -* *Subnets*: Two subnets for a cluster with a single availability zone, or six subnets for a cluster with multiple availability zones. -+ -[NOTE] -==== -A *public subnet* connects directly to the internet through an internet gateway. A *private subnet* connects to the internet through a network address translation (NAT) gateway. -==== -+ -* *Route tables*: One route table per private subnet, and one additional table per cluster. - -* *Internet gateways*: One Internet Gateway per cluster. - -* *NAT gateways*: One NAT Gateway per public subnet. - -=== Sample VPC Architecture - -image::VPC-Diagram.png[VPC Reference Architecture] - -[id="aws-policy-security-groups_{context}"] -== Security groups - -AWS security groups provide security at the protocol and port-access level; they are associated with EC2 instances and Elastic Load Balancing. Each security group contains a set of rules that filter traffic coming in and out of an EC2 instance. You must ensure the ports required for the link:https://docs.openshift.com/container-platform/4.7/installing/installing_aws/installing-aws-user-infra.html#installation-aws-user-infra-other-infrastructure_installing-aws-user-infra[{OCP} installation] are open on your network and configured to allow access between hosts. diff --git a/modules/ccs-aws-scp.adoc b/modules/ccs-aws-scp.adoc deleted file mode 100644 index 677e6ad70ace..000000000000 --- a/modules/ccs-aws-scp.adoc +++ /dev/null @@ -1,205 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/aws-ccs.adoc - -[id="ccs-aws-scp_{context}"] -= Minimum required service control policy (SCP) - - -Service control policy (SCP) management is the responsibility of the customer. These policies are maintained in the AWS Organization and control what services are available within the attached AWS accounts. 
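-
-Creating and attaching the SCP happens entirely within the customer's AWS Organization. As a hypothetical illustration only, attaching an existing SCP to the Organizational Unit or account that hosts the cluster with the AWS CLI might look like the following; the policy and target IDs are placeholders:
-
-[source,terminal]
----
-# Hypothetical example: replace <policy_id> and <ou_or_account_id> with your values
-$ aws organizations attach-policy \
-  --policy-id <policy_id> \
-  --target-id <ou_or_account_id>
----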
- -[cols="2a,2a,2a,2a",options="header"] - -|=== -| Required/optional -| Service -| Actions -| Effect - -.15+| Required -|Amazon EC2 | All |Allow -|Amazon EC2 Auto Scaling | All |Allow -|Amazon S3| All |Allow -|Identity And Access Management | All |Allow -|Elastic Load Balancing | All |Allow -|Elastic Load Balancing V2| All |Allow -|Amazon CloudWatch | All |Allow -|Amazon CloudWatch Events | All |Allow -|Amazon CloudWatch Logs | All |Allow -|AWS Support | All |Allow -|AWS Key Management Service | All |Allow -|AWS Security Token Service | All |Allow -|AWS Resource Tagging | All |Allow -|AWS Route53 DNS | All |Allow -|AWS Service Quotas | ListServices - -GetRequestedServiceQuotaChange - -GetServiceQuota - -RequestServiceQuotaIncrease - -ListServiceQuotas -| Allow - - -.3+|Optional - -| AWS Billing -| ViewAccount - -Viewbilling - -ViewUsage -| Allow - -|AWS Cost and Usage Report -|All -|Allow - -|AWS Cost Explorer Services -|All -|Allow - - -|=== - -// TODO: Need some sort of intro into whatever this is -[source,json] ----- -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "autoscaling:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "s3:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "iam:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "elasticloadbalancing:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "cloudwatch:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "events:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "logs:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "support:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "kms:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "sts:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "tag:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "route53:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "servicequotas:ListServices", - "servicequotas:GetRequestedServiceQuotaChange", - "servicequotas:GetServiceQuota", - "servicequotas:RequestServiceQuotaIncrease", - "servicequotas:ListServiceQuotas" - ], - "Resource": [ - "*" - ] - } - ] -} ----- diff --git a/modules/ccs-aws-understand.adoc b/modules/ccs-aws-understand.adoc deleted file mode 100644 index b5c6ae6218bc..000000000000 --- a/modules/ccs-aws-understand.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/aws-ccs.adoc - -:_content-type: CONCEPT -[id="ccs-aws-understand_{context}"] -= Understanding Customer Cloud Subscriptions on AWS - - -To deploy {product-title} into your existing Amazon Web Services (AWS) account using the Customer Cloud Subscription (CCS) model, Red Hat requires several prerequisites be met. - -Red Hat recommends the usage of an AWS Organization to manage multiple AWS accounts. The AWS Organization, managed by the customer, hosts multiple AWS accounts. There is a root account in the organization that all accounts will refer to in the account hierarchy. - -It is recommended for the {product-title} cluster using a CCS model to be hosted in an AWS account within an AWS Organizational Unit. 
A service control policy (SCP) is created and applied to the AWS Organizational Unit that manages what services the AWS sub-accounts are permitted to access. The SCP applies only to available permissions within a single AWS account for all AWS sub-accounts within the Organizational Unit. It is also possible to apply a SCP to a single AWS account. All other accounts in the customer’s AWS Organization are managed in whatever manner the customer requires. Red Hat Site Reliability Engineers (SRE) will not have any control over SCPs within the AWS Organization. diff --git a/modules/ccs-gcp-customer-procedure.adoc b/modules/ccs-gcp-customer-procedure.adoc deleted file mode 100644 index 7a22e6e38685..000000000000 --- a/modules/ccs-gcp-customer-procedure.adoc +++ /dev/null @@ -1,116 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/gcp-ccs.adoc -:_content-type: PROCEDURE -[id="ccs-gcp-customer-procedure_{context}"] - -= Required customer procedure -// TODO: Same as other module - Better procedure heading that tells you what this is doing - - -The Customer Cloud Subscription (CCS) model allows Red Hat to deploy and manage {product-title} into a customer's Google Cloud Platform (GCP) project. Red Hat requires several prerequisites to provide these services. - -[WARNING] -==== -To use {product-title} in your GCP project, the following GCP organizational policy constraints cannot be in place: - -* `constraints/iam.allowedPolicyMemberDomains` -* `constraints/compute.restrictLoadBalancerCreationForTypes` -* `constraints/compute.requireShieldedVm` -* `constraints/compute.vmExternalIpAccess` (This policy constraint is unsupported only during installation. You can re-enable the policy constraint after installation.) -==== - -.Procedure - -. link:https://cloud.google.com/resource-manager/docs/creating-managing-projects[Create a Google Cloud project] to host the {product-title} cluster. -+ -[NOTE] -==== -The project name must be 10 characters or less. -==== - -. 
link:https://cloud.google.com/service-usage/docs/enable-disable#enabling[Enable] the following required APIs in the project that hosts your {product-title} cluster: -+ -.Required API services -[cols="2a,3a",options="header"] -|=== -|API service |Console service name - - -|link:https://console.cloud.google.com/apis/library/deploymentmanager.googleapis.com?pli=1&project=openshift-gce-devel&folder=&organizationId=[Cloud Deployment Manager V2 API] -|`deploymentmanager.googleapis.com` - - -|link:https://console.cloud.google.com/apis/library/compute.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Compute Engine API] -|`compute.googleapis.com` - -|link:https://console.cloud.google.com/apis/library/cloudapis.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Google Cloud APIs] -|`cloudapis.googleapis.com` - -|link:https://console.cloud.google.com/apis/library/cloudresourcemanager.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Cloud Resource Manager API] -|`cloudresourcemanager.googleapis.com` - -|link:https://console.cloud.google.com/apis/library/dns.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Google DNS API] -|`dns.googleapis.com` - -|link:https://console.cloud.google.com/apis/library/networksecurity.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Network Security API] -|`networksecurity.googleapis.com` - -|link:https://console.cloud.google.com/apis/library/iamcredentials.googleapis.com[IAM Service Account Credentials API] -|`iamcredentials.googleapis.com` - -|link:https://console.cloud.google.com/apis/library/iam.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Identity and Access Management (IAM) API] -|`iam.googleapis.com` - -|link:https://console.cloud.google.com/apis/library/servicemanagement.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Service Management API] -|`servicemanagement.googleapis.com` - -|link:https://console.cloud.google.com/apis/library/serviceusage.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Service Usage API] -|`serviceusage.googleapis.com` - -|link:https://console.cloud.google.com/apis/library/storage-api.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Google Cloud Storage JSON API] -|`storage-api.googleapis.com` - -|link:https://console.cloud.google.com/apis/library/storage-component.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Cloud Storage] -|`storage-component.googleapis.com` - -|=== - -. To ensure that Red Hat can perform necessary actions, you must create an `osd-ccs-admin` IAM link:https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating_a_service_account[service account] user within the GCP project. -+ -The following roles must be link:https://cloud.google.com/iam/docs/granting-roles-to-service-accounts#granting_access_to_a_service_account_for_a_resource[granted to the service account]: -+ -.Required roles -[cols="2a,3a",options="header"] - -|=== - -|Role|Console role name - -|Compute Admin -|`roles/compute.admin` - -|DNS Admin -|`roles/dns.admin` - -|Organizational Policy Viewer -|`roles/orgpolicy.policyViewer` - -|Owner -|`roles/owner` - -|Project IAM Admin -|`roles/resourcemanager.projectIamAdmin` - -|Service Management Administrator -|`roles/servicemanagement.admin` - -|Service Usage Admin -|`roles/serviceusage.serviceUsageAdmin` - -|Storage Admin -|`roles/storage.admin` - -|=== - -. 
link:https://cloud.google.com/iam/docs/creating-managing-service-account-keys#creating_service_account_keys[Create the service account key] for the `osd-ccs-admin` IAM service account. Export the key to a file named `osServiceAccount.json`; this JSON file will be uploaded in {cluster-manager-first} when you create your cluster. diff --git a/modules/ccs-gcp-customer-requirements.adoc b/modules/ccs-gcp-customer-requirements.adoc deleted file mode 100644 index de3f6ce96bf9..000000000000 --- a/modules/ccs-gcp-customer-requirements.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/gcp-ccs.adoc - -[id="ccs-gcp-customer-requirements_{context}"] -= Customer requirements - - -{product-title} clusters using a Customer Cloud Subscription (CCS) model on Google Cloud Platform (GCP) must meet several prerequisites before they can be deployed. - -[id="ccs-gcp-requirements-account_{context}"] -== Account - -* The customer ensures that link:https://cloud.google.com/storage/quotas[Google Cloud limits] are sufficient to support {product-title} provisioned within the customer-provided GCP account. - -* The customer-provided GCP account should be in the customer's Google Cloud Organization with the applicable Service Account applied. - -* The customer-provided GCP account must not be transferable to Red Hat. - -* The customer may not impose GCP usage restrictions on Red Hat activities. Imposing restrictions severely hinders Red Hat's ability to respond to incidents. - -* Red Hat deploys monitoring into GCP to alert Red Hat when a highly privileged account, such as a root account, logs into the customer-provided GCP account. - -* The customer can deploy native GCP services within the same customer-provided GCP account. -+ -[NOTE] -==== -Customers are encouraged, but not mandated, to deploy resources in a Virtual Private Cloud (VPC) separate from the VPC hosting {product-title} and other Red Hat supported services. -==== - -[id="ccs-gcp-requirements-access_{context}"] -== Access requirements - -* To appropriately manage the {product-title} service, Red Hat must have the `AdministratorAccess` policy applied to the administrator role at all times. -+ -[NOTE] -==== -This policy only provides Red Hat with permissions and capabilities to change resources in the customer-provided GCP account. -==== - -* Red Hat must have GCP console access to the customer-provided GCP account. This access is protected and managed by Red Hat. - -* The customer must not utilize the GCP account to elevate their permissions within the {product-title} cluster. - -* Actions available in the {cluster-manager-url} must not be directly performed in the customer-provided GCP account. - -[id="ccs-gcp-requirements-support_{context}"] -== Support requirements - -* Red Hat recommends that the customer have at least link:https://cloud.google.com/support[Production Support] from GCP. - -* Red Hat has authority from the customer to request GCP support on their behalf. - -* Red Hat has authority from the customer to request GCP resource limit increases on the customer-provided account. - -* Red Hat manages the restrictions, limitations, expectations, and defaults for all {product-title} clusters in the same manner, unless otherwise specified in this requirements section. 
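Referring back to the final steps of the required customer procedure, the `osd-ccs-admin` service account, its role bindings, and its key can also be created with the gcloud CLI. The following is a sketch only; `<project_id>` is a placeholder for your GCP project, and only the Owner role grant is shown. Grant the remaining roles from the required roles table in the same way.

[source,terminal]
----
# Create the osd-ccs-admin service account in the project.
$ gcloud iam service-accounts create osd-ccs-admin \
    --display-name="osd-ccs-admin" \
    --project=<project_id>

# Grant one of the required roles. Repeat for each role in the required roles table.
$ gcloud projects add-iam-policy-binding <project_id> \
    --member="serviceAccount:osd-ccs-admin@<project_id>.iam.gserviceaccount.com" \
    --role="roles/owner"

# Export the service account key to osServiceAccount.json for upload to OpenShift Cluster Manager.
$ gcloud iam service-accounts keys create osServiceAccount.json \
    --iam-account=osd-ccs-admin@<project_id>.iam.gserviceaccount.com
----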
- -[id="ccs-gcp-requirements-security_{context}"] -== Security requirements - -* The customer-provided IAM credentials must be unique to the customer-provided GCP account and must not be stored anywhere in the customer-provided GCP account. - -* Volume snapshots will remain within the customer-provided GCP account and customer-specified region. - -* Red Hat must have ingress access to the API server through white-listed Red Hat machines. - -* Red Hat must have egress allowed to forward system and audit logs to a Red Hat managed central logging stack. diff --git a/modules/ccs-gcp-iam.adoc b/modules/ccs-gcp-iam.adoc deleted file mode 100644 index 7bdee7a0dece..000000000000 --- a/modules/ccs-gcp-iam.adoc +++ /dev/null @@ -1,108 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/gcp-ccs.adoc -[id="ccs-gcp-iam_{context}"] - -= Red Hat managed Google Cloud resources - - -Red Hat is responsible for creating and managing the following IAM Google Cloud Platform (GCP) resources. - -[id="ccs-gcp-iam-service-account-roles_{context}"] -== IAM service account and roles - -The `osd-managed-admin` IAM service account is created immediately after taking control of the customer-provided GCP account. This is the user that will perform the {product-title} cluster installation. - -The following roles are attached to the service account: - -.IAM roles for osd-managed-admin -[cols="2a,3a,2a",options="header"] - -|=== - -|Role |Console role name |Description - -|Compute Admin -|`roles/compute.admin` -|Provides full control of all Compute Engine resources. - -|DNS Administrator -|`roles/dns.admin` -|Provides read-write access to all Cloud DNS resources. - -|Security Admin -|`roles/iam.securityAdmin` -|Security admin role, with permissions to get and set any IAM policy. - -|Storage Admin -|`roles/storage.admin` -|Grants full control of objects and buckets. - -When applied to an individual *bucket*, control applies only to the specified bucket and objects within the bucket. - -|Service Account Admin -|`roles/iam.serviceAccountAdmin` -|Create and manage service accounts. - -|Service Account Key Admin -|`roles/iam.serviceAccountKeyAdmin` -|Create and manage (and rotate) service account keys. - -|Service Account User -|`roles/iam.serviceAccountUser` -|Run operations as the service account. - -|=== - -[id="ccs-gcp-iam-group-roles_{context}"] -== IAM group and roles - -The `sd-sre-platform-gcp-access` Google group is granted access to the GCP project to allow Red Hat Site Reliability Engineering (SRE) access to the console for emergency troubleshooting purposes. - -The following roles are attached to the group: - -.IAM roles for sd-sre-platform-gcp-access -[cols="2a,3a,2a",options="header"] - -|=== - -|Role |Console role name |Description - -|Compute Admin -|`roles/compute.admin` -|Provides full control of all Compute Engine resources. - -|Editor -|`roles/editor` -|Provides all viewer permissions, plus permissions for actions that modify state. - -|Organization Policy Viewer -|`roles/orgpolicy.policyViewer` -|Provides access to view Organization Policies on resources. - -|Project IAM Admin -|`roles/resourcemanager.projectIamAdmin` -|Provides permissions to administer IAM policies on projects. - -|Quota Administrator -|`roles/servicemanagement.quotaAdmin` -|Provides access to administer service quotas. - -|Role Administrator -|`roles/iam.roleAdmin` -|Provides access to all custom roles in the project. 
- -|Service Account Admin -|`roles/iam.serviceAccountAdmin` -|Create and manage service accounts. - - -|Service Usage Admin -|`roles/serviceusage.serviceUsageAdmin` -|Ability to enable, disable, and inspect service states, inspect operations, and consume quota and billing for a consumer project. - -|Tech Support Editor -|`roles/cloudsupport.techSupportEditor` -|Provides full read-write access to technical support cases. - -|=== diff --git a/modules/ccs-gcp-provisioned.adoc b/modules/ccs-gcp-provisioned.adoc deleted file mode 100644 index 9988921dbb25..000000000000 --- a/modules/ccs-gcp-provisioned.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/gcp-ccs.adoc - -[id="ccs-gcp-provisioned_{context}"] -= Provisioned GCP Infrastructure - -This is an overview of the provisioned Google Cloud Platform (GCP) components on a deployed {product-title} cluster. For a more detailed listing of all provisioned GCP components, see the link:https://access.redhat.com/documentation/en-us/openshift_container_platform/[{OCP} documentation]. - -[id="gcp-policy-instances_{context}"] -== Compute instances - -GCP compute instances are required to deploy the control plane and data plane functions of {product-title} in GCP. Instance types might vary for control plane and infrastructure nodes depending on worker node count. - -* Single availability zone -** 2 infra nodes (custom machine type: 4 vCPU and 32 GB RAM) -** 3 control plane nodes (custom machine type: 8 vCPU and 32 GB RAM) -** 2 worker nodes (custom machine type: 4 vCPU and 16 GB RAM) -* Multiple availability zones -** 3 infra nodes (custom machine type: 4 vCPU and 32 GB RAM) -** 3 control plane nodes (custom machine type: 8 vCPU and 32 GB RAM) -** 3 worker nodes (custom machine type: 4 vCPU and 16 GB RAM) - - -[id="gcp-policy-storage_{context}"] -== Storage - -* Infrastructure volumes: -** 128 GB SSD persistent disk (deleted on instance deletion) -** 110 GB Standard persistent disk (kept on instance deletion) -* Worker volumes: -** 128 GB SSD persistent disk (deleted on instance deletion) -* Control plane volumes: -** 128 GB SSD persistent disk (deleted on instance deletion) - -[id="gcp-policy-vpc_{context}"] -== VPC - -* **Subnets:** One master subnet for the control plane workloads and one worker subnet for all others. -* **Router tables:** One global route table per VPC. -* **Internet gateways:** One internet gateway per cluster. -* **NAT gateways:** One master NAT gateway and one worker NAT gateway per cluster. 
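To cross-check this inventory in your own project, you can list the provisioned instances, disks, and subnets with the gcloud CLI. This is an illustrative sketch; `<project_id>` is a placeholder, and the exact resource names vary by cluster.

[source,terminal]
----
# List the control plane, infra, and worker instances provisioned for the cluster.
$ gcloud compute instances list --project=<project_id>

# List the persistent disks that back the volumes described above.
$ gcloud compute disks list --project=<project_id>

# List the master and worker subnets in the cluster VPC.
$ gcloud compute networks subnets list --project=<project_id>
----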
- -[id="gcp-policy-services_{context}"] -== Services - -The following services must be enabled on a GCP CCS cluster: - -* `Deploymentmanager` -* `Compute` -* `Cloudapis` -* `Cloudresourcemanager` -* `DNS` -* `Iamcredentials` -* `IAM` -* `Servicemanagement` -* `Serviceusage` -* `Storage-api` -* `Storage-component` - -[id="gcp-policy-permissions_{context}"] -== Permissions - -The following roles must be added to the support service account: - -* `Compute.admin` -* `Dns.admin` -* `orgpolicy.policyViewer` -* `Owner` -* `resourcemanager.projectIamAdmin` -* `Servicemanagement.admin` -* `serviceusage.serviceUsageAdmin` -* `storage.admin` diff --git a/modules/ccs-gcp-understand.adoc b/modules/ccs-gcp-understand.adoc deleted file mode 100644 index 51e1db886b03..000000000000 --- a/modules/ccs-gcp-understand.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/gcp-ccs.adoc - -:_content-type: CONCEPT -[id="ccs-gcp-understand_{context}"] -= Understanding Customer Cloud Subscriptions on GCP - - -Red Hat {product-title} provides a Customer Cloud Subscription (CCS) model that allows Red Hat to deploy and manage {product-title} into a customer's existing {GCP} account. Red Hat requires several prerequisites be met in order to provide this service. - -Red Hat recommends the usage of GCP project, managed by the customer, to organize all of your GCP resources. A project consists of a set of users and APIs, as well as billing, authentication, and monitoring settings for those APIs. - -It is recommended for the {product-title} cluster using a CCS model to be hosted in a GCP project within a GCP organization. The Organization resource is the root node of the GCP resource hierarchy and all resources that belong to an organization are grouped under the organization node. An IAM service account with certain roles granted is created and applied to the GCP project. When you make calls to the API, you typically provide service account keys for authentication. Each service account is owned by a specific project, but service accounts can be provided roles to access resources for other projects. diff --git a/modules/cert-manager-about.adoc b/modules/cert-manager-about.adoc deleted file mode 100644 index 983d08b7eec5..000000000000 --- a/modules/cert-manager-about.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/index.adoc - -:_content-type: CONCEPT -[id="cert-manager-about_{context}"] -= About the {cert-manager-operator} - -The link:https://cert-manager.io/[cert-manager] project introduces certificate authorities and certificates as resource types in the Kubernetes API, which makes it possible to provide certificates on demand to developers working within your cluster. The {cert-manager-operator} provides a supported way to integrate cert-manager into your {product-title} cluster. - -The {cert-manager-operator} provides the following features: - -* Support for integrating with external certificate authorities -* Tools to manage certificates -* Ability for developers to self-serve certificates -* Automatic certificate renewal - -[IMPORTANT] -==== -Do not attempt to use more than one cert-manager Operator in your cluster. If you have a community cert-manager Operator installed in your cluster, you must uninstall it before installing the {cert-manager-operator}. 
-==== diff --git a/modules/cert-manager-acme-about.adoc b/modules/cert-manager-acme-about.adoc deleted file mode 100644 index e6c3f934e305..000000000000 --- a/modules/cert-manager-acme-about.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-operator-issuer-acme.adoc - -:_content-type: CONCEPT -[id="cert-manager-acme-about_{context}"] -= About ACME issuers - -The ACME issuer type for the {cert-manager-operator} represents an Automated Certificate Management Environment (ACME) certificate authority (CA) server. ACME CA servers rely on a _challenge_ to verify that a client owns the domain names that the certificate is being requested for. If the challenge is successful, the {cert-manager-operator} can issue the certificate. If the challenge fails, the {cert-manager-operator} does not issue the certificate. - -[NOTE] -==== -Private DNS zones are not supported with _Let’s Encrypt_ and internet ACME servers. -==== \ No newline at end of file diff --git a/modules/cert-manager-acme-challenges-types.adoc b/modules/cert-manager-acme-challenges-types.adoc deleted file mode 100644 index 55877b3e1958..000000000000 --- a/modules/cert-manager-acme-challenges-types.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-operator-issuer-acme.adoc - -:_content-type: CONCEPT -[id="cert-manager-acme-challenges-types_{context}"] -= Supported ACME challenges types - -The {cert-manager-operator} supports the following challenge types for ACME issuers: - -HTTP-01:: With the HTTP-01 challenge type, you provide a computed key at an HTTP URL endpoint in your domain. If the ACME CA server can get the key from the URL, it can validate you as the owner of the domain. -+ -For more information, see link:https://cert-manager.io/docs/configuration/acme/http01/[HTTP01] in the upstream cert-manager documentation. - -DNS-01:: With the DNS-01 challenge type, you provide a computed key at a DNS TXT record. If the ACME CA server can get the key by DNS lookup, it can validate you as the owner of the domain. -+ -For more information, see link:https://cert-manager.io/docs/configuration/acme/dns01/[DNS01] in the upstream cert-manager documentation. diff --git a/modules/cert-manager-acme-dns-providers.adoc b/modules/cert-manager-acme-dns-providers.adoc deleted file mode 100644 index fdbd4f713bd8..000000000000 --- a/modules/cert-manager-acme-dns-providers.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-operator-issuer-acme.adoc - -:_content-type: CONCEPT -[id="cert-manager-acme-dns-providers_{context}"] -= Supported DNS-01 providers - -The {cert-manager-operator} supports the following DNS-01 providers for ACME issuers: - -* Amazon Route 53 -* Azure DNS -+ -[NOTE] -==== -The {cert-manager-operator} does not support using Azure Active Directory (Azure AD) pod identities to assign a managed identity to a pod. 
-==== -* Google Cloud DNS diff --git a/modules/cert-manager-acme-dns01-ambient-aws.adoc b/modules/cert-manager-acme-dns01-ambient-aws.adoc deleted file mode 100644 index 7da436477313..000000000000 --- a/modules/cert-manager-acme-dns01-ambient-aws.adoc +++ /dev/null @@ -1,139 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-operator-issuer-acme.adoc - -:_content-type: PROCEDURE -[id="cert-manager-acme-dns01-ambient-aws_{context}"] -= Configuring an ACME issuer by using ambient credentials on AWS - -You can use {cert-manager-operator} to set up an ACME issuer to solve DNS-01 challenges by using ambient credentials on AWS. This procedure uses _Let's Encrypt_ as the ACME CA server and shows how to solve DNS-01 challenges with Amazon Route 53. - -.Prerequisites - -* If your cluster is configured to use the AWS Security Token Service (STS), you followed the instructions from the _Configuring cloud credentials for the cert-manager Operator for Red Hat OpenShift for the AWS Security Token Service cluster_ section. -* If your cluster does not use the AWS STS, you followed the instructions from the _Configuring cloud credentials for the cert-manager Operator for Red Hat OpenShift on AWS_ section. - -.Procedure - -. Optional: Override the nameserver settings for the DNS-01 self check. -+ -This step is required only when the target public-hosted zone overlaps with the cluster's default private-hosted zone. - -.. Edit the `CertManager` resource by running the following command: -+ -[source,terminal] ----- -$ oc edit certmanager cluster ----- - -.. Add a `spec.controllerConfig` section with the following override arguments: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1alpha1 -kind: CertManager -metadata: - name: cluster - ... -spec: - ... - controllerConfig: <1> - overrideArgs: - - '--dns01-recursive-nameservers-only' <2> - - '--dns01-recursive-nameservers=1.1.1.1:53' <3> ----- -<1> Add the `spec.controllerConfig` section. -<2> Specify to only use recursive nameservers instead of checking the authoritative nameservers associated with that domain. -<3> Provide a comma-separated list of `:` nameservers to query for the DNS-01 self check. You must use a `1.1.1.1:53` value to avoid the public and private zones overlapping. - -.. Save the file to apply the changes. - -. Optional: Create a namespace for the issuer: -+ -[source,terminal] ----- -$ oc new-project ----- - -. Modify the `CertManager` resource to add the `--issuer-ambient-credentials` argument: -+ -[source,terminal] ----- -$ oc patch certmanager/cluster \ - --type=merge \ - -p='{"spec":{"controllerConfig":{"overrideArgs":["--issuer-ambient-credentials"]}}}' ----- - -. Create an issuer: - -.. Create a YAML file that defines the `Issuer` object: -+ -.Example `issuer.yaml` file -[source,yaml] ----- -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - name: <1> - namespace: <2> -spec: - acme: - server: https://acme-staging-v02.api.letsencrypt.org/directory <3> - email: "" <4> - privateKeySecretRef: - name: <5> - solvers: - - dns01: - route53: - hostedZoneID: <6> - region: us-east-1 ----- -<1> Provide a name for the issuer. -<2> Specify the namespace that you created for the issuer. -<3> Specify the URL to access the ACME server's `directory` endpoint. This example uses the _Let's Encrypt_ staging environment. -<4> Replace `` with your email address. -<5> Replace `` with the name of the secret to store the ACME account private key in. 
-<6> Replace `` with your hosted zone ID. - -.. Create the `Issuer` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f issuer.yaml ----- - -. Create a certificate: - -.. Create a YAML file that defines the `Certificate` object: -+ -.Example `certificate.yaml` file -[source,yaml] ----- -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: <1> - namespace: <2> -spec: - isCA: false - commonName: '' <3> - secretName: <4> - dnsNames: - - '' <5> - issuerRef: - name: <6> - kind: Issuer ----- -<1> Provide a name for the certificate. -<2> Specify the namespace that you created for the issuer. -<3> Replace `` with your common name (CN). -<4> Specify the name of the secret to create that will contain the certificate. -<5> Replace `` with your domain name. -<6> Specify the name of the issuer that you created. - -.. Create the `Certificate` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f certificate.yaml ----- \ No newline at end of file diff --git a/modules/cert-manager-acme-dns01-ambient-gcp.adoc b/modules/cert-manager-acme-dns01-ambient-gcp.adoc deleted file mode 100644 index 02dbfb35ccb3..000000000000 --- a/modules/cert-manager-acme-dns01-ambient-gcp.adoc +++ /dev/null @@ -1,131 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-operator-issuer-acme.adoc - -:_content-type: PROCEDURE -[id="cert-manager-acme-dns01-ambient-gcp_{context}"] -= Configuring an ACME issuer by using ambient credentials on GCP - -You can use the {cert-manager-operator} to set up an ACME issuer to solve DNS-01 challenges by using ambient credentials on GCP. This procedure uses _Let's Encrypt_ as the ACME CA server and shows how to solve DNS-01 challenges with Google CloudDNS. - -.Prerequisites - -* If your cluster is configured to use GCP Workload Identity, you followed the instructions from the _Configuring cloud credentials for the cert-manager Operator for Red Hat OpenShift with GCP Workload Identity_ section. -* If your cluster does not use GCP Workload Identity, you followed the instructions from the _Configuring cloud credentials for the cert-manager Operator for Red Hat OpenShift on GCP_ section. - -.Procedure - -. Optional: Override the nameserver settings for the DNS-01 self check. -+ -This step is required only when the target public-hosted zone overlaps with the cluster's default private-hosted zone. - -.. Edit the `CertManager` resource by running the following command: -+ -[source,terminal] ----- -$ oc edit certmanager cluster ----- - -.. Add a `spec.controllerConfig` section with the following override arguments: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1alpha1 -kind: CertManager -metadata: - name: cluster - ... -spec: - ... - controllerConfig: <1> - overrideArgs: - - '--dns01-recursive-nameservers-only' <2> - - '--dns01-recursive-nameservers=1.1.1.1:53' <3> ----- -<1> Add the `spec.controllerConfig` section. -<2> Specify to only use recursive nameservers instead of checking the authoritative nameservers associated with that domain. -<3> Provide a comma-separated list of `:` nameservers to query for the DNS-01 self check. You must use a `1.1.1.1:53` value to avoid the public and private zones overlapping. - -.. Save the file to apply the changes. - -. Optional: Create a namespace for the issuer: -+ -[source,terminal] ----- -$ oc new-project ----- - -. 
Modify the `CertManager` resource to add the `--issuer-ambient-credentials` argument: -+ -[source,terminal] ----- -$ oc patch certmanager/cluster \ - --type=merge \ - -p='{"spec":{"controllerConfig":{"overrideArgs":["--issuer-ambient-credentials"]}}}' ----- - -. Create an issuer: - -.. Create a YAML file that defines the `Issuer` object: -+ -.Example `issuer.yaml` file -[source,yaml] ----- -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - name: <1> - namespace: -spec: - acme: - preferredChain: "" - privateKeySecretRef: - name: <2> - server: https://acme-staging-v02.api.letsencrypt.org/directory <3> - solvers: - - dns01: - cloudDNS: - project: <4> ----- -<1> Provide a name for the issuer. -<2> Replace `` with the name of the secret to store the ACME account private key in. -<3> Specify the URL to access the ACME server's `directory` endpoint. This example uses the _Let's Encrypt_ staging environment. -<4> Replace `` with the name of the GCP project that contains the Cloud DNS zone. - -.. Create the `Issuer` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f issuer.yaml ----- - -. Create a certificate: - -.. Create a YAML file that defines the `Certificate` object: -+ -.Example `certificate.yaml` file -[source,yaml] ----- -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: <1> - namespace: -spec: - secretName: <2> - issuerRef: - name: <3> - dnsNames: - - '' <4> ----- -<1> Provide a name for the certificate. -<2> Specify the name of the secret to create that will contain the certificate. -<3> Specify the name of the issuer that you created. -<4> Replace `` with your domain name. - -.. Create the `Certificate` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f certificate.yaml ----- \ No newline at end of file diff --git a/modules/cert-manager-acme-dns01-explicit-aws.adoc b/modules/cert-manager-acme-dns01-explicit-aws.adoc deleted file mode 100644 index c4bf5c382127..000000000000 --- a/modules/cert-manager-acme-dns01-explicit-aws.adoc +++ /dev/null @@ -1,151 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-operator-issuer-acme.adoc - -:_content-type: PROCEDURE -[id="cert-manager-acme-dns01-explicit-aws_{context}"] -= Configuring an ACME issuer by using explicit credentials for AWS Route53 - -You can use {cert-manager-operator} to set up an Automated Certificate Management Environment (ACME) issuer to solve DNS-01 challenges by using explicit credentials on AWS. This procedure uses _Let's Encrypt_ as the ACME certificate authority (CA) server and shows how to solve DNS-01 challenges with Amazon Route 53. - -.Prerequisites - -* You must provide the explicit `accessKeyID` and `secretAccessKey` credentials. For more information, see link:https://cert-manager.io/docs/configuration/acme/dns01/route53/[Route53] in the upstream cert-manager documentation. -+ -[NOTE] -==== -You can use Amazon Route 53 with explicit credentials in an {product-title} cluster that is not running on AWS. -==== - -.Procedure - -. Optional: Override the nameserver settings for the DNS-01 self check. -+ -This step is required only when the target public-hosted zone overlaps with the cluster's default private-hosted zone. - -.. Edit the `CertManager` resource by running the following command: -+ -[source,terminal] ----- -$ oc edit certmanager cluster ----- - -.. 
Add a `spec.controllerConfig` section with the following override arguments: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1alpha1 -kind: CertManager -metadata: - name: cluster - ... -spec: - ... - controllerConfig: <1> - overrideArgs: - - '--dns01-recursive-nameservers-only' <2> - - '--dns01-recursive-nameservers=1.1.1.1:53' <3> ----- -<1> Add the `spec.controllerConfig` section. -<2> Specify to only use recursive nameservers instead of checking the authoritative nameservers associated with that domain. -<3> Provide a comma-separated list of `:` nameservers to query for the DNS-01 self check. You must use a `1.1.1.1:53` value to avoid the public and private zones overlapping. - -.. Save the file to apply the changes. - -. Optional: Create a namespace for the issuer: -+ -[source,terminal] ----- -$ oc new-project ----- - -. Create a secret to store your AWS credentials in by running the following command: -+ -[source,terminal] ----- -$ oc create secret generic aws-secret --from-literal=awsSecretAccessKey= \ <1> - -n my-issuer-namespace ----- -<1> Replace `` with your AWS secret access key. - -. Create an issuer: - -.. Create a YAML file that defines the `Issuer` object: -+ -.Example `issuer.yaml` file -[source,yaml] ----- -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - name: <1> - namespace: <2> -spec: - acme: - server: https://acme-staging-v02.api.letsencrypt.org/directory <3> - email: "" <4> - privateKeySecretRef: - name: <5> - solvers: - - dns01: - route53: - accessKeyID: <6> - hostedZoneID: <7> - region: <8> - secretAccessKeySecretRef: - name: "aws-secret" <9> - key: "awsSecretAccessKey" <10> ----- -<1> Provide a name for the issuer. -<2> Specify the namespace that you created for the issuer. -<3> Specify the URL to access the ACME server's `directory` endpoint. This example uses the _Let's Encrypt_ staging environment. -<4> Replace `` with your email address. -<5> Replace `` with the name of the secret to store the ACME account private key in. -<6> Replace `` with your AWS key ID. -<7> Replace `` with your hosted zone ID. -<8> Replace `` with the AWS region name. For example, `us-east-1`. -<9> Specify the name of the secret you created. -<10> Specify the key in the secret you created that stores your AWS secret access key. - -.. Create the `Issuer` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f issuer.yaml ----- - -. Create a certificate: - -.. Create a YAML file that defines the `Certificate` object: -+ -.Example `certificate.yaml` file -[source,yaml] ----- -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: <1> - namespace: <2> -spec: - isCA: false - commonName: '' <3> - secretName: <4> - dnsNames: - - '' <5> - issuerRef: - name: <6> - kind: Issuer ----- -<1> Provide a name for the certificate. -<2> Specify the namespace that you created for the issuer. -<3> Replace `` with your common name (CN). -<4> Specify the name of the secret to create that will contain the certificate. -<5> Replace `` with your domain name. -<6> Specify the name of the issuer that you created. - -.. 
Create the `Certificate` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f certificate.yaml ----- diff --git a/modules/cert-manager-acme-dns01-explicit-azure.adoc b/modules/cert-manager-acme-dns01-explicit-azure.adoc deleted file mode 100644 index 38a41ba28c43..000000000000 --- a/modules/cert-manager-acme-dns01-explicit-azure.adoc +++ /dev/null @@ -1,153 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-operator-issuer-acme.adoc - -:_content-type: PROCEDURE -[id="cert-manager-acme-dns01-explicit-azure_{context}"] -= Configuring an ACME issuer by using explicit credentials for Microsoft Azure DNS - -You can use {cert-manager-operator} to set up an ACME issuer to solve DNS-01 challenges by using explicit credentials on Microsoft Azure. This procedure uses _Let's Encrypt_ as the ACME CA server and shows how to solve DNS-01 challenges with Azure DNS. - -.Prerequisites - -* You have set up a service principal with desired role for Azure DNS. For more information, see link:https://cert-manager.io/docs/configuration/acme/dns01/azuredns/[Azure DNS] in the upstream cert-manager documentation. -+ -[NOTE] -==== -You can follow this procedure for an {product-title} cluster that is not running on Microsoft Azure. -==== - -.Procedure - -. Optional: Override the nameserver settings for the DNS-01 self check. -+ -This step is required only when the target public-hosted zone overlaps with the cluster's default private-hosted zone. - -.. Edit the `CertManager` resource by running the following command: -+ -[source,terminal] ----- -$ oc edit certmanager cluster ----- - -.. Add a `spec.controllerConfig` section with the following override arguments: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1alpha1 -kind: CertManager -metadata: - name: cluster - ... -spec: - ... - controllerConfig: <1> - overrideArgs: - - '--dns01-recursive-nameservers-only' <2> - - '--dns01-recursive-nameservers=1.1.1.1:53' <3> ----- -<1> Add the `spec.controllerConfig` section. -<2> Specify to only use recursive nameservers instead of checking the authoritative nameservers associated with that domain. -<3> Provide a comma-separated list of `:` nameservers to query for the DNS-01 self check. You must use a `1.1.1.1:53` value to avoid the public and private zones overlapping. - -.. Save the file to apply the changes. - -. Optional: Create a namespace for the issuer: -+ -[source,terminal] ----- -$ oc new-project my-issuer-namespace ----- - -. Create a secret to store your Azure credentials in by running the following command: -+ -[source,terminal] ----- -$ oc create secret generic --from-literal== \ <1> <2> <3> - -n my-issuer-namespace ----- -<1> Replace `` with your secret name. -<2> Replace `` with your Azure secret access key name. -<3> Replace `` with your Azure secret key. - -. Create an issuer: - -.. Create a YAML file that defines the `Issuer` object: -+ -.Example `issuer.yaml` file -[source,yaml] ----- -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - name: <1> - namespace: <2> -spec: - acme: - preferredChain: "" - privateKeySecretRef: - name: <3> - server: https://acme-staging-v02.api.letsencrypt.org/directory <4> - solvers: - - dns01: - azureDNS: - clientID: <5> - clientSecretSecretRef: - name: <6> - key: <7> - subscriptionID: <8> - tenantID: <9> - resourceGroupName: <10> - hostedZoneName: <11> - environment: AzurePublicCloud ----- -<1> Provide a name for the issuer. -<2> Replace `` with your issuer namespace. 
-<3> Replace `` with the name of the secret to store the ACME account private key in. -<4> Specify the URL to access the ACME server's `directory` endpoint. This example uses the _Let's Encrypt_ staging environment. -<5> Replace `` with your Azure client ID. -<6> Replace `` with a name of the client secret. -<7> Replace `` with the client secret key name. -<8> Replace `` with your Azure subscription ID. -<9> Replace `` with your Azure tenant ID. -<10> Replace `` with the name of the Azure DNS zone resource group. -<11> Replace `` with the name of Azure DNS zone. - -.. Create the `Issuer` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f issuer.yaml ----- - -. Create a certificate: - -.. Create a YAML file that defines the `Certificate` object: -+ -.Example `certificate.yaml` file -[source,yaml] ----- -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: <1> - namespace: <2> -spec: - secretName: <3> - issuerRef: - name: <4> - dnsNames: - - '' <5> ----- -<1> Provide a name for the certificate. -<2> Replace `` with your issuer namespace. -<3> Specify the name of the secret to create that will contain the certificate. -<4> Specify the name of the issuer that you created. -<5> Replace `` with your domain name. - -.. Create the `Certificate` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f certificate.yaml ----- \ No newline at end of file diff --git a/modules/cert-manager-acme-dns01-explicit-gcp.adoc b/modules/cert-manager-acme-dns01-explicit-gcp.adoc deleted file mode 100644 index 82e00ea0b491..000000000000 --- a/modules/cert-manager-acme-dns01-explicit-gcp.adoc +++ /dev/null @@ -1,140 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-operator-issuer-acme.adoc - -:_content-type: PROCEDURE -[id="cert-manager-acme-dns01-explicit-gcp_{context}"] -= Configuring an ACME issuer by using explicit credentials for GCP Cloud DNS - -You can use the {cert-manager-operator} to set up an ACME issuer to solve DNS-01 challenges by using explicit credentials on GCP. This procedure uses _Let's Encrypt_ as the ACME CA server and shows how to solve DNS-01 challenges with Google CloudDNS. - -.Prerequisites - -* You have set up Google Cloud service account with a desired role for Google CloudDNS. For more information, see link:https://cert-manager.io/docs/configuration/acme/dns01/google/[Google CloudDNS] in the upstream cert-manager documentation. -+ -[NOTE] -==== -You can use Google CloudDNS with explicit credentials in an {product-title} cluster that is not running on GCP. -==== - -.Procedure - -. Optional: Override the nameserver settings for the DNS-01 self check. -+ -This step is required only when the target public-hosted zone overlaps with the cluster's default private-hosted zone. - -.. Edit the `CertManager` resource by running the following command: -+ -[source,terminal] ----- -$ oc edit certmanager cluster ----- - -.. Add a `spec.controllerConfig` section with the following override arguments: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1alpha1 -kind: CertManager -metadata: - name: cluster - ... -spec: - ... - controllerConfig: <1> - overrideArgs: - - '--dns01-recursive-nameservers-only' <2> - - '--dns01-recursive-nameservers=1.1.1.1:53' <3> ----- -<1> Add the `spec.controllerConfig` section. -<2> Specify to only use recursive nameservers instead of checking the authoritative nameservers associated with that domain. 
-<3> Provide a comma-separated list of `:` nameservers to query for the DNS-01 self check. You must use a `1.1.1.1:53` value to avoid the public and private zones overlapping. - -.. Save the file to apply the changes. - -. Optional: Create a namespace for the issuer: -+ -[source,terminal] ----- -$ oc new-project my-issuer-namespace ----- - -. Create a secret to store your GCP credentials by running the following command: -+ -[source,terminal] ----- -$ oc create secret generic clouddns-dns01-solver-svc-acct --from-file=service_account.json= -n my-issuer-namespace ----- - -. Create an issuer: - -.. Create a YAML file that defines the `Issuer` object: -+ -.Example `issuer.yaml` file -[source,yaml] ----- -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - name: <1> - namespace: <2> -spec: - acme: - preferredChain: "" - privateKeySecretRef: - name: <3> - server: https://acme-staging-v02.api.letsencrypt.org/directory <4> - solvers: - - dns01: - cloudDNS: - project: <5> - serviceAccountSecretRef: - name: clouddns-dns01-solver-svc-acct <6> - key: service_account.json <7> ----- -<1> Provide a name for the issuer. -<2> Replace `` with your issuer namespace. -<3> Replace `` with the name of the secret to store the ACME account private key in. -<4> Specify the URL to access the ACME server's `directory` endpoint. This example uses the _Let's Encrypt_ staging environment. -<5> Replace `` with the name of the GCP project that contains the Cloud DNS zone. -<6> Specify the name of the secret you created. -<7> Specify the key in the secret you created that stores your GCP secret access key. - -.. Create the `Issuer` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f issuer.yaml ----- - -. Create a certificate: - -.. Create a YAML file that defines the `Certificate` object: -+ -.Example `certificate.yaml` file -[source,yaml] ----- -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: <1> - namespace: <2> -spec: - secretName: <3> - issuerRef: - name: issuer-acme-dns01-clouddns <4> - dnsNames: - - '' <5> ----- -<1> Provide a name for the certificate. -<2> Replace `` with your issuer namespace. -<3> Specify the name of the secret to create that will contain the certificate. -<4> Specify the name of the issuer that you created. -<5> Replace `` with your domain name. - -.. Create the `Certificate` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f certificate.yaml ----- \ No newline at end of file diff --git a/modules/cert-manager-acme-http01.adoc b/modules/cert-manager-acme-http01.adoc deleted file mode 100644 index 4aee84d177ca..000000000000 --- a/modules/cert-manager-acme-http01.adoc +++ /dev/null @@ -1,119 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-operator-issuer-acme.adoc - -:_content-type: PROCEDURE -[id="cert-manager-acme-http01_{context}"] -= Configuring an ACME issuer to solve HTTP-01 challenges - -You can use {cert-manager-operator} to set up an ACME issuer to solve HTTP-01 challenges. This procedure uses _Let's Encrypt_ as the ACME CA server. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have a service that you want to expose. In this procedure, the service is named `sample-workload`. - -.Procedure - -. Create an ACME cluster issuer. - -.. 
Create a YAML file that defines the `ClusterIssuer` object: -+ -.Example `acme-cluster-issuer.yaml` file -[source,yaml] ----- -apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: letsencrypt-staging <1> -spec: - acme: - preferredChain: "" - privateKeySecretRef: - name: <2> - server: https://acme-staging-v02.api.letsencrypt.org/directory <3> - solvers: - - http01: - ingress: - class: openshift-default <4> ----- -<1> Provide a name for the cluster issuer. -<2> Replace `` with the name of secret to store the ACME account private key in. -<3> Specify the URL to access the ACME server's `directory` endpoint. This example uses the _Let's Encrypt_ staging environment. -<4> Specify the Ingress class. - -.. Create the `ClusterIssuer` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f acme-cluster-issuer.yaml ----- - -. Create an Ingress to expose the service of the user workload. - -.. Create a YAML file that defines a `Namespace` object: -+ -.Example `namespace.yaml` file -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: my-ingress-namespace <1> ----- -<1> Specify the namespace for the Ingress. - -.. Create the `Namespace` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f namespace.yaml ----- - -.. Create a YAML file that defines the `Ingress` object: -+ -.Example `ingress.yaml` file -[source,yaml] ----- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: sample-ingress <1> - namespace: my-ingress-namespace <2> - annotations: - cert-manager.io/cluster-issuer: letsencrypt-staging <3> - acme.cert-manager.io/http01-ingress-class: openshift-default <4> -spec: - ingressClassName: openshift-default <5> - tls: - - hosts: - - <6> - secretName: sample-tls <7> - rules: - - host: <8> - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: sample-workload <9> - port: - number: 80 ----- -<1> Specify the name of the Ingress. -<2> Specify the namespace that you created for the Ingress. -<3> Specify the cluster issuer that you created. -<4> Specify the Ingress class. -<5> Specify the Ingress class. -<6> Replace `` with the Subject Alternative Name to be associated with the certificate. This name is used to add DNS names to the certificate. -<7> Specify the secret to store the created certificate in. -<8> Replace `` with the hostname. You can use the `.` syntax to take advantage of the `*.` wildcard DNS record and serving certificate for the cluster. For example, you might use `apps.`. Otherwise, you must ensure that a DNS record exists for the chosen hostname. -<9> Specify the name of the service to expose. This example uses a service named `sample-workload`. - -.. 
Create the `Ingress` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f ingress.yaml ----- diff --git a/modules/cert-manager-configure-cloud-credentials-aws-non-sts.adoc b/modules/cert-manager-configure-cloud-credentials-aws-non-sts.adoc deleted file mode 100644 index 91efdaee0e2c..000000000000 --- a/modules/cert-manager-configure-cloud-credentials-aws-non-sts.adoc +++ /dev/null @@ -1,106 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-authenticate-aws.adoc - -:_content-type: PROCEDURE -[id="cert-manager-configure-cloud-credentials-aws-non-sts_{context}"] -= Configuring cloud credentials for the {cert-manager-operator} on AWS - -To configure the cloud credentials for the {cert-manager-operator} on the AWS cluster you must generate the cloud credentials secret by creating a `CredentialsRequest` object, and allowing the Cloud Credential Operator. - -.Prerequisites - -* You have installed the {cert-manager-operator} 1.11.1 or later. -* You have configured the Cloud Credential Operator to operate in _mint_ or _passthrough_ mode. - -.Procedure - -. Create a `CredentialsRequest` resource YAML file, for example, `sample-credential-request.yaml`, as follows: -+ -[source,yaml] ----- -apiVersion: cloudcredential.openshift.io/v1 -kind: CredentialsRequest -metadata: - name: cert-manager - namespace: openshift-cloud-credential-operator -spec: - providerSpec: - apiVersion: cloudcredential.openshift.io/v1 - kind: AWSProviderSpec - statementEntries: - - action: - - "route53:GetChange" - effect: Allow - resource: "arn:aws:route53:::change/*" - - action: - - "route53:ChangeResourceRecordSets" - - "route53:ListResourceRecordSets" - effect: Allow - resource: "arn:aws:route53:::hostedzone/*" - - action: - - "route53:ListHostedZonesByName" - effect: Allow - resource: "*" - secretRef: - name: aws-creds - namespace: cert-manager - serviceAccountNames: - - cert-manager ----- - -. Create a `CredentialsRequest` resource by running the following command: -+ -[source,terminal] ----- -$ oc create -f sample-credential-request.yaml ----- - -. Update the subscription object for {cert-manager-operator} by running the following command: -+ -[source,terminal] ----- -$ oc -n cert-manager-operator patch subscription openshift-cert-manager-operator --type=merge -p '{"spec":{"config":{"env":[{"name":"CLOUD_CREDENTIALS_SECRET_NAME","value":"aws-creds"}]}}}' ----- - -.Verification - -. Get the name of the redeployed cert-manager controller pod by running the following command: -+ -[source,terminal] ----- -$ oc get pods -l app.kubernetes.io/name=cert-manager -n cert-manager ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cert-manager-bd7fbb9fc-wvbbt 1/1 Running 0 15m39s ----- - -. Verify that the cert-manager controller pod is updated with AWS credential volumes that are mounted under the path specified in `mountPath` by running the following command: -+ -[source,terminal] ----- -$ oc get -n cert-manager pod/ -o yaml ----- -+ -.Example output -[source,terminal] ----- -... -spec: - containers: - - args: - ... - - mountPath: /.aws - name: cloud-credentials - ... - volumes: - ... - - name: cloud-credentials - secret: - ... 
- secretName: aws-creds ----- \ No newline at end of file diff --git a/modules/cert-manager-configure-cloud-credentials-aws-sts.adoc b/modules/cert-manager-configure-cloud-credentials-aws-sts.adoc deleted file mode 100644 index 8365a4fdd72c..000000000000 --- a/modules/cert-manager-configure-cloud-credentials-aws-sts.adoc +++ /dev/null @@ -1,125 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-authenticate-aws.adoc - -:_content-type: PROCEDURE -[id="cert-manager-configure-cloud-credentials-aws-sts_{context}"] -= Configuring cloud credentials for the {cert-manager-operator} for the AWS Security Token Service cluster - -To configure the cloud credentials for the {cert-manager-operator} on the AWS Security Token Service (STS) cluster with the cloud credentials. You must generate the cloud credentials manually, and apply it on the cluster by using the `ccoctl` binary. - -.Prerequisites - -* You have extracted and prepared the `ccoctl` binary. -* You have configured an {product-title} cluster with AWS STS by using the Cloud Credential Operator in manual mode. - -.Procedure - -. Create a directory to store a `CredentialsRequest` resource YAML file by running the following command: -+ -[source,terminal] ----- -$ mkdir credentials-request ----- - -. Create a `CredentialsRequest` resource YAML file under the `credentials-request` directory, such as, `sample-credential-request.yaml`, by applying the following yaml: -+ -[source,yaml] ----- -apiVersion: cloudcredential.openshift.io/v1 -kind: CredentialsRequest -metadata: - name: cert-manager - namespace: openshift-cloud-credential-operator -spec: - providerSpec: - apiVersion: cloudcredential.openshift.io/v1 - kind: AWSProviderSpec - statementEntries: - - action: - - "route53:GetChange" - effect: Allow - resource: "arn:aws:route53:::change/*" - - action: - - "route53:ChangeResourceRecordSets" - - "route53:ListResourceRecordSets" - effect: Allow - resource: "arn:aws:route53:::hostedzone/*" - - action: - - "route53:ListHostedZonesByName" - effect: Allow - resource: "*" - secretRef: - name: aws-creds - namespace: cert-manager - serviceAccountNames: - - cert-manager ----- - -. Use the `ccoctl` tool to process `CredentialsRequest` objects by running the following command: -+ -[source,terminal] ----- -$ ccoctl aws create-iam-roles \ - --name --region= \ - --credentials-requests-dir= \ - --identity-provider-arn --output-dir= ----- -+ -.Example output -[source,terminal] ----- -2023/05/15 18:10:34 Role arn:aws:iam::XXXXXXXXXXXX:role/-cert-manager-aws-creds created -2023/05/15 18:10:34 Saved credentials configuration to: /manifests/cert-manager-aws-creds-credentials.yaml -2023/05/15 18:10:35 Updated Role policy for Role -cert-manager-aws-creds ----- -+ -Copy the `` from the output to use in the next step. For example, `"arn:aws:iam::XXXXXXXXXXXX:role/-cert-manager-aws-creds"` - -. Add the `eks.amazonaws.com/role-arn=""` annotation to the service account by running the following command: -+ -[source,terminal] ----- -$ oc -n cert-manager annotate serviceaccount cert-manager eks.amazonaws.com/role-arn="" ----- - -. To create a new pod, delete the existing cert-manager controller pod by running the following command: -+ -[source,terminal] ----- -$ oc delete pods -l app.kubernetes.io/name=cert-manager -n cert-manager ----- -+ -The AWS credentials are applied to a new cert-manager controller pod within a minute. - -.Verification - -. 
Get the name of the updated cert-manager controller pod by running the following command: -+ -[source,terminal] ----- -$ oc get pods -l app.kubernetes.io/name=cert-manager -n cert-manager ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cert-manager-bd7fbb9fc-wvbbt 1/1 Running 0 39s ----- - -. Verify that AWS credentials are updated by running the following command: -+ -[source,terminal] ----- -$ oc set env -n cert-manager po/ --list ----- -+ -.Example output -[source,terminal] ----- -# pods/cert-manager-57f9555c54-vbcpg, container cert-manager-controller -# POD_NAMESPACE from field path metadata.namespace -AWS_ROLE_ARN=XXXXXXXXXXXX -AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/eks.amazonaws.com/serviceaccount/token ----- \ No newline at end of file diff --git a/modules/cert-manager-configure-cloud-credentials-gcp-non-sts.adoc b/modules/cert-manager-configure-cloud-credentials-gcp-non-sts.adoc deleted file mode 100644 index 7b03f29456e5..000000000000 --- a/modules/cert-manager-configure-cloud-credentials-gcp-non-sts.adoc +++ /dev/null @@ -1,107 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-authenticate-non-sts-gcp.adoc - -:_content-type: PROCEDURE -[id="cert-manager-configure-cloud-credentials-gcp-non-sts_{context}"] -= Configuring cloud credentials for the {cert-manager-operator} on GCP - -To configure the cloud credentials for the {cert-manager-operator} on a GCP cluster you must create a `CredentialsRequest` object, and allow the Cloud Credential Operator to generate the cloud credentials secret. - -.Prerequisites - -* You have installed the {cert-manager-operator} 1.11.1 or later. -* You have configured the Cloud Credential Operator to operate in _mint_ or _passthrough_ mode. - -.Procedure - -. Create a `CredentialsRequest` resource YAML file, such as, `sample-credential-request.yaml` by applying the following yaml: -+ -[source,yaml] ----- -apiVersion: cloudcredential.openshift.io/v1 -kind: CredentialsRequest -metadata: - name: cert-manager - namespace: openshift-cloud-credential-operator -spec: - providerSpec: - apiVersion: cloudcredential.openshift.io/v1 - kind: GCPProviderSpec - predefinedRoles: - - roles/dns.admin - secretRef: - name: gcp-credentials - namespace: cert-manager - serviceAccountNames: - - cert-manager ----- -+ -[NOTE] -==== -The `dns.admin` role provides admin privileges to the service account for managing Google Cloud DNS resources. To ensure that the cert-manager runs with the service account that has the least privilege, you can create a custom role with the following permissions: - -* `dns.resourceRecordSets.*` -* `dns.changes.*` -* `dns.managedZones.list` -==== - -. Create a `CredentialsRequest` resource by running the following command: -+ -[source,terminal] ----- -$ oc create -f sample-credential-request.yaml ----- - -. Update the subscription object for {cert-manager-operator} by running the following command: -+ -[source,terminal] ----- -$ oc -n cert-manager-operator patch subscription openshift-cert-manager-operator --type=merge -p '{"spec":{"config":{"env":[{"name":"CLOUD_CREDENTIALS_SECRET_NAME","value":"gcp-credentials"}]}}}' ----- - -.Verification - -. 
Get the name of the redeployed cert-manager controller pod by running the following command: -+ -[source,terminal] ----- -$ oc get pods -l app.kubernetes.io/name=cert-manager -n cert-manager ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cert-manager-bd7fbb9fc-wvbbt 1/1 Running 0 15m39s ----- - -. Verify that the cert-manager controller pod is updated with GCP credential volumes that are mounted under the path specified in `mountPath` by running the following command: -+ -[source,terminal] ----- -$ oc get -n cert-manager pod/ -o yaml ----- -+ -.Example output -[source,terminal] ----- -spec: - containers: - - args: - ... - volumeMounts: - ... - - mountPath: /.config/gcloud - name: cloud-credentials - .... - volumes: - ... - - name: cloud-credentials - secret: - ... - items: - - key: service_account.json - path: application_default_credentials.json - secretName: gcp-credentials ----- \ No newline at end of file diff --git a/modules/cert-manager-configure-cloud-credentials-gcp-sts.adoc b/modules/cert-manager-configure-cloud-credentials-gcp-sts.adoc deleted file mode 100644 index c155d3c261d0..000000000000 --- a/modules/cert-manager-configure-cloud-credentials-gcp-sts.adoc +++ /dev/null @@ -1,147 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-authenticate-gcp.adoc - -:_content-type: PROCEDURE -[id="cert-manager-configure-cloud-credentials-gcp-sts_{context}"] -= Configuring cloud credentials for the {cert-manager-operator} with GCP Workload Identity - -Generate the cloud credentials for the {cert-manager-operator} by using the `ccoctl` binary. Then, apply them to the GCP Workload Identity cluster. - -.Prerequisites - -* You extracted and prepared the `ccoctl` binary. -* The {cert-manager-operator} 1.11.1 or later is installed. -* You have configured an {product-title} cluster with GCP Workload Identity by using the Cloud Credential Operator in a manual mode. - -.Procedure - -. Create a directory to store a `CredentialsRequest` resource YAML file by running the following command: -+ -[source,terminal] ----- -$ mkdir credentials-request ----- - -. In the `credentials-request` directory, create a YAML file that contains the following `CredentialsRequest` manifest: -+ -[source,yaml] ----- -apiVersion: cloudcredential.openshift.io/v1 -kind: CredentialsRequest -metadata: - name: cert-manager - namespace: openshift-cloud-credential-operator -spec: - providerSpec: - apiVersion: cloudcredential.openshift.io/v1 - kind: GCPProviderSpec - predefinedRoles: - - roles/dns.admin - secretRef: - name: gcp-credentials - namespace: cert-manager - serviceAccountNames: - - cert-manager ----- -+ -[NOTE] -==== -The `dns.admin` role provides admin privileges to the service account for managing Google Cloud DNS resources. To ensure that the cert-manager runs with the service account that has the least privilege, you can create a custom role with the following permissions: - -* `dns.resourceRecordSets.*` -* `dns.changes.*` -* `dns.managedZones.list` -==== - -. 
Use the `ccoctl` tool to process `CredentialsRequest` objects by running the following command: -+ -[source,terminal] ----- -$ ccoctl gcp create-service-accounts \ - --name --output-dir= \ - --credentials-requests-dir= \ - --workload-identity-pool \ - --workload-identity-provider \ - --project ----- -+ -.Example command -[source,terminal] ----- -$ ccoctl gcp create-service-accounts \ - --name abcde-20230525-4bac2781 --output-dir=/home/outputdir \ - --credentials-requests-dir=/home/credentials-requests \ - --workload-identity-pool abcde-20230525-4bac2781 \ - --workload-identity-provider abcde-20230525-4bac2781 \ - --project openshift-gcp-devel ----- - -. Apply the secrets generated in the manifests directory of your cluster by running the following command: -+ -[source,terminal] ----- -$ ls /manifests/*-credentials.yaml | xargs -I{} oc apply -f {} ----- - -. Update the subscription object for {cert-manager-operator} by running the following command: -+ -[source,terminal] ----- -$ oc -n cert-manager-operator patch subscription openshift-cert-manager-operator --type=merge -p '{"spec":{"config":{"env":[{"name":"CLOUD_CREDENTIALS_SECRET_NAME","value":"gcp-credentials"}]}}}' ----- - -.Verification - -. Get the name of the redeployed cert-manager controller pod by running the following command: -+ -[source,terminal] ----- -$ oc get pods -l app.kubernetes.io/name=cert-manager -n cert-manager ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cert-manager-bd7fbb9fc-wvbbt 1/1 Running 0 15m39s ----- - -. Verify that the cert-manager controller pod is updated with GCP workload identity credential volumes that are mounted under the path specified in `mountPath` by running the following command: -+ -[source,terminal] ----- -$ oc get -n cert-manager pod/ -o yaml ----- -+ -.Example output -[source,terminal] ----- -spec: - containers: - - args: - ... - volumeMounts: - - mountPath: /var/run/secrets/openshift/serviceaccount - name: bound-sa-token - ... - - mountPath: /.config/gcloud - name: cloud-credentials - ... - volumes: - - name: bound-sa-token - projected: - ... - sources: - - serviceAccountToken: - audience: openshift - ... - path: token - - name: cloud-credentials - secret: - ... - items: - - key: service_account.json - path: application_default_credentials.json - secretName: gcp-credentials ----- \ No newline at end of file diff --git a/modules/cert-manager-enable-metrics.adoc b/modules/cert-manager-enable-metrics.adoc deleted file mode 100644 index 4329a3d67426..000000000000 --- a/modules/cert-manager-enable-metrics.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-monitoring.adoc - -:_content-type: PROCEDURE -[id="cert-manager-enable-metrics_{context}"] -= Enabling monitoring by using a service monitor for the {cert-manager-operator} - -You can enable monitoring and metrics collection for the {cert-manager-operator} by using a service monitor to perform the custom metrics scraping. - -.Prerequisites - -* You have access to the cluster with `cluster-admin` privileges. -* The {cert-manager-operator} is installed. - -.Procedure - -. Add the label to enable cluster monitoring by running the following command: -+ -[source,terminal] ----- -$ oc label namespace cert-manager openshift.io/cluster-monitoring=true ----- - -. Enable monitoring for user-defined projects. See _Enabling monitoring for user-defined projects_ for instructions. - -. Create a service monitor: - -.. 
Create a YAML file that defines the `ServiceMonitor` object: -+ -.Example `service-monitor.yaml` file -[source,yaml] ----- -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - labels: - app: cert-manager - app.kubernetes.io/component: controller - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/name: cert-manager - name: cert-manager - namespace: cert-manager -spec: - endpoints: - - interval: 30s - port: tcp-prometheus-servicemonitor - scheme: http - selector: - matchLabels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/name: cert-manager ----- - -.. Create the `ServiceMonitor` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f service-monitor.yaml ----- \ No newline at end of file diff --git a/modules/cert-manager-enable-operand-log-level.adoc b/modules/cert-manager-enable-operand-log-level.adoc deleted file mode 100644 index eb4676bf2332..000000000000 --- a/modules/cert-manager-enable-operand-log-level.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-log-levels.adoc - -:_content-type: PROCEDURE -[id="cert-manager-enable-operand-log-level_{context}"] -= Setting a log level for cert-manager - -You can set a log level for cert-manager to determine the verbosity of log messages. - -.Prerequisites - -* You have access to the cluster with `cluster-admin` privileges. -* You have installed the {cert-manager-operator} 1.11.1 or later. - -.Procedure - -. Edit the `CertManager` resource by running the following command: -+ -[source,terminal] ----- -$ oc edit certmanager.operator cluster ----- - -. Set the log level value by editing the `spec.logLevel` section: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1alpha1 -kind: CertManager -... -spec: - logLevel: Normal <1> ----- -<1> The default `logLevel` is `Normal`. Replace `Normal` with the desired log level value. The valid log level values for the `CertManager` resource are `Normal`, `Debug`, `Trace`, and `TraceAll`. To audit logs and perform common operations when everything is fine, set `logLevel` to `Normal` . To troubleshoot a minor issue by viewing verbose logs, set `logLevel` to `Debug` . To troubleshoot a major issue by viewing more verbose logs, you can set `logLevel` to `Trace`. To troubleshoot serious issues, set `logLevel` to `TraceAll`. -+ -[NOTE] -==== -`TraceAll` generates huge amount of logs. After setting `logLevel` to `TraceAll`, you might experience performance issues. -==== - -. Save your changes and quit the text editor to apply your changes. -+ -After applying the changes, the verbosity level for the cert-manager components controller, CA injector, and webhook is updated. \ No newline at end of file diff --git a/modules/cert-manager-enable-operator-log-level.adoc b/modules/cert-manager-enable-operator-log-level.adoc deleted file mode 100644 index a2263ef1f2dd..000000000000 --- a/modules/cert-manager-enable-operator-log-level.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-log-levels.adoc - -:_content-type: PROCEDURE -[id="cert-manager-enable-operator-log-level_{context}"] -= Setting a log level for the {cert-manager-operator} - -You can set a log level for the {cert-manager-operator} to determine the verbosity of the operator log messages. 
- -.Prerequisites - -* You have access to the cluster with `cluster-admin` privileges. -* You have installed the {cert-manager-operator} 1.11.1 or later. - -.Procedure - -* Update the subscription object for {cert-manager-operator} to provide the verbosity level for the operator logs by running the following command: -+ -[source,terminal] ----- -$ oc -n cert-manager-operator patch subscription openshift-cert-manager-operator --type='merge' -p '{"spec":{"config":{"env":[{"name":"OPERATOR_LOG_LEVEL","value":"v"}]}}}' <1> ----- -<1> Replace `v` with the desired log level number. The valid values for `v` can range from `1`to `10`. The default value is `2`. - -.Verification - -. The cert-manager Operator pod is redeployed. Verify that the log level of the {cert-manager-operator} is updated by running the following command: -+ -[source,terminal] ----- -$ oc set env deploy/cert-manager-operator-controller-manager -n cert-manager-operator --list | grep -e OPERATOR_LOG_LEVEL -e container ----- -+ -.Example output -[source,terminal] ----- -# deployments/cert-manager-operator-controller-manager, container kube-rbac-proxy -OPERATOR_LOG_LEVEL=9 -# deployments/cert-manager-operator-controller-manager, container cert-manager-operator -OPERATOR_LOG_LEVEL=9 ----- - -. Verify that the log level of the {cert-manager-operator} is updated by running the `oc logs` command: -+ -[source,terminal] ----- -$ oc logs deploy/cert-manager-operator-controller-manager -n cert-manager-operator ----- \ No newline at end of file diff --git a/modules/cert-manager-install-console.adoc b/modules/cert-manager-install-console.adoc deleted file mode 100644 index 4267edae6d3d..000000000000 --- a/modules/cert-manager-install-console.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-operator-install.adoc - -:_content-type: PROCEDURE -[id="cert-manager-install-console_{context}"] -= Installing the {cert-manager-operator} using the web console - -You can use the web console to install the {cert-manager-operator}. - -.Prerequisites - -* You have access to the cluster with `cluster-admin` privileges. -* You have access to the {product-title} web console. - -.Procedure - -. Log in to the {product-title} web console. - -. Navigate to *Operators* -> *OperatorHub*. - -. Enter *{cert-manager-operator}* into the filter box. - -. Select the *{cert-manager-operator}* and click *Install*. - -. On the *Install Operator* page: -.. Update the *Update channel*, if necessary. The channel defaults to *stable-v1*, which installs the latest stable release of the {cert-manager-operator}. -.. Choose the *Installed Namespace* for the Operator. The default Operator namespace is `cert-manager-operator`. -+ -If the `cert-manager-operator` namespace does not exist, it is created for you. - -.. Select an *Update approval* strategy. -+ -* The *Automatic* strategy allows Operator Lifecycle Manager (OLM) to automatically update the Operator when a new version is available. -+ -* The *Manual* strategy requires a user with appropriate credentials to approve the Operator update. - -.. Click *Install*. - -.Verification - -. Navigate to *Operators* -> *Installed Operators*. -. Verify that *{cert-manager-operator}* is listed with a *Status* of *Succeeded* in the `cert-manager-operator` namespace. -. 
Verify that cert-manager pods are up and running by entering the following command: -+ -[source,terminal] ----- -$ oc get pods -n cert-manager ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cert-manager-bd7fbb9fc-wvbbt 1/1 Running 0 3m39s -cert-manager-cainjector-56cc5f9868-7g9z7 1/1 Running 0 4m5s -cert-manager-webhook-d4f79d7f7-9dg9w 1/1 Running 0 4m9s ----- -+ -You can use the {cert-manager-operator} only after cert-manager pods are up and running. \ No newline at end of file diff --git a/modules/cert-manager-issuer-types.adoc b/modules/cert-manager-issuer-types.adoc deleted file mode 100644 index 5736a227e8a0..000000000000 --- a/modules/cert-manager-issuer-types.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/index.adoc - -:_content-type: CONCEPT -[id="cert-manager-issuer-types_{context}"] -= Supported issuer types - -The {cert-manager-operator} supports the following issuer types: - -* Automated Certificate Management Environment (ACME) -* Certificate authority (CA) -* Self-signed -* link:https://cert-manager.io/docs/configuration/vault/[Vault] diff --git a/modules/cert-manager-override-arguments.adoc b/modules/cert-manager-override-arguments.adoc deleted file mode 100644 index 7080c8a8750e..000000000000 --- a/modules/cert-manager-override-arguments.adoc +++ /dev/null @@ -1,107 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-customizing-api-fields.adoc - -:_content-type: PROCEDURE -[id="cert-manager-override-arguments_{context}"] -= Customizing cert-manager by overriding arguments from the cert-manager Operator API - -You can override the supported arguments for the {cert-manager-operator} by adding a `spec.controllerConfig` section in the `CertManager` resource. - -.Prerequisites - -* You have access to the {product-title} cluster as a user with the `cluster-admin` role. - -.Procedure - -. Edit the `CertManager` resource by running the following command: -+ -[source,terminal] ----- -$ oc edit certmanager cluster ----- - -. Add a `spec.controllerConfig` section with the following override arguments: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1alpha1 -kind: CertManager -metadata: - name: cluster - ... -spec: - ... - controllerConfig: - overrideArgs: - - '--dns01-recursive-nameservers=:' <1> - - '--dns01-recursive-nameservers-only' <2> - - '--acme-http01-solver-nameservers=:' <3> - - '--v=' <4> - - '--metrics-listen-address=:' <5> - - '--issuer-ambient-credentials' <6> - webhookConfig: - overrideArgs: - - '--v=4' <4> - cainjectorConfig: - overrideArgs: - - '--v=2' <4> ----- -<1> Provide a comma-separated list of `:` nameservers to query for the DNS-01 self check. For example, `--dns01-recursive-nameservers=1.1.1.1:53`. -<2> Specify to only use recursive nameservers instead of checking the authoritative nameservers associated with that domain. -<3> Provide a comma-separated list of `:` nameservers to query for the Automated Certificate Management Environment (ACME) HTTP01 self check. For example, `--acme-http01-solver-nameservers=1.1.1.1:53`. -<4> Specify to set the log level verbosity to determine the verbosity of log messages. -<5> Specify the host and port for the metrics endpoint. The default value is `--metrics-listen-address=0.0.0.0:9402`. -<6> You must use the `--issuer-ambient-credentials` argument when configuring an ACME Issuer to solve DNS-01 challenges by using ambient credentials. 
- -. Save your changes and quit the text editor to apply your changes. - -.Verification - -* Verify that arguments are updated for cert-manager pods by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n cert-manager -o yaml ----- -+ -.Example output -[source,yaml] ----- -... - metadata: - name: cert-manager-6d4b5d4c97-kldwl - namespace: cert-manager -... - spec: - containers: - - args: - - --acme-http01-solver-nameservers=1.1.1.1:53 - - --cluster-resource-namespace=$(POD_NAMESPACE) - - --dns01-recursive-nameservers=1.1.1.1:53 - - --dns01-recursive-nameservers-only - - --leader-election-namespace=kube-system - - --max-concurrent-challenges=60 - - --metrics-listen-address=0.0.0.0:9042 - - --v=6 -... - metadata: - name: cert-manager-cainjector-866c4fd758-ltxxj - namespace: cert-manager -... - spec: - containers: - - args: - - --leader-election-namespace=kube-system - - --v=2 -... - metadata: - name: cert-manager-webhook-6d48f88495-c88gd - namespace: cert-manager -... - spec: - containers: - - args: - ... - - --v=4 ----- \ No newline at end of file diff --git a/modules/cert-manager-override-environment-variables.adoc b/modules/cert-manager-override-environment-variables.adoc deleted file mode 100644 index a54fd7195e5e..000000000000 --- a/modules/cert-manager-override-environment-variables.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-customizing-api-fields.adoc - -:_content-type: PROCEDURE -[id="cert-manager-override-environment-variables_{context}"] -= Customizing cert-manager by overriding environment variables from the cert-manager Operator API - -You can override the supported environment variables for the {cert-manager-operator} by adding a `spec.controllerConfig` section in the `CertManager` resource. - -.Prerequisites - -* You have access to the {product-title} cluster as a user with the `cluster-admin` role. - -.Procedure - -. Edit the `CertManager` resource by running the following command: -+ -[source,terminal] ----- -$ oc edit certmanager cluster ----- - -. Add a `spec.controllerConfig` section with the following override arguments: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1alpha1 -kind: CertManager -metadata: - name: cluster - ... -spec: - ... - controllerConfig: - overrideEnv: - - name: HTTP_PROXY - value: http:// <1> - - name: HTTPS_PROXY - value: https:// <1> - - name: NO_PROXY - value: <2> ----- -<1> Replace `` with the proxy server URL. -<2> Replace `` with a comma separated list of domains. These domains are ignored by the proxy server. - -. Save your changes and quit the text editor to apply your changes. - -.Verification - -. Verify that the cert-manager controller pod is redeployed by running the following command: -+ -[source,terminal] ----- -$ oc get pods -l app.kubernetes.io/name=cert-manager -n cert-manager ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cert-manager-bd7fbb9fc-wvbbt 1/1 Running 0 39s ----- - -. Verify that environment variables are updated for the cert-manager pod by running the following command: -+ -[source,terminal] ----- -$ oc get pod -n cert-manager -o yaml ----- -+ -.Example output -[source,yaml] ----- - env: - ... 
- - name: HTTP_PROXY - value: http:// - - name: HTTPS_PROXY - value: https:// - - name: NO_PROXY - value: ----- \ No newline at end of file diff --git a/modules/cert-manager-proxy-support.adoc b/modules/cert-manager-proxy-support.adoc deleted file mode 100644 index cfbd5d29f7d1..000000000000 --- a/modules/cert-manager-proxy-support.adoc +++ /dev/null @@ -1,84 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-operator-proxy.adoc - -:_content-type: PROCEDURE -[id="cert-manager-proxy-support_{context}"] -= Injecting a custom CA certificate for the {cert-manager-operator} - -If your {product-title} cluster has the cluster-wide proxy enabled, you can inject any CA certificates that are required for proxying HTTPS connections into the {cert-manager-operator}. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have enabled the cluster-wide proxy for {product-title}. - -.Procedure - -. Create a config map in the `cert-manager` namespace by running the following command: -+ -[source,terminal] ----- -$ oc create configmap trusted-ca -n cert-manager ----- - -. Inject the CA bundle that is trusted by {product-title} into the config map by running the following command: -+ -[source,terminal] ----- -$ oc label cm trusted-ca config.openshift.io/inject-trusted-cabundle=true -n cert-manager ----- - -. Update the deployment for the {cert-manager-operator} to use the config map by running the following command: -+ -[source,terminal] ----- -$ oc -n cert-manager-operator patch subscription openshift-cert-manager-operator --type='merge' -p '{"spec":{"config":{"env":[{"name":"TRUSTED_CA_CONFIGMAP_NAME","value":"trusted-ca"}]}}}' ----- - -.Verification - -. Verify that the deployments have finished rolling out by running the following command: -+ -[source,terminal] ----- -$ oc rollout status deployment/cert-manager-operator-controller-manager -n cert-manager-operator && \ -oc rollout status deployment/cert-manager -n cert-manager && \ -oc rollout status deployment/cert-manager-webhook -n cert-manager && \ -oc rollout status deployment/cert-manager-cainjector -n cert-manager ----- -+ -.Example output -[source,terminal] ----- -deployment "cert-manager-operator-controller-manager" successfully rolled out -deployment "cert-manager" successfully rolled out -deployment "cert-manager-webhook" successfully rolled out -deployment "cert-manager-cainjector" successfully rolled out ----- - -. Verify that the CA bundle was mounted as a volume by running the following command: -+ -[source,terminal] ----- -$ oc get deployment cert-manager -n cert-manager -o=jsonpath={.spec.template.spec.'containers[0].volumeMounts'} ----- -+ -.Example output -[source,terminal] ----- -[{"mountPath":"/etc/pki/tls/certs/cert-manager-tls-ca-bundle.crt","name":"trusted-ca","subPath":"ca-bundle.crt"}] ----- - -. 
Verify that the source of the CA bundle is the `trusted-ca` config map by running the following command: -+ -[source,terminal] ----- -$ oc get deployment cert-manager -n cert-manager -o=jsonpath={.spec.template.spec.volumes} ----- -+ -.Example output -[source,terminal] ----- -[{"configMap":{"defaultMode":420,"name":"trusted-ca"},"name":"trusted-ca"}] ----- diff --git a/modules/cert-manager-query-metrics.adoc b/modules/cert-manager-query-metrics.adoc deleted file mode 100644 index 0cb178d36a84..000000000000 --- a/modules/cert-manager-query-metrics.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-monitoring.adoc - -:_content-type: PROCEDURE -[id="cert-manager-query-metrics_{context}"] -= Querying metrics for the {cert-manager-operator} - -After you have enabled monitoring for the {cert-manager-operator}, you can query its metrics by using the {product-title} web console. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the {cert-manager-operator}. -* You have enabled monitoring and metrics collection for the {cert-manager-operator}. - -.Procedure - -. From the {product-title} web console, navigate to *Observe* -> *Metrics*. - -. Add a query by using one of the following formats: - -** Specify the endpoints: -+ -[source,promql] ----- -{instance=""} <1> ----- -<1> Replace `` with the value of the endpoint for the `cert-manager` service. You can find the endpoint value by running the following command: `oc describe service cert-manager -n cert-manager`. - -** Specify the `tcp-prometheus-servicemonitor` port: -+ -[source,promql] ----- -{endpoint="tcp-prometheus-servicemonitor"} ----- \ No newline at end of file diff --git a/modules/cert-manager-remove-resources-console.adoc b/modules/cert-manager-remove-resources-console.adoc deleted file mode 100644 index eb0d751fe968..000000000000 --- a/modules/cert-manager-remove-resources-console.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert-manager-operator-uninstall.adoc - -:_content-type: PROCEDURE -[id="cert-manager-remove-resources-console_{context}"] -= Removing {cert-manager-operator} resources - -Once you have uninstalled the {cert-manager-operator}, you have the option to eliminate its associated resources from your cluster. - -.Prerequisites - -* You have access to the cluster with `cluster-admin` privileges. -* You have access to the {product-title} web console. - -.Procedure - -. Log in to the {product-title} web console. - -. Remove the deployments of the cert-manager components, such as `cert-manager`, `cainjector`, and `webhook`, present in the `cert-manager` namespace. - -.. Click the *Project* drop-down menu to see a list of all available projects, and select the *cert-manager* project. - -.. Navigate to *Workloads* -> *Deployments*. - -.. Select the deployment that you want to delete. - -.. Click the *Actions* drop-down menu, and select *Delete Deployment* to see a confirmation dialog box. - -.. Click *Delete* to delete the deployment. - -.. Alternatively, delete deployments of the cert-manager components such as `cert-manager`, `cainjector` and `webhook` present in the `cert-manager` namespace by using the command-line interface (CLI). -+ -[source,terminal] ----- -$ oc delete deployment -n cert-manager -l app.kubernetes.io/instance=cert-manager ----- - -. 
Optional: Remove the custom resource definitions (CRDs) that were installed by the {cert-manager-operator}: - -.. Navigate to *Administration* -> *CustomResourceDefinitions*. - -.. Enter `certmanager` in the *Name* field to filter the CRDs. - -.. Click the Options menu {kebab} next to each of the following CRDs, and select *Delete Custom Resource Definition*: - -*** `Certificate` -*** `CertificateRequest` -*** `CertManager` (`operator.openshift.io`) -*** `Challenge` -*** `ClusterIssuer` -*** `Issuer` -*** `Order` - -. Optional: Remove the `cert-manager-operator` namespace. -.. Navigate to *Administration* -> *Namespaces*. -.. Click the Options menu {kebab} next to the *cert-manager-operator* and select *Delete Namespace*. -.. In the confirmation dialog, enter `cert-manager-operator` in the field and click *Delete*. \ No newline at end of file diff --git a/modules/cert-manager-request-methods.adoc b/modules/cert-manager-request-methods.adoc deleted file mode 100644 index 4d972a7c64a5..000000000000 --- a/modules/cert-manager-request-methods.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/index.adoc - -:_content-type: CONCEPT -[id="cert-manager-request-methods_{context}"] -= Certificate request methods - -There are two ways to request a certificate using the {cert-manager-operator}: - -Using the `cert-manager.io/CertificateRequest` object:: With this method a service developer creates a `CertificateRequest` object with a valid `issuerRef` pointing to a configured issuer (configured by a service infrastructure administrator). A service infrastructure administrator then accepts or denies the certificate request. Only accepted certificate requests create a corresponding certificate. - -Using the `cert-manager.io/Certificate` object:: With this method, a service developer creates a `Certificate` object with a valid `issuerRef` and obtains a certificate from a secret that they pointed to the `Certificate` object. diff --git a/modules/cert-manager-uninstall-console.adoc b/modules/cert-manager-uninstall-console.adoc deleted file mode 100644 index 32b069abc629..000000000000 --- a/modules/cert-manager-uninstall-console.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-operator-uninstall.adoc - -:_content-type: PROCEDURE -[id="cert-manager-uninstall-console_{context}"] -= Uninstalling the {cert-manager-operator} - -You can uninstall the {cert-manager-operator} by using the web console. - -.Prerequisites - -* You have access to the cluster with `cluster-admin` privileges. -* You have access to the {product-title} web console. -* The {cert-manager-operator} is installed. -// TODO: Any other prereqs, like removing anything that is using it? - -.Procedure - -. Log in to the {product-title} web console. -. Uninstall the {cert-manager-operator} Operator. -.. Navigate to *Operators* -> *Installed Operators*. -.. Click the Options menu {kebab} next to the *{cert-manager-operator}* entry and click *Uninstall Operator*. -.. In the confirmation dialog, click *Uninstall*. 
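If you prefer to remove the Operator from the command line instead of the web console, the subscription and its installed `ClusterServiceVersion` can be deleted directly. The following is a hedged sketch rather than part of the original procedure; it assumes the default `openshift-cert-manager-operator` subscription name and `cert-manager-operator` namespace used by the other modules in this changeset.

[source,terminal]
----
$ CSV_NAME=$(oc get subscription openshift-cert-manager-operator \
    -n cert-manager-operator -o jsonpath='{.status.installedCSV}')

$ oc delete subscription openshift-cert-manager-operator -n cert-manager-operator

$ oc delete clusterserviceversion "$CSV_NAME" -n cert-manager-operator
----

Deleting the `Subscription` before the `ClusterServiceVersion` prevents Operator Lifecycle Manager from reinstalling the Operator after the `ClusterServiceVersion` is removed.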
diff --git a/modules/certificate-injection-using-operators.adoc b/modules/certificate-injection-using-operators.adoc deleted file mode 100644 index 17e526a88a87..000000000000 --- a/modules/certificate-injection-using-operators.adoc +++ /dev/null @@ -1,82 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-a-custom-pki.adoc - -[id="certificate-injection-using-operators_{context}"] -= Certificate injection using Operators - -Once your custom CA certificate is added to the cluster via ConfigMap, the -Cluster Network Operator merges the user-provided and system CA certificates -into a single bundle and injects the merged bundle into the Operator requesting -the trust bundle injection. - -[IMPORTANT] -==== -After adding a `config.openshift.io/inject-trusted-cabundle="true"` label to the config map, existing data in it is deleted. The Cluster Network Operator takes ownership of a config map and only accepts `ca-bundle` as data. -You must use a separate config map to store `service-ca.crt` by using the `service.beta.openshift.io/inject-cabundle=true` annotation or a similar configuration. Adding a `config.openshift.io/inject-trusted-cabundle="true"` label and `service.beta.openshift.io/inject-cabundle=true` annotation on the same config map can cause issues. -==== - -Operators request this injection by creating an empty ConfigMap with the -following label: - -[source,yaml] ----- -config.openshift.io/inject-trusted-cabundle="true" ----- - -An example of the empty ConfigMap: -[source,yaml] ----- -apiVersion: v1 -data: {} -kind: ConfigMap -metadata: - labels: - config.openshift.io/inject-trusted-cabundle: "true" - name: ca-inject <1> - namespace: apache ----- -<1> Specifies the empty ConfigMap name. - -The Operator mounts this ConfigMap into the container's local trust store. - -[NOTE] -==== -Adding a trusted CA certificate is only needed if the certificate is not -included in the {op-system-first} trust bundle. -==== - -Certificate injection is not limited to Operators. The Cluster Network Operator -injects certificates across any namespace when an empty ConfigMap is created with the -`config.openshift.io/inject-trusted-cabundle=true` label. - -The ConfigMap can reside in any namespace, but the ConfigMap must be mounted as -a volume to each container within a pod that requires a custom CA. For example: - -[source,yaml] ----- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: my-example-custom-ca-deployment - namespace: my-example-custom-ca-ns -spec: - ... - spec: - ... - containers: - - name: my-container-that-needs-custom-ca - volumeMounts: - - name: trusted-ca - mountPath: /etc/pki/ca-trust/extracted/pem - readOnly: true - volumes: - - name: trusted-ca - configMap: - name: trusted-ca - items: - - key: ca-bundle.crt <1> - path: tls-ca-bundle.pem <2> ----- -<1> `ca-bundle.crt` is required as the ConfigMap key. -<2> `tls-ca-bundle.pem` is required as the ConfigMap path. 
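As a quick way to see the injection described above in action, you can create and label the empty ConfigMap from the example and then inspect the data that the Cluster Network Operator writes into it. This is a hedged sketch that reuses the illustrative `ca-inject` name and `apache` namespace from the preceding example; the merged bundle appears under the required `ca-bundle.crt` key shortly after the label is applied.

[source,terminal]
----
$ oc create configmap ca-inject -n apache

$ oc label configmap ca-inject -n apache config.openshift.io/inject-trusted-cabundle=true

$ oc get configmap ca-inject -n apache -o jsonpath='{.data.ca-bundle\.crt}' | head -n 3
----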
diff --git a/modules/checking-cluster-resource-availability-and-utilization.adoc b/modules/checking-cluster-resource-availability-and-utilization.adoc deleted file mode 100644 index 8ace6a550f11..000000000000 --- a/modules/checking-cluster-resource-availability-and-utilization.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// *installing/validating-an-installation.adoc - -:_content-type: PROCEDURE -[id="checking-cluster-resource-availability-and-utilization_{context}"] -= Checking cluster resource availability and utilization - -{product-title} provides a comprehensive set of monitoring dashboards that help you understand the state of cluster components. - -In the *Administrator* perspective, you can access dashboards for core {product-title} components, including: - -* etcd - -* Kubernetes compute resources - -* Kubernetes network resources - -* Prometheus - -* Dashboards relating to cluster and node performance - -.Example compute resources dashboard -image::monitoring-dashboard-compute-resources.png[] - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. In the *Administrator* perspective in the {product-title} web console, navigate to *Observe* -> *Dashboards*. - -. Choose a dashboard in the *Dashboard* list. Some dashboards, such as the *etcd* dashboard, produce additional sub-menus when selected. - -. Optional: Select a time range for the graphs in the *Time Range* list. -+ -** Select a pre-defined time period. -+ -** Set a custom time range by selecting *Custom time range* in the *Time Range* list. -+ -.. Input or select the *From* and *To* dates and times. -+ -.. Click *Save* to save the custom time range. - -. Optional: Select a *Refresh Interval*. - -. Hover over each of the graphs within a dashboard to display detailed information about specific items. diff --git a/modules/checking-file-intergrity-cr-status.adoc b/modules/checking-file-intergrity-cr-status.adoc deleted file mode 100644 index 15dcf51c86b7..000000000000 --- a/modules/checking-file-intergrity-cr-status.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-understanding.adoc - -:_content-type: PROCEDURE -[id="checking-the-file-integrity-CR-status_{context}"] -= Checking the FileIntegrity custom resource status - -The `FileIntegrity` custom resource (CR) reports its status through the .`status.phase` subresource. - -.Procedure - -* To query the `FileIntegrity` CR status, run: -+ -[source,terminal] ----- -$ oc get fileintegrities/worker-fileintegrity -o jsonpath="{ .status.phase }" ----- -+ -.Example output -[source,terminal] ----- -Active ----- diff --git a/modules/checking-load-balancer-configuration.adoc b/modules/checking-load-balancer-configuration.adoc deleted file mode 100644 index c60a997a5807..000000000000 --- a/modules/checking-load-balancer-configuration.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-installations.adoc - -:_content-type: PROCEDURE -[id="checking-load-balancer-configuration_{context}"] -= Checking a load balancer configuration before {product-title} installation - -Check your load balancer configuration prior to starting an {product-title} installation. - -.Prerequisites - -* You have configured an external load balancer of your choosing, in preparation for an {product-title} installation. 
The following example is based on a {op-system-base-full} host using HAProxy to provide load balancing services to a cluster. -* You have configured DNS in preparation for an {product-title} installation. -* You have SSH access to your load balancer. - -.Procedure - -. Check that the `haproxy` systemd service is active: -+ -[source,terminal] ----- -$ ssh @ systemctl status haproxy ----- - -. Verify that the load balancer is listening on the required ports. The following example references ports `80`, `443`, `6443`, and `22623`. -+ -* For HAProxy instances running on {op-system-base-full} 6, verify port status by using the `netstat` command: -+ -[source,terminal] ----- -$ ssh @ netstat -nltupe | grep -E ':80|:443|:6443|:22623' ----- -+ -* For HAProxy instances running on {op-system-base-full} 7 or 8, verify port status by using the `ss` command: -+ -[source,terminal] ----- -$ ssh @ ss -nltupe | grep -E ':80|:443|:6443|:22623' ----- -+ -[NOTE] -==== -Red Hat recommends the `ss` command instead of `netstat` in {op-system-base-full} 7 or later. `ss` is provided by the iproute package. For more information on the `ss` command, see the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/performance_tuning_guide/sect-red_hat_enterprise_linux-performance_tuning_guide-tool_reference-ss[{op-system-base-full} 7 Performance Tuning Guide]. -==== -+ -. Check that the wildcard DNS record resolves to the load balancer: -+ -[source,terminal] ----- -$ dig @ ----- diff --git a/modules/checking-mco-status.adoc b/modules/checking-mco-status.adoc deleted file mode 100644 index a0693741c0ba..000000000000 --- a/modules/checking-mco-status.adoc +++ /dev/null @@ -1,167 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/machine-configuration-tasks.adoc - -:_content-type: PROCEDURE -[id="checking-mco-status_{context}"] -= Checking machine config pool status - -To see the status of the Machine Config Operator (MCO), its sub-components, and the resources it manages, use the following `oc` commands: - -.Procedure -. To see the number of MCO-managed nodes available on your cluster for each machine config pool (MCP), run the following command: -+ -[source,terminal] ----- -$ oc get machineconfigpool ----- -+ -.Example output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-06c9c4… True False False 3 3 3 0 4h42m -worker rendered-worker-f4b64… False True False 3 2 2 0 4h42m ----- -+ --- -where: - -UPDATED:: The `True` status indicates that the MCO has applied the current machine config to the nodes in that MCP. The current machine config is specified in the `STATUS` field in the `oc get mcp` output. The `False` status indicates a node in the MCP is updating. -UPDATING:: The `True` status indicates that the MCO is applying the desired machine config, as specified in the `MachineConfigPool` custom resource, to at least one of the nodes in that MCP. The desired machine config is the new, edited machine config. Nodes that are updating might not be available for scheduling. The `False` status indicates that all nodes in the MCP are updated. -DEGRADED:: A `True` status indicates the MCO is blocked from applying the current or desired machine config to at least one of the nodes in that MCP, or the configuration is failing. Nodes that are degraded might not be available for scheduling. 
A `False` status indicates that all nodes in the MCP are ready. -MACHINECOUNT:: Indicates the total number of machines in that MCP. -READYMACHINECOUNT:: Indicates the total number of machines in that MCP that are ready for scheduling. -UPDATEDMACHINECOUNT:: Indicates the total number of machines in that MCP that have the current machine config. -DEGRADEDMACHINECOUNT:: Indicates the total number of machines in that MCP that are marked as degraded or unreconcilable. --- -+ -In the previous output, there are three control plane (master) nodes and three worker nodes. The control plane MCP and the associated nodes are updated to the current machine config. The nodes in the worker MCP are being updated to the desired machine config. Two of the nodes in the worker MCP are updated and one is still updating, as indicated by the `UPDATEDMACHINECOUNT` being `2`. There are no issues, as indicated by the `DEGRADEDMACHINECOUNT` being `0` and `DEGRADED` being `False`. -+ -While the nodes in the MCP are updating, the machine config listed under `CONFIG` is the current machine config, which the MCP is being updated from. When the update is complete, the listed machine config is the desired machine config, which the MCP was updated to. -+ -[NOTE] -==== -If a node is being cordoned, that node is not included in the `READYMACHINECOUNT`, but is included in the `MACHINECOUNT`. Also, the MCP status is set to `UPDATING`. Because the node has the current machine config, it is counted in the `UPDATEDMACHINECOUNT` total: - -.Example output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-06c9c4… True False False 3 3 3 0 4h42m -worker rendered-worker-c1b41a… False True False 3 2 3 0 4h42m ----- -==== - -. To check the status of the nodes in an MCP by examining the `MachineConfigPool` custom resource, run the following command: -: -+ -[source,terminal] ----- -$ oc describe mcp worker ----- -+ -.Example output -[source,terminal] ----- -... - Degraded Machine Count: 0 - Machine Count: 3 - Observed Generation: 2 - Ready Machine Count: 3 - Unavailable Machine Count: 0 - Updated Machine Count: 3 -Events: ----- -+ -[NOTE] -==== -If a node is being cordoned, the node is not included in the `Ready Machine Count`. It is included in the `Unavailable Machine Count`: - -.Example output -[source,terminal] ----- -... - Degraded Machine Count: 0 - Machine Count: 3 - Observed Generation: 2 - Ready Machine Count: 2 - Unavailable Machine Count: 1 - Updated Machine Count: 3 ----- -==== - -. To see each existing `MachineConfig` object, run the following command: -+ -[source,terminal] ----- -$ oc get machineconfigs ----- -+ -.Example output -[source,terminal] ----- -NAME GENERATEDBYCONTROLLER IGNITIONVERSION AGE -00-master 2c9371fbb673b97a6fe8b1c52... 3.2.0 5h18m -00-worker 2c9371fbb673b97a6fe8b1c52... 3.2.0 5h18m -01-master-container-runtime 2c9371fbb673b97a6fe8b1c52... 3.2.0 5h18m -01-master-kubelet 2c9371fbb673b97a6fe8b1c52… 3.2.0 5h18m -... -rendered-master-dde... 2c9371fbb673b97a6fe8b1c52... 3.2.0 5h18m -rendered-worker-fde... 2c9371fbb673b97a6fe8b1c52... 3.2.0 5h18m ----- -+ -Note that the `MachineConfig` objects listed as `rendered` are not meant to be changed or deleted. - -. 
To view the contents of a particular machine config (in this case, `01-master-kubelet`), run the following command: -+ -[source,terminal] ----- -$ oc describe machineconfigs 01-master-kubelet ----- -+ -The output from the command shows that this `MachineConfig` object contains both configuration files (`cloud.conf` and `kubelet.conf`) and a systemd service (Kubernetes Kubelet): -+ -.Example output -[source,terminal] ----- -Name: 01-master-kubelet -... -Spec: - Config: - Ignition: - Version: 3.2.0 - Storage: - Files: - Contents: - Source: data:, - Mode: 420 - Overwrite: true - Path: /etc/kubernetes/cloud.conf - Contents: - Source: data:,kind%3A%20KubeletConfiguration%0AapiVersion%3A%20kubelet.config.k8s.io%2Fv1beta1%0Aauthentication%3A%0A%20%20x509%3A%0A%20%20%20%20clientCAFile%3A%20%2Fetc%2Fkubernetes%2Fkubelet-ca.crt%0A%20%20anonymous... - Mode: 420 - Overwrite: true - Path: /etc/kubernetes/kubelet.conf - Systemd: - Units: - Contents: [Unit] -Description=Kubernetes Kubelet -Wants=rpc-statd.service network-online.target crio.service -After=network-online.target crio.service - -ExecStart=/usr/bin/hyperkube \ - kubelet \ - --config=/etc/kubernetes/kubelet.conf \ ... ----- - -If something goes wrong with a machine config that you apply, you can always back out that change. For example, if you had run `oc create -f ./myconfig.yaml` to apply a machine config, you could remove that machine config by running the following command: - -[source,terminal] ----- -$ oc delete -f ./myconfig.yaml ----- - -If that was the only problem, the nodes in the affected pool should return to a non-degraded state. This actually causes the rendered configuration to roll back to its previously rendered state. - -If you add your own machine configs to your cluster, you can use the commands shown in the previous example to check their status and the related status of the pool to which they are applied. diff --git a/modules/checking-project-status-using-the-CLI.adoc b/modules/checking-project-status-using-the-CLI.adoc deleted file mode 100644 index 0849609763c7..000000000000 --- a/modules/checking-project-status-using-the-CLI.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// applications/projects/working-with-projects.adoc - -:_content-type: PROCEDURE -[id="checking-project-status-using-the-CLI_{context}"] -= Checking project status using the CLI - -.Procedure - -. Run: -+ -[source,terminal] ----- -$ oc status ----- -+ -This command provides a high-level overview of the current project, with its -components and their relationships. diff --git a/modules/checking-project-status-using-the-web-console.adoc b/modules/checking-project-status-using-the-web-console.adoc deleted file mode 100644 index fb0d6cd6f95c..000000000000 --- a/modules/checking-project-status-using-the-web-console.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// applications/projects/working-with-projects.adoc - -:_content-type: PROCEDURE -[id="checking-project-status-using-the-web-console_{context}"] -= Checking project status using the web console - -.Procedure - -. Navigate to *Home* -> *Projects*. - -. Select a project to see its status. 
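For the CLI variant of this check, the following hedged sketch shows how `oc status` is typically run against a specific project. The project name `my-project` is illustrative only and is not taken from the original module.

[source,terminal]
----
$ oc project my-project <1>
$ oc status <2>
$ oc status --suggest <3>
----
<1> Switch the current context to the project that you want to inspect.
<2> Print a high-level overview of the project, its components, and their relationships.
<3> Additionally print suggestions for resolving any issues that are detected.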
diff --git a/modules/cleaning-crio-storage.adoc b/modules/cleaning-crio-storage.adoc deleted file mode 100644 index ce9827b9ad1e..000000000000 --- a/modules/cleaning-crio-storage.adoc +++ /dev/null @@ -1,137 +0,0 @@ -:_content-type: PROCEDURE -[id="cleaning-crio-storage_{context}"] - -= Cleaning CRI-O storage - -You can manually clear the CRI-O ephemeral storage if you experience the following issues: - -* A node cannot run on any pods and this error appears: -[source, terminal] -+ ----- -Failed to create pod sandbox: rpc error: code = Unknown desc = failed to mount container XXX: error recreating the missing symlinks: error reading name of symlink for XXX: open /var/lib/containers/storage/overlay/XXX/link: no such file or directory ----- -+ -* You cannot create a new container on a working node and the “can’t stat lower layer” error appears: -[source, terminal] -+ ----- -can't stat lower layer ... because it does not exist. Going through storage to recreate the missing symlinks. ----- -+ -* Your node is in the `NotReady` state after a cluster upgrade or if you attempt to reboot it. - -* The container runtime implementation (`crio`) is not working properly. - -* You are unable to start a debug shell on the node using `oc debug node/` because the container runtime instance (`crio`) is not working. - -Follow this process to completely wipe the CRI-O storage and resolve the errors. - -.Prerequisites: - - * You have access to the cluster as a user with the `cluster-admin` role. - * You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Use `cordon` on the node. This is to avoid any workload getting scheduled if the node gets into the `Ready` status. You will know that scheduling is disabled when `SchedulingDisabled` is in your Status section: -[source, terminal] -+ ----- -$ oc adm cordon ----- -+ -. Drain the node as the cluster-admin user: -[source, terminal] -+ ----- -$ oc adm drain --ignore-daemonsets --delete-emptydir-data ----- -+ -[NOTE] -==== -The `terminationGracePeriodSeconds` attribute of a pod or pod template controls the graceful termination period. This attribute defaults at 30 seconds, but can be customized per application as necessary. If set to more than 90 seconds, the pod might be marked as `SIGKILLed` and fail to terminate successfully. -==== - -. When the node returns, connect back to the node via SSH or Console. Then connect to the root user: -[source, terminal] -+ ----- -$ ssh core@node1.example.com -$ sudo -i ----- -+ -. Manually stop the kubelet: -[source, terminal] -+ ----- -# systemctl stop kubelet ----- -+ -. Stop the containers and pods: - -.. Use the following command to stop the pods that are not in the `HostNetwork`. They must be removed first because their removal relies on the networking plugin pods, which are in the `HostNetwork`. -[source, terminal] -+ ----- -.. for pod in $(crictl pods -q); do if [[ "$(crictl inspectp $pod | jq -r .status.linux.namespaces.options.network)" != "NODE" ]]; then crictl rmp -f $pod; fi; done ----- - -.. Stop all other pods: -[source, terminal] -+ ----- -# crictl rmp -fa ----- -+ -. Manually stop the crio services: -[source, terminal] -+ ----- -# systemctl stop crio ----- -+ -. After you run those commands, you can completely wipe the ephemeral storage: -[source, terminal] -+ ----- -# crio wipe -f ----- -+ -. Start the crio and kubelet service: -[source, terminal] -+ ----- -# systemctl start crio -# systemctl start kubelet ----- -+ -. 
You will know if the clean up worked if the crio and kubelet services are started, and the node is in the `Ready` status: -[source, terminal] -+ ----- -$ oc get nodes ----- -+ -.Example output -[source, terminal] -+ ----- -NAME STATUS ROLES AGE VERSION -ci-ln-tkbxyft-f76d1-nvwhr-master-1 Ready, SchedulingDisabled master 133m v1.27.3 ----- -+ -. Mark the node schedulable. You will know that the scheduling is enabled when `SchedulingDisabled` is no longer in status: -[source, terminal] -+ ----- -$ oc adm uncordon ----- -+ -.Example output -[source, terminal] -+ ----- -NAME STATUS ROLES AGE VERSION -ci-ln-tkbxyft-f76d1-nvwhr-master-1 Ready master 133m v1.27.3 ----- diff --git a/modules/cli-about-cli.adoc b/modules/cli-about-cli.adoc deleted file mode 100644 index 4f25aaa91f68..000000000000 --- a/modules/cli-about-cli.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/getting-started.adoc - -:_content-type: CONCEPT -[id="cli-about-cli_{context}"] -= About the OpenShift CLI - -With the OpenShift command-line interface (CLI), the `oc` command, you can create applications and manage {product-title} projects from a terminal. The OpenShift CLI is ideal in the following situations: - -* Working directly with project source code -* Scripting {product-title} operations -ifndef::microshift[] -* Managing projects while restricted by bandwidth resources and the web console is unavailable -endif::microshift[] -ifdef::microshift[] -* Managing projects while restricted by bandwidth resources -endif::microshift[] diff --git a/modules/cli-configuring-completion-zsh.adoc b/modules/cli-configuring-completion-zsh.adoc deleted file mode 100644 index 5fb7754406c6..000000000000 --- a/modules/cli-configuring-completion-zsh.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/configuring-cli.adoc - -:_content-type: PROCEDURE -[id="cli-enabling-tab-completion-zsh_{context}"] -= Enabling tab completion for Zsh - -After you install the OpenShift CLI (`oc`), you can enable tab completion to automatically complete `oc` commands or suggest options when you press Tab. The following procedure enables tab completion for the Zsh shell. - -.Prerequisites - -* You must have the OpenShift CLI (`oc`) installed. - -.Procedure - -* To add tab completion for `oc` to your `.zshrc` file, run the following command: -+ -[source,terminal] ----- -$ cat >>~/.zshrc< oc_bash_completion ----- - -. Copy the file to `/etc/bash_completion.d/`: -+ -[source,terminal] ----- -$ sudo cp oc_bash_completion /etc/bash_completion.d/ ----- -+ -You can also save the file to a local directory and source it from your `.bashrc` file instead. - -Tab completion is enabled when you open a new terminal. diff --git a/modules/cli-extending-plugins-installing.adoc b/modules/cli-extending-plugins-installing.adoc deleted file mode 100644 index 33e91a5e3764..000000000000 --- a/modules/cli-extending-plugins-installing.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/extending-cli-plugins.adoc - -:_content-type: PROCEDURE -[id="cli-installing-plugins_{context}"] -= Installing and using CLI plugins - -After you write a custom plugin for the {product-title} CLI, you must install -it to use the functionality that it provides. - -.Prerequisites - -* You must have the `oc` CLI tool installed. -* You must have a CLI plugin file that begins with `oc-` or `kubectl-`. 
- -.Procedure - -. If necessary, update the plugin file to be executable. -+ -[source,terminal] ----- -$ chmod +x ----- -. Place the file anywhere in your `PATH`, such as `/usr/local/bin/`. -+ -[source,terminal] ----- -$ sudo mv /usr/local/bin/. ----- -. Run `oc plugin list` to make sure that the plugin is listed. -+ -[source,terminal] ----- -$ oc plugin list ----- -+ -.Example output -[source,terminal] ----- -The following compatible plugins are available: - -/usr/local/bin/ ----- -+ -If your plugin is not listed here, verify that the file begins with `oc-` -or `kubectl-`, is executable, and is on your `PATH`. -. Invoke the new command or option introduced by the plugin. -+ -For example, if you built and installed the `kubectl-ns` plugin from the - link:https://github.com/kubernetes/sample-cli-plugin[Sample plugin repository], - you can use the following command to view the current namespace. -+ -[source,terminal] ----- -$ oc ns ----- -+ -Note that the command to invoke the plugin depends on the plugin file name. -For example, a plugin with the file name of `oc-foo-bar` is invoked by the `oc foo bar` -command. diff --git a/modules/cli-extending-plugins-writing.adoc b/modules/cli-extending-plugins-writing.adoc deleted file mode 100644 index a71a89e167a3..000000000000 --- a/modules/cli-extending-plugins-writing.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/extending-cli-plugins.adoc - -:_content-type: PROCEDURE -[id="cli-writing-plugins_{context}"] -= Writing CLI plugins - -You can write a plugin for the {product-title} CLI in any programming language -or script that allows you to write command-line commands. Note that you can not -use a plugin to overwrite an existing `oc` command. - -.Procedure - -This procedure creates a simple Bash plugin that prints a message to the -terminal when the `oc foo` command is issued. - -. Create a file called `oc-foo`. -+ -When naming your plugin file, keep the following in mind: - -* The file must begin with `oc-` or `kubectl-` to be recognized as a -plugin. -* The file name determines the command that invokes the plugin. For example, a -plugin with the file name `oc-foo-bar` can be invoked by a command of -`oc foo bar`. You can also use underscores if you want the command to contain -dashes. For example, a plugin with the file name `oc-foo_bar` can be invoked -by a command of `oc foo-bar`. - -. Add the following contents to the file. -+ -[source,bash] ----- -#!/bin/bash - -# optional argument handling -if [[ "$1" == "version" ]] -then - echo "1.0.0" - exit 0 -fi - -# optional argument handling -if [[ "$1" == "config" ]] -then - echo $KUBECONFIG - exit 0 -fi - -echo "I am a plugin named kubectl-foo" ----- - -After you install this plugin for the {product-title} CLI, it can be invoked -using the `oc foo` command. - -[role="_additional-resources"] -.Additional resources - -* Review the link:https://github.com/kubernetes/sample-cli-plugin[Sample plugin repository] -for an example of a plugin written in Go. -* Review the link:https://github.com/kubernetes/cli-runtime/[CLI runtime repository] for a set of utilities to assist in writing plugins in Go. 
diff --git a/modules/cli-getting-help.adoc b/modules/cli-getting-help.adoc deleted file mode 100644 index 0bbcbf6ce47d..000000000000 --- a/modules/cli-getting-help.adoc +++ /dev/null @@ -1,86 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/getting-started.adoc - -[id="cli-getting-help_{context}"] -= Getting help - -You can get help with CLI commands and {product-title} resources in the -following ways. - -* Use `oc help` to get a list and description of all available CLI commands: -+ -.Example: Get general help for the CLI -[source,terminal] ----- -$ oc help ----- -+ -.Example output -[source,terminal] ----- -OpenShift Client - -This client helps you develop, build, deploy, and run your applications on any OpenShift or Kubernetes compatible -platform. It also includes the administrative commands for managing a cluster under the 'adm' subcommand. - -Usage: - oc [flags] - -Basic Commands: - login Log in to a server - new-project Request a new project - new-app Create a new application - -... ----- - -* Use the `--help` flag to get help about a specific CLI command: -+ -.Example: Get help for the `oc create` command -[source,terminal] ----- -$ oc create --help ----- -+ -.Example output -[source,terminal] ----- -Create a resource by filename or stdin - -JSON and YAML formats are accepted. - -Usage: - oc create -f FILENAME [flags] - -... ----- - -* Use the `oc explain` command to view the description and fields for a -particular resource: -+ -.Example: View documentation for the `Pod` resource -[source,terminal] ----- -$ oc explain pods ----- -+ -.Example output -[source,terminal] ----- -KIND: Pod -VERSION: v1 - -DESCRIPTION: - Pod is a collection of containers that can run on a host. This resource is - created by clients and scheduled onto hosts. - -FIELDS: - apiVersion - APIVersion defines the versioned schema of this representation of an - object. Servers should convert recognized schemas to the latest internal - value, and may reject unrecognized values. More info: - https://git.k8s.io/community/contributors/devel/api-conventions.md#resources - -... ----- diff --git a/modules/cli-installing-cli-brew.adoc b/modules/cli-installing-cli-brew.adoc deleted file mode 100644 index 8315eeef2c46..000000000000 --- a/modules/cli-installing-cli-brew.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/getting-started.adoc -// * microshift_cli_ref/microshift_oc_cli_install.adoc - -:_content-type: PROCEDURE -[id="cli-installing-cli-brew_{context}"] -= Installing the OpenShift CLI by using Homebrew - -For macOS, you can install the OpenShift CLI (`oc`) by using the link:https://brew.sh[Homebrew] package manager. - -.Prerequisites - -* You must have Homebrew (`brew`) installed. 
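Before starting the procedure, you can optionally confirm that this prerequisite is met. This is a minimal hedged check; `brew --version` only reports the installed Homebrew version and makes no changes to your system.

[source,terminal]
----
$ brew --version
----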
- -.Procedure - -* Run the following command to install the link:https://formulae.brew.sh/formula/openshift-cli[openshift-cli] package: -+ -[source,terminal] ----- -$ brew install openshift-cli ----- diff --git a/modules/cli-installing-cli-rpm.adoc b/modules/cli-installing-cli-rpm.adoc deleted file mode 100644 index e5adcec7ab81..000000000000 --- a/modules/cli-installing-cli-rpm.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/getting-started.adoc -// * microshift_cli_ref/microshift_oc_cli_install.adoc - -:_content-type: PROCEDURE -[id="cli-installing-cli-rpm_{context}"] -= Installing the OpenShift CLI by using an RPM - -For {op-system-base-full}, you can install the OpenShift CLI (`oc`) as an RPM if you have an active {product-title} subscription on your Red Hat account. - -.Prerequisites - -* Must have root or sudo privileges. - -.Procedure - -. Register with Red Hat Subscription Manager: -+ -[source,terminal] ----- -# subscription-manager register ----- - -. Pull the latest subscription data: -+ -[source,terminal] ----- -# subscription-manager refresh ----- - -. List the available subscriptions: -+ -[source,terminal] ----- -# subscription-manager list --available --matches '*OpenShift*' ----- - -. In the output for the previous command, find the pool ID for an {product-title} subscription and attach the subscription to the registered system: -+ -[source,terminal] ----- -# subscription-manager attach --pool= ----- - -. Enable the repositories required by {product-title} {product-version}. -+ -[source,terminal,subs="attributes+"] ----- -# subscription-manager repos --enable="rhocp-{product-version}-for-rhel-8-x86_64-rpms" ----- -+ -[NOTE] -==== -It is not supported to install the OpenShift CLI (`oc`) as an RPM for {op-system-base-full} 9. You must install the OpenShift CLI for {op-system-base} 9 by downloading the binary. -==== - -. Install the `openshift-clients` package: -+ -[source,terminal] ----- -# yum install openshift-clients ----- - -After you install the CLI, it is available using the `oc` command: - -[source,terminal] ----- -$ oc ----- diff --git a/modules/cli-installing-cli-web-console-linux.adoc b/modules/cli-installing-cli-web-console-linux.adoc deleted file mode 100644 index d1490d480359..000000000000 --- a/modules/cli-installing-cli-web-console-linux.adoc +++ /dev/null @@ -1,45 +0,0 @@ -ifeval::["{context}" == "updating-restricted-network-cluster"] -:restricted: -endif::[] - -:_content-type: PROCEDURE -[id="cli-installing-cli-web-console-macos-linux_{context}"] -= Installing the OpenShift CLI on Linux using the web console - -You can install the OpenShift CLI (`oc`) binary on Linux by using the following procedure. - -.Procedure - -. From the web console, click *?*. -+ -image::click-question-mark.png[] -. Click *Command Line Tools*. -+ -image::CLI-list.png[] -. Select appropriate `oc` binary for your Linux platform, and then click *Download oc for Linux*. -. Save the file. -. Unpack the archive. -+ -[source,terminal] ----- -$ tar xvf ----- -. Move the `oc` binary to a directory that is on your `PATH`. 
-+ -To check your `PATH`, execute the following command: -+ -[source,terminal] ----- -$ echo $PATH ----- - -After you install the OpenShift CLI, it is available using the `oc` command: - -[source,terminal] ----- -$ oc ----- - -ifeval::["{context}" == "updating-restricted-network-cluster"] -:!restricted: -endif::[]
diff --git a/modules/cli-installing-cli-web-console-macos.adoc b/modules/cli-installing-cli-web-console-macos.adoc deleted file mode 100644 index 02e13fc47b2d..000000000000 --- a/modules/cli-installing-cli-web-console-macos.adoc +++ /dev/null @@ -1,45 +0,0 @@ -:_content-type: PROCEDURE -[id="cli-installing-cli-web-console-macos_{context}"] -= Installing the OpenShift CLI on macOS using the web console -ifeval::["{context}" == "updating-restricted-network-cluster"] -:restricted: -endif::[] - -You can install the OpenShift CLI (`oc`) binary on macOS by using the following procedure. - -.Procedure - -. From the web console, click *?*. -+ -image::click-question-mark.png[] -. Click *Command Line Tools*. -+ -image::CLI-list.png[] -. Select the `oc` binary for the macOS platform, and then click *Download oc for Mac for x86_64*. -+ -[NOTE] -==== -For macOS arm64, click *Download oc for Mac for ARM 64*. -==== - -. Save the file. -. Unpack and unzip the archive. -. Move the `oc` binary to a directory on your `PATH`. -+ -To check your `PATH`, open a terminal and execute the following command: -+ -[source,terminal] ----- -$ echo $PATH ----- - -After you install the OpenShift CLI, it is available using the `oc` command: - -[source,terminal] ----- -$ oc ----- - -ifeval::["{context}" == "updating-restricted-network-cluster"] -:!restricted: -endif::[]
diff --git a/modules/cli-installing-cli-web-console-windows.adoc b/modules/cli-installing-cli-web-console-windows.adoc deleted file mode 100644 index fa18b6e7f06c..000000000000 --- a/modules/cli-installing-cli-web-console-windows.adoc +++ /dev/null @@ -1,40 +0,0 @@ -ifeval::["{context}" == "updating-restricted-network-cluster"] -:restricted: -endif::[] - -:_content-type: PROCEDURE -[id="cli-installing-cli-web-console-macos-windows_{context}"] -= Installing the OpenShift CLI on Windows using the web console - -You can install the OpenShift CLI (`oc`) binary on Windows by using the following procedure. - -.Procedure - -. From the web console, click *?*. -+ -image::click-question-mark.png[] -. Click *Command Line Tools*. -+ -image::CLI-list.png[] -. Select the `oc` binary for the Windows platform, and then click *Download oc for Windows for x86_64*. -. Save the file. -. Unzip the archive with a ZIP program. -. Move the `oc` binary to a directory that is on your `PATH`. -+ -To check your `PATH`, open the command prompt and execute the following command: -+ -[source,terminal] ----- -C:\> path ----- - -After you install the OpenShift CLI, it is available using the `oc` command: - -[source,terminal] ----- -C:\> oc ----- - -ifeval::["{context}" == "updating-restricted-network-cluster"] -:!restricted: -endif::[]
diff --git a/modules/cli-installing-cli-web-console.adoc b/modules/cli-installing-cli-web-console.adoc deleted file mode 100644 index c991721ea6eb..000000000000 --- a/modules/cli-installing-cli-web-console.adoc +++ /dev/null @@ -1,22 +0,0 @@ -ifeval::["{context}" == "updating-restricted-network-cluster"] -:restricted: -endif::[] - -[id="cli-installing-cli-web-console_{context}"] -= Installing the OpenShift CLI by using the web console - -You can install the OpenShift CLI (`oc`) to interact with {product-title} from a web console.
You can install `oc` on Linux, Windows, or macOS. - -[IMPORTANT] -==== -If you installed an earlier version of `oc`, you cannot use it to complete all -of the commands in {product-title} {product-version}. Download and -install the new version of `oc`. -ifdef::restricted[] -If you are upgrading a cluster in a restricted network, install the `oc` version that you plan to upgrade to. -endif::restricted[] -==== - -ifeval::["{context}" == "updating-restricted-network-cluster"] -:!restricted: -endif::[] diff --git a/modules/cli-installing-cli.adoc b/modules/cli-installing-cli.adoc deleted file mode 100644 index a2d0d4e3060e..000000000000 --- a/modules/cli-installing-cli.adoc +++ /dev/null @@ -1,201 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_alibaba/installing-alibaba-vpc.adoc -// * cli_reference/openshift_cli/getting-started.adoc -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-aws-customizations.adoc -// * installing/installing_aws/installing-aws-default.adoc -// * installing/installing_aws/installing-aws-china.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-secret-region.adoc -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_aws/installing-aws-private.adoc -// * installing/installing_aws/installing-aws-vpc.adoc -// * installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc -// * installing/installing_aws/installing-aws-outposts-remote-workers.adocs -// * installing/installing_azure/installing-azure-customizations.adoc -// * installing/installing_azure/installing-azure-default.adoc -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_azure/installing-azure-vnet.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_gcp/installing-gcp-customizations.adoc -// * installing/installing_gcp/installing-gcp-private.adoc -// * installing/installing_gcp/installing-gcp-default.adoc -// * installing/installing_gcp/installing-gcp-vpc.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc -// * installing/install_config/installing-restricted-networks-preparations.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * 
installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * openshift_images/samples-operator-alt-registry.adoc -// * updating/updating-restricted-network-cluster/mirroring-image-repository.adoc -// * microshift_cli_ref/microshift-oc-cli-install.adoc -// * updating/updating_a_cluster/updating_disconnected_cluster/mirroring-image-repository.adoc -// * installing/installing-nutanix-installer-provisioned.adoc -// * installing/installing-restricted-networks-nutanix-installer-provisioned.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc -// * installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc -// * installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc -// AMQ docs link to this; do not change anchor - -ifeval::["{context}" == "mirroring-ocp-image-repository"] -:restricted: -endif::[] - -:_content-type: PROCEDURE -[id="cli-installing-cli_{context}"] -= Installing the OpenShift CLI by downloading the binary - -You can install the OpenShift CLI (`oc`) to interact with {product-title} from a -command-line interface. You can install `oc` on Linux, Windows, or macOS. - -[IMPORTANT] -==== -If you installed an earlier version of `oc`, you cannot use it to complete all of the commands in {product-title} {product-version}. Download and install the new version of `oc`. -ifdef::restricted[] -If you are updating a cluster in a disconnected environment, install the `oc` version that you plan to update to. -endif::restricted[] -==== - -[discrete] -== Installing the OpenShift CLI on Linux - -You can install the OpenShift CLI (`oc`) binary on Linux by using the following procedure. - -.Procedure - -ifdef::openshift-origin[] -. Navigate to link:https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/[https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/] and choose the folder for your operating system and architecture. -. Download `oc.tar.gz`. -endif::[] -ifndef::openshift-origin,microshift[] -. Navigate to the link:https://access.redhat.com/downloads/content/290[{product-title} downloads page] on the Red Hat Customer Portal. -. Select the architecture from the *Product Variant* drop-down list. -. Select the appropriate version from the *Version* drop-down list. -. Click *Download Now* next to the *OpenShift v{product-version} Linux Client* entry and save the file. -endif::[] -ifdef::microshift[] -. Navigate to the link:https://access.redhat.com/downloads/content/290[{ocp} downloads page] on the Red Hat Customer Portal. -. Select the architecture from the *Product Variant* drop-down list. -. Select the appropriate version from the *Version* drop-down list. -. Click *Download Now* next to the *OpenShift v{product-version} Linux Client* entry and save the file. -endif::[] -. Unpack the archive: -+ -[source,terminal] ----- -$ tar xvf ----- -. Place the `oc` binary in a directory that is on your `PATH`. -+ -To check your `PATH`, execute the following command: -+ -[source,terminal] ----- -$ echo $PATH ----- - -After you install the OpenShift CLI, it is available using the `oc` command: - -[source,terminal] ----- -$ oc ----- - -[discrete] -== Installing the OpenShift CLI on Windows - -You can install the OpenShift CLI (`oc`) binary on Windows by using the following procedure. - -.Procedure - -ifdef::openshift-origin[] -. 
Navigate to link:https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/[https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/] and choose the folder for your operating system and architecture. -. Download `oc.zip`. -endif::[] -ifndef::openshift-origin,microshift[] -. Navigate to the link:https://access.redhat.com/downloads/content/290[{product-title} downloads page] on the Red Hat Customer Portal. -. Select the appropriate version from the *Version* drop-down list. -. Click *Download Now* next to the *OpenShift v{product-version} Windows Client* entry and save the file. -endif::[] -ifdef::microshift[] -. Navigate to the link:https://access.redhat.com/downloads/content/290[{ocp} downloads page] on the Red Hat Customer Portal. -. Select the appropriate version from the *Version* drop-down list. -. Click *Download Now* next to the *OpenShift v{product-version} Windows Client* entry and save the file. -endif::[] -. Unzip the archive with a ZIP program. -. Move the `oc` binary to a directory that is on your `PATH`. -+ -To check your `PATH`, open the command prompt and execute the following command: -+ -[source,terminal] ----- -C:\> path ----- - -After you install the OpenShift CLI, it is available using the `oc` command: - -[source,terminal] ----- -C:\> oc ----- - -[discrete] -== Installing the OpenShift CLI on macOS - -You can install the OpenShift CLI (`oc`) binary on macOS by using the following procedure. - -.Procedure - -ifdef::openshift-origin[] -. Navigate to link:https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/[https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/] and choose the folder for your operating system and architecture. -. Download `oc.tar.gz`. -endif::[] -ifndef::openshift-origin,microshift[] -. Navigate to the link:https://access.redhat.com/downloads/content/290[{product-title} downloads page] on the Red Hat Customer Portal. -. Select the appropriate version from the *Version* drop-down list. -. Click *Download Now* next to the *OpenShift v{product-version} macOS Client* entry and save the file. -+ -[NOTE] -==== -For macOS arm64, choose the *OpenShift v{product-version} macOS arm64 Client* entry. -==== -endif::[] -ifdef::microshift[] -. Navigate to the link:https://access.redhat.com/downloads/content/290[{ocp} downloads page] on the Red Hat Customer Portal. -. Select the appropriate version from the *Version* drop-down list. -. Click *Download Now* next to the *OpenShift v{product-version} macOS Client* entry and save the file. -endif::[] -. Unpack and unzip the archive. -. Move the `oc` binary to a directory on your PATH. -+ -To check your `PATH`, open a terminal and execute the following command: -+ -[source,terminal] ----- -$ echo $PATH ----- - -After you install the OpenShift CLI, it is available using the `oc` command: - -[source,terminal] ----- -$ oc ----- - -ifeval::["{context}" == "mirroring-ocp-image-repository"] -:!restricted: -endif::[] diff --git a/modules/cli-krew-install-plugin.adoc b/modules/cli-krew-install-plugin.adoc deleted file mode 100644 index e35186d4b426..000000000000 --- a/modules/cli-krew-install-plugin.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/installing-cli-plugins-krew.adoc - -:_content-type: PROCEDURE -[id="cli-krew-install-plugin_{context}"] -= Installing a CLI plugin with Krew - -You can install a plugin for the OpenShift CLI (`oc`) with Krew. 
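After installation, a Krew-managed plugin is typically invoked as a subcommand of `oc`. The following sketch is illustrative only and assumes a hypothetical plugin named `example`:

[source,terminal]
----
$ oc krew install example
$ oc example --help
----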
- -.Prerequisites - -* You have installed Krew by following the link:https://krew.sigs.k8s.io/docs/user-guide/setup/install/[installation procedure] in the Krew documentation. - -.Procedure - -. To list all available plugins, run the following command: -+ -[source,terminal] ----- -$ oc krew search ----- - -. To get information about a plugin, run the following command: -+ -[source,terminal] ----- -$ oc krew info ----- - -. To install a plugin, run the following command: -+ -[source,terminal] ----- -$ oc krew install ----- - -. To list all plugins that were installed by Krew, run the following command: -+ -[source,terminal] ----- -$ oc krew list ----- diff --git a/modules/cli-krew-remove-plugin.adoc b/modules/cli-krew-remove-plugin.adoc deleted file mode 100644 index 5c43e2a6b427..000000000000 --- a/modules/cli-krew-remove-plugin.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/installing-cli-plugins-krew.adoc - -:_content-type: PROCEDURE -[id="cli-krew-remove-plugin_{context}"] -= Uninstalling a CLI plugin with Krew - -You can uninstall a plugin that was installed for the OpenShift CLI (`oc`) with Krew. - -.Prerequisites - -* You have installed Krew by following the link:https://krew.sigs.k8s.io/docs/user-guide/setup/install/[installation procedure] in the Krew documentation. -* You have installed a plugin for the OpenShift CLI with Krew. - -.Procedure - -* To uninstall a plugin, run the following command: -+ -[source,terminal] ----- -$ oc krew uninstall ----- diff --git a/modules/cli-krew-update-plugin.adoc b/modules/cli-krew-update-plugin.adoc deleted file mode 100644 index ddaf47993cf1..000000000000 --- a/modules/cli-krew-update-plugin.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/installing-cli-plugins-krew.adoc - -:_content-type: PROCEDURE -[id="cli-krew-update-plugin_{context}"] -= Updating a CLI plugin with Krew - -You can update a plugin that was installed for the OpenShift CLI (`oc`) with Krew. - -.Prerequisites - -* You have installed Krew by following the link:https://krew.sigs.k8s.io/docs/user-guide/setup/install/[installation procedure] in the Krew documentation. -* You have installed a plugin for the OpenShift CLI with Krew. 
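Before updating, it can be useful to confirm which plugins Krew currently manages. A minimal sketch, reusing the `oc krew list` command shown earlier:

[source,terminal]
----
$ oc krew list
----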
- -.Procedure - -* To update a single plugin, run the following command: -+ -[source,terminal] ----- -$ oc krew upgrade ----- - -* To update all plugins that were installed by Krew, run the following command: -+ -[source,terminal] ----- -$ oc krew upgrade ----- diff --git a/modules/cli-logging-in-kubeadmin.adoc b/modules/cli-logging-in-kubeadmin.adoc deleted file mode 100644 index f144147287db..000000000000 --- a/modules/cli-logging-in-kubeadmin.adoc +++ /dev/null @@ -1,90 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_alibaba/installing-alibaba-vpc.adoc -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-aws-customizations.adoc -// * installing/installing_aws/installing-aws-default.adoc -// * installing/installing_aws/installing-aws-china.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-secret-region.adoc -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_aws/installing-aws-private.adoc -// * installing/installing_aws/installing-aws-vpc.adoc -// * installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc -// * installing/installing_aws/installing-aws-outposts-remote-workers.adoc -// * installing/installing_azure/installing-azure-customizations.adoc -// * installing/installing_azure/installing-azure-default.adoc -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_azure/installing-azure-vnet.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_gcp/installing-gcp-customizations.adoc -// * installing/installing_gcp/installing-gcp-private.adoc -// * installing/installing_gcp/installing-gcp-default.adoc -// * installing/installing_gcp/installing-gcp-vpc.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp_user_infra/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc -// * installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc -// * installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-installer.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * 
installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc - - -:_content-type: PROCEDURE -[id="cli-logging-in-kubeadmin_{context}"] -= Logging in to the cluster by using the CLI - -You can log in to your cluster as a default system user by exporting the cluster `kubeconfig` file. -The `kubeconfig` file contains information about the cluster that is used by the CLI to connect a client to the correct cluster and API server. -The file is specific to a cluster and is created during {product-title} installation. - -.Prerequisites - -* You deployed an {product-title} cluster. -* You installed the `oc` CLI. - -.Procedure - -. Export the `kubeadmin` credentials: -+ -[source,terminal] ----- -$ export KUBECONFIG=/auth/kubeconfig <1> ----- -<1> For ``, specify the path to the directory that you stored -the installation files in. - -. Verify you can run `oc` commands successfully using the exported configuration: -+ -[source,terminal] ----- -$ oc whoami ----- -+ -.Example output -[source,terminal] ----- -system:admin ----- diff --git a/modules/cli-logging-in-web.adoc b/modules/cli-logging-in-web.adoc deleted file mode 100644 index df53d2a14610..000000000000 --- a/modules/cli-logging-in-web.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/getting-started.adoc - -:_content-type: PROCEDURE -[id="cli-logging-in-web_{context}"] -= Logging in to the OpenShift CLI using a web browser - -You can log in to the OpenShift CLI (`oc`) with the help of a web browser to access and manage your cluster. This allows users to avoid inserting their access token into the command line. - -[WARNING] -==== -Logging in to the CLI through the web browser runs a server on localhost with HTTP, not HTTPS; use with caution on multi-user workstations. -==== - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). -* You must have a browser installed. - -.Procedure - -. Enter the `oc login` command with the `--web` flag: -+ -[source,terminal] ----- -$ oc login --web <1> ----- -<1> Optionally, you can specify the server URL and callback port. For example, `oc login --web --callback-port 8280 localhost:8443`. - -. The web browser opens automatically. If it does not, click the link in the command output. If you do not specify the {product-title} server `oc` tries to open the web console of the cluster specified in the current `oc` configuration file. If no `oc` configuration exists, `oc` prompts interactively for the server URL. -+ -.Example output - -[source,terminal] ----- -Opening login URL in the default browser: https://openshift.example.com -Opening in existing browser session. ----- - -. If more than one identity provider is available, select your choice from the options provided. - -. 
Enter your username and password into the corresponding browser fields. After you are logged in, the browser displays the text `access token received successfully; please return to your terminal`. - -. Check the CLI for a login confirmation. -+ -.Example output - -[source,terminal] ----- -Login successful. - -You don't have any projects. You can try to create a new project, by running - - oc new-project - ----- - -[NOTE] -==== -The web console defaults to the profile used in the previous session. To switch between Administrator and Developer profiles, log out of the {product-title} web console and clear the cache. -==== - -You can now create a project or issue other commands for managing your cluster. \ No newline at end of file diff --git a/modules/cli-logging-in.adoc b/modules/cli-logging-in.adoc deleted file mode 100644 index fa4bf9ab6a20..000000000000 --- a/modules/cli-logging-in.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/getting-started.adoc - -:_content-type: PROCEDURE -[id="cli-logging-in_{context}"] -= Logging in to the OpenShift CLI - -You can log in to the OpenShift CLI (`oc`) to access and manage your cluster. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). - -[NOTE] -==== -To access a cluster that is accessible only over an HTTP proxy server, you can set the `HTTP_PROXY`, `HTTPS_PROXY` and `NO_PROXY` variables. -These environment variables are respected by the `oc` CLI so that all communication with the cluster goes through the HTTP proxy. - -Authentication headers are sent only when using HTTPS transport. -==== - -.Procedure - -. Enter the `oc login` command and pass in a user name: -+ -[source,terminal] ----- -$ oc login -u user1 ----- - -. When prompted, enter the required information: -+ -.Example output -[source,terminal] ----- -Server [https://localhost:8443]: https://openshift.example.com:6443 <1> -The server uses a certificate signed by an unknown authority. -You can bypass the certificate check, but any data you send to the server could be intercepted by others. -Use insecure connections? (y/n): y <2> - -Authentication required for https://openshift.example.com:6443 (openshift) -Username: user1 -Password: <3> -Login successful. - -You don't have any projects. You can try to create a new project, by running - - oc new-project - -Welcome! See 'oc help' to get started. ----- -<1> Enter the {product-title} server URL. -<2> Enter whether to use insecure connections. -<3> Enter the user's password. - -[NOTE] -==== -If you are logged in to the web console, you can generate an `oc login` command that includes your token and server information. You can use the command to log in to the {product-title} CLI without the interactive prompts. To generate the command, select *Copy login command* from the username drop-down menu at the top right of the web console. -==== - -You can now create a project or issue other commands for managing your cluster. diff --git a/modules/cli-logging-out.adoc b/modules/cli-logging-out.adoc deleted file mode 100644 index 57be58cea225..000000000000 --- a/modules/cli-logging-out.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/getting-started.adoc - -[id="cli-logging-out_{context}"] -= Logging out of the OpenShift CLI - -You can log out the OpenShift CLI to end your current session. - -* Use the `oc logout` command. 
-+ -[source,terminal] ----- -$ oc logout ----- -+ -.Example output -[source,terminal] ----- -Logged "user1" out on "https://openshift.example.com" ----- - -This deletes the saved authentication token from the server and removes it from -your configuration file. diff --git a/modules/cli-using-cli.adoc b/modules/cli-using-cli.adoc deleted file mode 100644 index db6446f6a3f0..000000000000 --- a/modules/cli-using-cli.adoc +++ /dev/null @@ -1,145 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/getting-started.adoc - -[id="cli-using-cli_{context}"] -= Using the OpenShift CLI - -Review the following sections to learn how to complete common tasks using the CLI. - -ifndef::microshift[] -== Creating a project - -Use the `oc new-project` command to create a new project. - -[source,terminal] ----- -$ oc new-project my-project ----- - -.Example output -[source,terminal] ----- -Now using project "my-project" on server "https://openshift.example.com:6443". ----- -endif::microshift[] - -ifndef::microshift[] -== Creating a new app - -Use the `oc new-app` command to create a new application. - -[source,terminal] ----- -$ oc new-app https://github.com/sclorg/cakephp-ex ----- - -.Example output -[source,terminal] ----- ---> Found image 40de956 (9 days old) in imagestream "openshift/php" under tag "7.2" for "php" - -... - - Run 'oc status' to view your app. ----- -endif::microshift[] - -== Viewing pods - -Use the `oc get pods` command to view the pods for the current project. - -[NOTE] -==== -When you run `oc` inside a pod and do not specify a namespace, the namespace of the pod is used by default. -==== - -[source,terminal] ----- -$ oc get pods -o wide ----- - -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE -cakephp-ex-1-build 0/1 Completed 0 5m45s 10.131.0.10 ip-10-0-141-74.ec2.internal -cakephp-ex-1-deploy 0/1 Completed 0 3m44s 10.129.2.9 ip-10-0-147-65.ec2.internal -cakephp-ex-1-ktz97 1/1 Running 0 3m33s 10.128.2.11 ip-10-0-168-105.ec2.internal ----- - -== Viewing pod logs - -Use the `oc logs` command to view logs for a particular pod. - -[source,terminal] ----- -$ oc logs cakephp-ex-1-deploy ----- - -.Example output -[source,terminal] ----- ---> Scaling cakephp-ex-1 to 1 ---> Success ----- - -ifndef::microshift[] -== Viewing the current project - -Use the `oc project` command to view the current project. - -[source,terminal] ----- -$ oc project ----- - -.Example output -[source,terminal] ----- -Using project "my-project" on server "https://openshift.example.com:6443". ----- - -== Viewing the status for the current project - -Use the `oc status` command to view information about the current project, such -as services, deployments, and build configs. - -[source,terminal] ----- -$ oc status ----- - -.Example output -[source,terminal] ----- -In project my-project on server https://openshift.example.com:6443 - -svc/cakephp-ex - 172.30.236.80 ports 8080, 8443 - dc/cakephp-ex deploys istag/cakephp-ex:latest <- - bc/cakephp-ex source builds https://github.com/sclorg/cakephp-ex on openshift/php:7.2 - deployment #1 deployed 2 minutes ago - 1 pod - -3 infos identified, use 'oc status --suggest' to see details. ----- -endif::microshift[] - -== Listing supported API resources - -Use the `oc api-resources` command to view the list of supported API resources -on the server. 
- -[source,terminal] ----- -$ oc api-resources ----- - -.Example output -[source,terminal] ----- -NAME SHORTNAMES APIGROUP NAMESPACED KIND -bindings true Binding -componentstatuses cs false ComponentStatus -configmaps cm true ConfigMap -... ----- diff --git a/modules/cloud-credential-operator.adoc b/modules/cloud-credential-operator.adoc deleted file mode 100644 index d3daf7ae2224..000000000000 --- a/modules/cloud-credential-operator.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cloud-credential-operator_{context}"] -= Cloud Credential Operator - -[discrete] -== Purpose - -The Cloud Credential Operator (CCO) manages cloud provider credentials as Kubernetes custom resource definitions (CRDs). The CCO syncs on `CredentialsRequest` custom resources (CRs) to allow {product-title} components to request cloud provider credentials with the specific permissions that are required for the cluster to run. - -By setting different values for the `credentialsMode` parameter in the `install-config.yaml` file, the CCO can be configured to operate in several different modes. If no mode is specified, or the `credentialsMode` parameter is set to an empty string (`""`), the CCO operates in its default mode. - -[discrete] -== Project - -link:https://github.com/openshift/cloud-credential-operator[openshift-cloud-credential-operator] - -[discrete] -== CRDs - -* `credentialsrequests.cloudcredential.openshift.io` -** Scope: Namespaced -** CR: `CredentialsRequest` -** Validation: Yes - -[discrete] -== Configuration objects - -No configuration required. diff --git a/modules/cluster-api-architecture.adoc b/modules/cluster-api-architecture.adoc deleted file mode 100644 index 9a952b3e4dac..000000000000 --- a/modules/cluster-api-architecture.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/capi-machine-management.adoc - -:_content-type: CONCEPT -[id="cluster-api-architecture_{context}"] -= Cluster API architecture - -The {product-title} integration of the upstream Cluster API is implemented and managed by the Cluster CAPI Operator. The Cluster CAPI Operator and its operands are provisioned in the `openshift-cluster-api` namespace, in contrast to the Machine API, which uses the `openshift-machine-api` namespace. - -[id="capi-arch-operator"] -== The Cluster CAPI Operator - -The Cluster CAPI Operator is an {product-title} Operator that maintains the lifecycle of Cluster API resources. This Operator is responsible for all administrative tasks related to deploying the Cluster API project within an {product-title} cluster. - -If a cluster is configured correctly to allow the use of the Cluster API, the Cluster CAPI Operator installs the Cluster API Operator on the cluster. - -[NOTE] -==== -The Cluster CAPI Operator is distinct from the upstream Cluster API Operator. -==== - -For more information, see the entry for the Cluster CAPI Operator in the _Cluster Operators reference_ content. - -[id="capi-arch-resources"] -== Primary resources - -The Cluster API is comprised of the following primary resources. For the Technology Preview of this feature, you must create these resources manually in the `openshift-cluster-api` namespace. - -Cluster:: A fundamental unit that represents a cluster that is managed by the Cluster API. 
- -Infrastructure:: A provider-specific resource that defines properties that are shared by all the compute machine sets in the cluster, such as the region and subnets. - -Machine template:: A provider-specific template that defines the properties of the machines that a compute machine set creates. - -Machine set:: A group of machines. -+ -Compute machine sets are to machines as replica sets are to pods. If you need more machines or must scale them down, you change the `replicas` field on the compute machine set to meet your compute needs. -+ -With the Cluster API, a compute machine set references a `Cluster` object and a provider-specific machine template. - -Machine:: A fundamental unit that describes the host for a node. -+ -The Cluster API creates machines based on the configuration in the machine template. \ No newline at end of file diff --git a/modules/cluster-authentication-operator.adoc b/modules/cluster-authentication-operator.adoc deleted file mode 100644 index 0056de181e25..000000000000 --- a/modules/cluster-authentication-operator.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-authentication-operator_{context}"] -= Cluster Authentication Operator - -[discrete] -== Purpose - -The Cluster Authentication Operator installs and maintains the `Authentication` custom resource in a cluster and can be viewed with: - -[source,terminal] ----- -$ oc get clusteroperator authentication -o yaml ----- - -[discrete] -== Project - -link:https://github.com/openshift/cluster-authentication-operator[cluster-authentication-operator] diff --git a/modules/cluster-autoscaler-about.adoc b/modules/cluster-autoscaler-about.adoc deleted file mode 100644 index a73d93c8a571..000000000000 --- a/modules/cluster-autoscaler-about.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-about-autoscaling-nodes.adoc -// * post_installation_configuration/cluster-tasks.adoc -// * machine_management/applying-autoscaling.adoc -// * osd_cluster_admin/osd_nodes/osd-nodes-about-autoscaling-nodes.adoc - -:_content-type: CONCEPT -[id="cluster-autoscaler-about_{context}"] -= About the cluster autoscaler - -The cluster autoscaler adjusts the size of an {product-title} cluster to meet its current deployment needs. It uses declarative, Kubernetes-style arguments to provide infrastructure management that does not rely on objects of a specific cloud provider. The cluster autoscaler has a cluster scope, and is not associated with a particular namespace. - -The cluster autoscaler increases the size of the cluster when there are pods that fail to schedule on any of the current worker nodes due to insufficient resources or when another node is necessary to meet deployment needs. The cluster autoscaler does not increase the cluster resources beyond the limits that you specify. - -The cluster autoscaler computes the total -ifndef::openshift-dedicated,openshift-rosa[] -memory, CPU, and GPU -endif::[] -ifdef::openshift-dedicated,openshift-rosa[] -memory and CPU -endif::[] -on all nodes the cluster, even though it does not manage the control plane nodes. These values are not single-machine oriented. They are an aggregation of all the resources in the entire cluster. For example, if you set the maximum memory resource limit, the cluster autoscaler includes all the nodes in the cluster when calculating the current memory usage. 
That calculation is then used to determine if the cluster autoscaler has the capacity to add more worker resources. - -[IMPORTANT] -==== -Ensure that the `maxNodesTotal` value in the `ClusterAutoscaler` resource definition that you create is large enough to account for the total possible number of machines in your cluster. This value must encompass the number of control plane machines and the possible number of compute machines that you might scale to. -==== - -Every 10 seconds, the cluster autoscaler checks which nodes are unnecessary in the cluster and removes them. The cluster autoscaler considers a node for removal if the following conditions apply: - -* The node utilization is less than the _node utilization level_ threshold for the cluster. The node utilization level is the sum of the requested resources divided by the allocated resources for the node. If you do not specify a value in the `ClusterAutoscaler` custom resource, the cluster autoscaler uses a default value of `0.5`, which corresponds to 50% utilization. -* The cluster autoscaler can move all pods running on the node to the other nodes. The Kubernetes scheduler is responsible for scheduling pods on the nodes. -* The node does not have a scale-down disabled annotation. - -If the following types of pods are present on a node, the cluster autoscaler will not remove the node: - -* Pods with restrictive pod disruption budgets (PDBs). -* Kube-system pods that do not run on the node by default. -* Kube-system pods that do not have a PDB or have a PDB that is too restrictive. -* Pods that are not backed by a controller object such as a deployment, replica set, or stateful set. -* Pods with local storage. -* Pods that cannot be moved elsewhere because of a lack of resources, incompatible node selectors or affinity, matching anti-affinity, and so on. -* Pods that have a `"cluster-autoscaler.kubernetes.io/safe-to-evict": "false"` annotation, unless they also have a `"cluster-autoscaler.kubernetes.io/safe-to-evict": "true"` annotation. - -For example, suppose that you set the maximum CPU limit to 64 cores and configure the cluster autoscaler to only create machines that have 8 cores each. If your cluster starts with 30 cores, the cluster autoscaler can add up to 4 more nodes with 32 cores, for a total of 62. - -If you configure the cluster autoscaler, additional usage restrictions apply: - -* Do not modify the nodes that are in autoscaled node groups directly. All nodes within the same node group have the same capacity and labels and run the same system pods. -* Specify requests for your pods. -* If you have to prevent pods from being deleted too quickly, configure appropriate PDBs. -* Confirm that your cloud provider quota is large enough to support the maximum node pools that you configure. -* Do not run additional node group autoscalers, especially the ones offered by your cloud provider. - -The horizontal pod autoscaler (HPA) and the cluster autoscaler modify cluster resources in different ways. The HPA changes the deployment's or replica set's number of replicas based on the current CPU load. If the load increases, the HPA creates new replicas, regardless of the amount of resources available to the cluster. If there are not enough resources, the cluster autoscaler adds resources so that the HPA-created pods can run. If the load decreases, the HPA stops some replicas. If this action causes some nodes to be underutilized or completely empty, the cluster autoscaler deletes the unnecessary nodes.
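For reference, the `safe-to-evict` annotations described earlier in this module are set on individual pods. A minimal sketch, with placeholder pod, container, and image names, might look like the following:

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: example-app <1>
  annotations:
    cluster-autoscaler.kubernetes.io/safe-to-evict: "true" <2>
spec:
  containers:
  - name: app <3>
    image: quay.io/example/app:latest <4>
----
<1> Placeholder pod name.
<2> Allows the cluster autoscaler to evict this pod when it scales down the node.
<3> Placeholder container name.
<4> Placeholder image reference.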
- -The cluster autoscaler takes pod priorities into account. The Pod Priority and Preemption feature enables scheduling pods based on priorities if the cluster does not have enough resources, but the cluster autoscaler ensures that the cluster has resources to run all pods. To honor the intention of both features, the cluster autoscaler includes a priority cutoff function. You can use this cutoff to schedule "best-effort" pods, which do not cause the cluster autoscaler to increase resources but instead run only when spare resources are available. - -Pods with priority lower than the cutoff value do not cause the cluster to scale up or prevent the cluster from scaling down. No new nodes are added to run the pods, and nodes running these pods might be deleted to free resources. - -Cluster autoscaling is supported for the platforms that have machine API available on it. - -//// -Default priority cutoff is 0. It can be changed using `--expendable-pods-priority-cutoff` flag, but we discourage it. cluster autoscaler also doesn't trigger scale-up if an unschedulable Pod is already waiting for a lower priority Pod preemption. -//// diff --git a/modules/cluster-autoscaler-cr.adoc b/modules/cluster-autoscaler-cr.adoc deleted file mode 100644 index 88e368c87f91..000000000000 --- a/modules/cluster-autoscaler-cr.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/applying-autoscaling.adoc -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: REFERENCE -[id="cluster-autoscaler-cr_{context}"] -= ClusterAutoscaler resource definition - -This `ClusterAutoscaler` resource definition shows the parameters and sample values for the cluster autoscaler. - - -[source,yaml] ----- -apiVersion: "autoscaling.openshift.io/v1" -kind: "ClusterAutoscaler" -metadata: - name: "default" -spec: - podPriorityThreshold: -10 <1> - resourceLimits: - maxNodesTotal: 24 <2> - cores: - min: 8 <3> - max: 128 <4> - memory: - min: 4 <5> - max: 256 <6> - gpus: - - type: nvidia.com/gpu <7> - min: 0 <8> - max: 16 <9> - - type: amd.com/gpu - min: 0 - max: 4 - logVerbosity: 4 <10> - scaleDown: <11> - enabled: true <12> - delayAfterAdd: 10m <13> - delayAfterDelete: 5m <14> - delayAfterFailure: 30s <15> - unneededTime: 5m <16> - utilizationThreshold: "0.4" <17> ----- -<1> Specify the priority that a pod must exceed to cause the cluster autoscaler to deploy additional nodes. Enter a 32-bit integer value. The `podPriorityThreshold` value is compared to the value of the `PriorityClass` that you assign to each pod. -<2> Specify the maximum number of nodes to deploy. This value is the total number of machines that are deployed in your cluster, not just the ones that the autoscaler controls. Ensure that this value is large enough to account for all of your control plane and compute machines and the total number of replicas that you specify in your `MachineAutoscaler` resources. -<3> Specify the minimum number of cores to deploy in the cluster. -<4> Specify the maximum number of cores to deploy in the cluster. -<5> Specify the minimum amount of memory, in GiB, in the cluster. -<6> Specify the maximum amount of memory, in GiB, in the cluster. -<7> Optional: Specify the type of GPU node to deploy. Only `nvidia.com/gpu` and `amd.com/gpu` are valid types. -<8> Specify the minimum number of GPUs to deploy in the cluster. -<9> Specify the maximum number of GPUs to deploy in the cluster. -<10> Specify the logging verbosity level between `0` and `10`. 
The following log level thresholds are provided for guidance: -+ --- -* `1`: (Default) Basic information about changes. -* `4`: Debug-level verbosity for troubleshooting typical issues. -* `9`: Extensive, protocol-level debugging information. --- -+ -If you do not specify a value, the default value of `1` is used. -<11> In this section, you can specify the period to wait for each action by using any valid link:https://golang.org/pkg/time/#ParseDuration[ParseDuration] interval, including `ns`, `us`, `ms`, `s`, `m`, and `h`. -<12> Specify whether the cluster autoscaler can remove unnecessary nodes. -<13> Optional: Specify the period to wait before deleting a node after a node has recently been _added_. If you do not specify a value, the default value of `10m` is used. -<14> Optional: Specify the period to wait before deleting a node after a node has recently been _deleted_. If you do not specify a value, the default value of `0s` is used. -<15> Optional: Specify the period to wait before deleting a node after a scale down failure occurred. If you do not specify a value, the default value of `3m` is used. -<16> Optional: Specify the period before an unnecessary node is eligible for deletion. If you do not specify a value, the default value of `10m` is used.<17> Optional: Specify the _node utilization level_ below which an unnecessary node is eligible for deletion. The node utilization level is the sum of the requested resources divided by the allocated resources for the node, and must be a value greater than `"0"` but less than `"1"`. If you do not specify a value, the cluster autoscaler uses a default value of `"0.5"`, which corresponds to 50% utilization. This value must be expressed as a string. -// Might be able to add a formula to show this visually, but need to look into asciidoc math formatting and what our tooling supports. - -[NOTE] -==== -When performing a scaling operation, the cluster autoscaler remains within the ranges set in the `ClusterAutoscaler` resource definition, such as the minimum and maximum number of cores to deploy or the amount of memory in the cluster. However, the cluster autoscaler does not correct the current values in your cluster to be within those ranges. - -The minimum and maximum CPUs, memory, and GPU values are determined by calculating those resources on all nodes in the cluster, even if the cluster autoscaler does not manage the nodes. For example, the control plane nodes are considered in the total memory in the cluster, even though the cluster autoscaler does not manage the control plane nodes. -==== diff --git a/modules/cluster-autoscaler-operator.adoc b/modules/cluster-autoscaler-operator.adoc deleted file mode 100644 index c37e338537ff..000000000000 --- a/modules/cluster-autoscaler-operator.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-autoscaler-operator_{context}"] -= Cluster Autoscaler Operator - -[discrete] -== Purpose - -The Cluster Autoscaler Operator manages deployments of the OpenShift Cluster Autoscaler using the `cluster-api` provider. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-autoscaler-operator[cluster-autoscaler-operator] - -[discrete] -== CRDs - -* `ClusterAutoscaler`: This is a singleton resource, which controls the configuration autoscaler instance for the cluster. 
The Operator only responds to the `ClusterAutoscaler` resource named `default` in the managed namespace, the value of the `WATCH_NAMESPACE` environment variable. -* `MachineAutoscaler`: This resource targets a node group and manages the annotations to enable and configure autoscaling for that group, the `min` and `max` size. Currently only `MachineSet` objects can be targeted. diff --git a/modules/cluster-bare-metal-operator.adoc b/modules/cluster-bare-metal-operator.adoc deleted file mode 100644 index cb5882fdfac2..000000000000 --- a/modules/cluster-bare-metal-operator.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc -// * installing/cluster-capabilities.adoc - -ifeval::["{context}" == "cluster-operators-ref"] -:operator-ref: -endif::[] - -ifeval::["{context}" == "cluster-capabilities"] -:cluster-caps: -endif::[] - -:_content-type: REFERENCE -[id="cluster-bare-metal-operator_{context}"] -ifdef::operator-ref[= Cluster Baremetal Operator] -ifdef::cluster-caps[= Bare-metal capability] - -ifdef::operator-ref[] - -[NOTE] -==== -The Cluster Baremetal Operator is an optional cluster capability that can be disabled by cluster administrators during installation. For more information about optional cluster capabilities, see "Cluster capabilities" in _Installing_. -==== - -endif::operator-ref[] - -[discrete] -== Purpose - -ifdef::cluster-caps[] - -The Cluster Baremetal Operator provides the features for the `baremetal` capability. - -endif::cluster-caps[] - -The Cluster Baremetal Operator (CBO) deploys all the components necessary to take a bare-metal server to a fully functioning worker node ready to run {product-title} compute nodes. The CBO ensures that the metal3 deployment, which consists of the Bare Metal Operator (BMO) and Ironic containers, runs on one of the control plane nodes within the {product-title} cluster. The CBO also listens for {product-title} updates to resources that it watches and takes appropriate action. - -ifdef::cluster-caps[] -The bare-metal capability is required for deployments using installer-provisioned infrastructure. Disabling the bare-metal capability can result in unexpected problems with these deployments. - -It is recommended that cluster administrators only disable the bare-metal capability during installations with user-provisioned infrastructure that do not have any `BareMetalHost` resources in the cluster. - -[IMPORTANT] -==== -If the bare-metal capability is disabled, the cluster cannot provision or manage bare-metal nodes. Only disable the capability if there are no `BareMetalHost` resources in your deployment. 
-==== -endif::cluster-caps[] - -ifdef::operator-ref[] - -[discrete] -== Project - -link:https://github.com/openshift/cluster-baremetal-operator[cluster-baremetal-operator] - -endif::operator-ref[] - -ifeval::["{context}" == "cluster-operators-ref"] -:!operator-ref: -endif::[] - -ifeval::["{context}" == "cluster-caps"] -:!cluster-caps: -endif::[] diff --git a/modules/cluster-capi-operator.adoc b/modules/cluster-capi-operator.adoc deleted file mode 100644 index 9a3e1caeb48a..000000000000 --- a/modules/cluster-capi-operator.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-capi-operator_{context}"] -= Cluster CAPI Operator - -[NOTE] -==== -This Operator is available as a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] for Amazon Web Services (AWS), Google Cloud Platform (GCP), and Microsoft Azure clusters. -==== - -[discrete] -== Purpose - -The Cluster CAPI Operator maintains the lifecycle of Cluster API resources. This Operator is responsible for all administrative tasks related to deploying the Cluster API project within an {product-title} cluster. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-capi-operator[cluster-capi-operator] - -[discrete] -== CRDs - -* `awsmachines.infrastructure.cluster.x-k8s.io` -** Scope: Namespaced -** CR: `awsmachine` -** Validation: No - -* `gcpmachines.infrastructure.cluster.x-k8s.io` -** Scope: Namespaced -** CR: `gcpmachine` -** Validation: No - -* `awsmachinetemplates.infrastructure.cluster.x-k8s.io` -** Scope: Namespaced -** CR: `awsmachinetemplate` -** Validation: No - -* `gcpmachinetemplates.infrastructure.cluster.x-k8s.io` -** Scope: Namespaced -** CR: `gcpmachinetemplate` -** Validation: No \ No newline at end of file diff --git a/modules/cluster-cloud-controller-config-osp.adoc b/modules/cluster-cloud-controller-config-osp.adoc deleted file mode 100644 index 2a10338e3833..000000000000 --- a/modules/cluster-cloud-controller-config-osp.adoc +++ /dev/null @@ -1,328 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-cloud-config-reference.adoc - -:_content-type: REFERENCE -[id="cluster-cloud-controller-config_{context}"] -= The OpenStack Cloud Controller Manager (CCM) config map - -An OpenStack CCM config map defines how your cluster interacts with your {rh-openstack} cloud. By default, this configuration is stored under the `cloud.conf` key in the `cloud-conf` config map in the `openshift-cloud-controller-manager` namespace. - -[IMPORTANT] -==== -The `cloud-conf` config map is generated from the `cloud-provider-config` config map in the `openshift-config` namespace. - -To change the settings that are described by the `cloud-conf` config map, modify the `cloud-provider-config` config map. - -As part of this synchronization, the CCM Operator overrides some options. For more information, see "The {rh-openstack} Cloud Controller Manager". 
-==== - -For example: - -.An example `cloud-conf` config map -[source,yaml] ----- -apiVersion: v1 -data: - cloud.conf: | - [Global] <1> - secret-name = openstack-credentials - secret-namespace = kube-system - region = regionOne - [LoadBalancer] - use-octavia = True -kind: ConfigMap -metadata: - creationTimestamp: "2022-12-20T17:01:08Z" - name: cloud-conf - namespace: openshift-cloud-controller-manager - resourceVersion: "2519" - uid: cbbeedaf-41ed-41c2-9f37-4885732d3677 ----- -<1> Set global options by using a `clouds.yaml` file rather than modifying the config map. - -The following options are present in the config map. Except when indicated otherwise, they are mandatory for clusters that run on {rh-openstack}. - -// [id="ccm-config-global-options"] -// == Global options - -// The following options are used for {rh-openstack} CCM authentication with the {rh-openstack} Identity service, also known as Keystone. They are similiar to the global options that you can set by using the `openstack` CLI. - -// |=== -// | Option | Description - -// | `ca-file` -// | Optional. The CA certificate bundle file for communication with the {rh-openstack} Identity service. If you use the HTTPS protocol with The Identity service URL, this option is required. - -// | `cert-file` -// | Optional. The client certificate path to use for client TLS authentication. - -// | `key-file` -// | Optional. The client private key path to use for client TLS authentication. - -// | `region` -// | The Identity service region name. - -// | `trust-id` -// | The Identity service trust ID. A trust represents the authorization of a user, or trustor, to delegate roles to another user, or trustee. Optionally, a trust authorizes the trustee to impersonate the trustor. You can find available trusts by querying the `/v3/OS-TRUST/trusts` endpoint of the Identity service API. - -// | `trustee-id` -// | The Identity service trustee user ID. - -// | `trustee-password` -// | The Identity service trustee user password. - -// | `application-credential-id` -// | The ID of an application credential to authenticate with. An `application-credential-secret` must be set along with this parameter. - -// | `application-credential-name` -// | The name of an application credential to authenticate with. If `application-credential-id` is not set, the user name and domain must be set. - -// | `application-credential-secret` -// | The secret of an application credential to authenticate with. - -// | `tls-insecure` -// | Whether or not to verify the server's TLS certificate. If set to `true`, the certificate is not verified. By default, the certificate is verified. -// |=== - - -// [id="ccm-config-networking-options"] -// == Networking options - -// |=== -// | Option | Description - -// | `ipv6-support-disabled` -// | Whether or not IPv6 is supported as indicated by a boolean value. By default, this option is `false`. - -// | `public-network-name` -// | The name of an {rh-openstack} Networking service, or Neutron, external network. The CCM uses this option when retrieving the external IP address of a Kubernetes node. This value can contain multiple names. Specified networks are bitwise ORed. The default value is `""`. - -// | `internal-network-name` -// | The name of a Networking service internal network. The CCM uses this option when retrieving the internal IP address of a Kubernetes node. This value can contain multiple names. Specified networks are bitwise ORed. The default value is `""`. 
- -// | `address-sort-order` -// | This configuration key affects how the provider reports node addresses to Kubernetes node resources. The default order depends on the hard-coded order in which the provider queries addresses and what the cloud returns. A specific order is not guaranteed. - -// To override this behavior, specify a comma-separated list of CIDR addresses. CCM sorts and groups all addresses that match the list in a prioritized manner, wherein the first retrieved item has a higher priority than the last. Addresses that do not match the list remain in their default order. The default value is `""`. - -// This option can be useful if you have multiple or dual-stack interfaces attached to a node that need a user-controlled, deterministic way of sorting addresses. -// |=== - -[id="ccm-config-lb-options"] -== Load balancer options - -CCM supports several load balancer options for deployments that use Octavia. - -[NOTE] -==== -Neutron-LBaaS support is deprecated. -==== - -|=== -| Option | Description - -| `enabled` -| Whether or not to enable the `LoadBalancer` type of services integration. The default value is `true`. - -// Always enforced. -// | `use-octavia` -// | Whether or not to use Octavia for the `LoadBalancer` type of service implementation rather than Neutron-LBaaS. The default value is `true`. - -| `floating-network-id` -| Optional. The external network used to create floating IP addresses for load balancer virtual IP addresses (VIPs). If there are multiple external networks in the cloud, this option must be set or the user must specify `loadbalancer.openstack.org/floating-network-id` in the service annotation. - -| `floating-subnet-id` -| Optional. The external network subnet used to create floating IP addresses for the load balancer VIP. Can be overridden by the service annotation `loadbalancer.openstack.org/floating-subnet-id`. - -| `floating-subnet` -| Optional. A name pattern (glob or regular expression if starting with `~`) for the external network subnet used to create floating IP addresses for the load balancer VIP. Can be overridden by the service annotation `loadbalancer.openstack.org/floating-subnet`. If multiple subnets match the pattern, the first one with available IP addresses is used. - -| `floating-subnet-tags` -| Optional. Tags for the external network subnet used to create floating IP addresses for the load balancer VIP. Can be overridden by the service annotation `loadbalancer.openstack.org/floating-subnet-tags`. If multiple subnets match these tags, the first one with available IP addresses is used. - -If the {rh-openstack} network is configured with sharing disabled, for example, with the `--no-share` flag used during creation, this option is unsupported. Set the network to share to use this option. - -| `lb-method` -| The load balancing algorithm used to create the load balancer pool. -For the Amphora provider the value can be `ROUND_ROBIN`, `LEAST_CONNECTIONS`, or `SOURCE_IP`. The default value is `ROUND_ROBIN`. - -For the OVN provider, only the `SOURCE_IP_PORT` algorithm is supported. - -For the Amphora provider, if using the `LEAST_CONNECTIONS` or `SOURCE_IP` methods, configure the `create-monitor` option as `true` in the `cloud-provider-config` config map on the `openshift-config` namespace and `ETP:Local` on the load-balancer type service to allow balancing algorithm enforcement in the client to service endpoint connections. - -| `lb-provider` -| Optional. Used to specify the provider of the load balancer, for example, `amphora` or `octavia`. 
Only the Amphora and Octavia providers are supported. - -| `lb-version` -| Optional. The load balancer API version. Only `"v2"` is supported. - -| `subnet-id` -| The ID of the Networking service subnet on which load balancer VIPs are created. - -// This ID is also used to create pool members if `member-subnet-id` is not set. - -// | `member-subnet-id` -// | ID of the Neutron network on which to create the members of the load balancer. The load balancer gets another network port on this subnet. Defaults to `subnet-id` if not set. - -| `network-id` -| The ID of the Networking service network on which load balancer VIPs are created. Unnecessary if `subnet-id` is set. - -// | `manage-security-groups` -// | If the Neutron security groups should be managed separately. Default: false - -| `create-monitor` -| Whether or not to create a health monitor for the service load balancer. A health monitor is required for services that declare `externalTrafficPolicy: Local`. The default value is `false`. - -This option is unsupported if you use {rh-openstack} earlier than version 17 with the `ovn` provider. - -| `monitor-delay` -| The interval in seconds by which probes are sent to members of the load balancer. The default value is `5`. - -| `monitor-max-retries` -| The number of successful checks that are required to change the operating status of a load balancer member to `ONLINE`. The valid range is `1` to `10`, and the default value is `1`. - -| `monitor-timeout` -| The time in seconds that a monitor waits to connect to the back end before it times out. The default value is `3`. - -| `internal-lb` -| Whether or not to create an internal load balancer without floating IP addresses. The default value is `false`. - -// | `cascade-delete` -// | Determines whether or not to perform cascade deletion of load balancers. Default: true. - -// | `flavor-id` -// | The id of the loadbalancer flavor to use. Uses octavia default if not set. - -// | `availability-zone` -// | The name of the loadbalancer availability zone to use. It is applicable if use-octavia is set to True and requires Octavia API version 2.14 or later (Ussuri release). The Octavia availability zone capabilities will not be used if it is not set. The parameter will be ignored if the Octavia version doesn't support availability zones yet. - -| `LoadBalancerClass "ClassName"` -a| This is a config section that comprises a set of options: - - * `floating-network-id` - * `floating-subnet-id` - * `floating-subnet` - * `floating-subnet-tags` - * `network-id` - * `subnet-id` - -// * `member-subnet-id` - -The behavior of these options is the same as that of the identically named options in the load balancer section of the CCM config file. - -You can set the `ClassName` value by specifying the service annotation `loadbalancer.openstack.org/class`. - -// | `enable-ingress-hostname` -// | Used with proxy protocol (set by annotation `loadbalancer.openstack.org/proxy-protocol: "true"`) by adding a dns suffix (nip.io) to the load balancer IP address. Default false. - -// This option is currently a workaround for the issue https://github.com/kubernetes/ingress-nginx/issues/3996, should be removed or refactored after the Kubernetes [KEP-1860](https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/1860-kube-proxy-IP-node-binding) is implemented. - -// | `ingress-hostname-suffix` -// | The dns suffix to the load balancer IP address when using proxy protocol. 
Default nip.io - -// This option is currently a workaround for the issue https://github.com/kubernetes/ingress-nginx/issues/3996, should be removed or refactored after the Kubernetes [KEP-1860](https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/1860-kube-proxy-IP-node-binding) is implemented. - -// | `default-tls-container-ref` -// | Reference to a tls container. This option works with Octavia, when this option is set then the cloud provider will create an Octavia Listener of type TERMINATED_HTTPS for a TLS Terminated loadbalancer. - -// Format for tls container ref: `https://{keymanager_host}/v1/containers/{uuid}` -// Check `container-store` parameter if you want to disable validation. - -// | `container-store` -// | Optional. Used to specify the store of the tls-container-ref, e.g. "barbican" or "external" - other store will cause a warning log. Default value - `barbican` - existence of tls container ref would always be performed. If set to `external` format for tls container ref will not be validated. - -| `max-shared-lb` -| The maximum number of services that can share a load balancer. The default value is `2`. -|=== - -// [id="ccm-config-metadata-options"] -// == Metadata options - -// |=== -// | Option | Description - -// | `search-order` -// | This configuration key affects the way that the provider retrieves metadata that relates to the instances in which it runs. The default value of `configDrive,metadataService` results in the provider retrieving metadata that relates to the instance from, if available, the config drive first,and then the metadata service. Alternative values are: -// * `configDrive`: Only retrieve instance metadata from the configuration drive. -// * `metadataService`: Only retrieve instance metadata from the metadata service. -// * `metadataService,configDrive`: Retrieve instance metadata from the metadata service first if available, and then retrieve instance metadata from the configuration drive. -// |=== - -// ### Multi region support (alpha) - -// | environment variable `OS_CCM_REGIONAL` is set to `true` - allow CCM to set ProviderID with region name `${ProviderName}://${REGION}/${instance-id}`. Default: false. - -[id="cluster-cloud-controller-config-overrides"] -== Options that the Operator overrides - -The CCM Operator overrides the following options, which you might recognize from configuring {rh-openstack}. Do not configure them yourself. They are included in this document for informational purposes only. - -|=== -| Option | Description - -| `auth-url` -| The {rh-openstack} Identity service URL. For example, `http://128.110.154.166/identity`. - -| `os-endpoint-type` -| The type of endpoint to use from the service catalog. - -// If unset, public endpoints are used. - -| `username` -| The Identity service user name. - -// Leave this option unset if you are using Identity service application credentials. - -| `password` -| The Identity service user password. - -// Leave this option unset if you are using Identity service application credentials. - -| `domain-id` -| The Identity service user domain ID. - -// Leave this option unset if you are using Identity service application credentials. - -| `domain-name` -| The Identity service user domain name. - -// This option is not required if you set `domain-id`. - -| `tenant-id` -| The Identity service project ID. Leave this option unset if you are using Identity service application credentials. 
- -In version 3 of the Identity API, which changed the identifier `tenant` to `project`, the value of `tenant-id` is automatically mapped to the project construct in the API. - -| `tenant-name` -| The Identity service project name. - -| `tenant-domain-id` -| The Identity service project domain ID. - -| `tenant-domain-name` -| The Identity service project domain name. - -| `user-domain-id` -| The Identity service user domain ID. - -| `user-domain-name` -| The Identity service user domain name. - -| `use-clouds` -a| Whether or not to fetch authorization credentials from a `clouds.yaml` file. Options set in this section are prioritized over values read from the `clouds.yaml` file. - -CCM searches for the file in the following places: - -. The value of the `clouds-file` option. -. A file path stored in the environment variable `OS_CLIENT_CONFIG_FILE`. -. The directory `pkg/openstack`. -. The directory `~/.config/openstack`. -. The directory `/etc/openstack`. - -| `clouds-file` -| The file path of a `clouds.yaml` file. It is used if the `use-clouds` option is set to `true`. - -| `cloud` -| The named cloud in the `clouds.yaml` file that you want to use. It is used if the `use-clouds` option is set to `true`. -|=== \ No newline at end of file diff --git a/modules/cluster-cloud-controller-manager-operator.adoc b/modules/cluster-cloud-controller-manager-operator.adoc deleted file mode 100644 index 15fcc72f076f..000000000000 --- a/modules/cluster-cloud-controller-manager-operator.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-cloud-controller-manager-operator_{context}"] -= Cluster Cloud Controller Manager Operator - -[discrete] -== Purpose - -[NOTE] -==== -This Operator is General Availability for Microsoft Azure Stack Hub, Nutanix, {rh-openstack-first}, and VMware vSphere. - -It is available as a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] for Alibaba Cloud, Amazon Web Services (AWS), Google Cloud Platform (GCP), IBM Cloud, IBM Cloud Power VS, and Microsoft Azure. -==== - -The Cluster Cloud Controller Manager Operator manages and updates the cloud controller managers deployed on top of {product-title}. The Operator is based on the Kubebuilder framework and `controller-runtime` libraries. It is installed via the Cluster Version Operator (CVO). - -It contains the following components: - -* Operator -* Cloud configuration observer - -By default, the Operator exposes Prometheus metrics through the `metrics` service. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-cloud-controller-manager-operator[cluster-cloud-controller-manager-operator] diff --git a/modules/cluster-config-operator.adoc b/modules/cluster-config-operator.adoc deleted file mode 100644 index a2635eb7f37b..000000000000 --- a/modules/cluster-config-operator.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-config-operator_{context}"] -= Cluster Config Operator - -[discrete] -== Purpose - -The Cluster Config Operator performs the following tasks related to `config.openshift.io`: - -* Creates CRDs. -* Renders the initial custom resources. -* Handles migrations. 
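To explore the `config.openshift.io` API group that this Operator serves, you can list its resource types and inspect one of the rendered custom resources. The following commands are a minimal sketch that assumes cluster-admin access with the `oc` CLI; the `cluster` object name follows the usual singleton convention for these resources:

[source,terminal]
----
$ oc api-resources --api-group=config.openshift.io
----

[source,terminal]
----
$ oc get infrastructures.config.openshift.io cluster -o yaml
----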
- - -[discrete] -== Project - -link:https://github.com/openshift/cluster-config-operator[cluster-config-operator] diff --git a/modules/cluster-csi-snapshot-controller-operator.adoc b/modules/cluster-csi-snapshot-controller-operator.adoc deleted file mode 100644 index 94231b46b00b..000000000000 --- a/modules/cluster-csi-snapshot-controller-operator.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc -// * installing/cluster-capabilities.adoc - -ifeval::["{context}" == "cluster-operators-ref"] -:operator-ref: -endif::[] - -ifeval::["{context}" == "cluster-capabilities"] -:cluster-caps: -endif::[] - -:_content-type: REFERENCE -[id="cluster-csi-snapshot-controller-operator_{context}"] -ifdef::operator-ref[= Cluster CSI Snapshot Controller Operator] -ifdef::cluster-caps[= CSI snapshot controller capability] - -ifdef::operator-ref[] - -[NOTE] -==== -The Cluster CSI Snapshot Controller Operator is an optional cluster capability that can be disabled by cluster administrators during installation. For more information about optional cluster capabilities, see "Cluster capabilities" in _Installing_. -==== - -endif::operator-ref[] - -[discrete] -== Purpose - -ifdef::cluster-caps[] - -The Cluster CSI Snapshot Controller Operator provides the features for the `CSISnapshot` capability. - -endif::cluster-caps[] - -The Cluster CSI Snapshot Controller Operator installs and maintains the CSI Snapshot Controller. The CSI Snapshot Controller is responsible for watching the `VolumeSnapshot` CRD objects and manages the creation and deletion lifecycle of volume snapshots. - -ifdef::operator-ref[] - -[discrete] -== Project - -link:https://github.com/openshift/cluster-csi-snapshot-controller-operator[cluster-csi-snapshot-controller-operator] - -endif::operator-ref[] - -ifeval::["{context}" == "cluster-operators-ref"] -:!operator-ref: -endif::[] - -ifeval::["{context}" == "cluster-caps"] -:!cluster-caps: -endif::[] \ No newline at end of file diff --git a/modules/cluster-dns-operator.adoc b/modules/cluster-dns-operator.adoc deleted file mode 100644 index fa88268d5ff1..000000000000 --- a/modules/cluster-dns-operator.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="dns-operator_{context}"] -= DNS Operator - -[discrete] -== Purpose - -The DNS Operator deploys and manages CoreDNS to provide a name resolution service to pods that enables DNS-based Kubernetes Service discovery in {product-title}. - -The Operator creates a working default deployment based on the cluster's configuration. - -* The default cluster domain is `cluster.local`. -* Configuration of the CoreDNS Corefile or Kubernetes plugin is not yet supported. - -The DNS Operator manages CoreDNS as a Kubernetes daemon set exposed as a service with a static IP. CoreDNS runs on all nodes in the cluster. 
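To see the resources that this Operator manages, you can inspect the `default` DNS object and the CoreDNS daemon set and service. The following commands are a minimal sketch that assumes cluster-admin access with the `oc` CLI and the default object names, `dns-default`, in the `openshift-dns` namespace:

[source,terminal]
----
$ oc describe dns.operator/default
----

[source,terminal]
----
$ oc -n openshift-dns get daemonset/dns-default service/dns-default
----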
- -[discrete] -== Project - -link:https://github.com/openshift/cluster-dns-operator[cluster-dns-operator] diff --git a/modules/cluster-entitlements.adoc b/modules/cluster-entitlements.adoc deleted file mode 100644 index b5feae16a79e..000000000000 --- a/modules/cluster-entitlements.adoc +++ /dev/null @@ -1,175 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_alibaba/installing-alibaba-vpc.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_azure/installing-azure-vnet.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_azure/installing-azure-default.adoc -// * installing/installing_azure/installing-azure-network-customizations.adoc -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-customizations.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_aws/installing-aws-customizations.adoc -// * installing/installing_aws/installing-aws-private.adoc -// * installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc -// * installing/installing_aws/installing-aws-default.adoc -// * installing/installing_aws/installing-aws-vpc.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-secret-region.adoc -// * installing/installing_aws/installing-aws-china-region.adoc -// * installing/installing_aws/installing-aws-outposts-remote-workers.adoc -// * 
installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-installer-restricted.adoc -// * installing/installing_openstack/installing-openstack-user.adoc -// * installing/installing_openstack/installing-openstack-user-sr-iov-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user-sr-iov.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc -// * installing/installing_openstack/installing-openstack-installer.adoc -// * installing/installing_openstack/installing-openstack-installer-sr-iov.adoc -// * installing/installing_gcp/installing-gcp-customizations.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc -// * installing/installing_gcp/installing-gcp-private.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-gcp-default.adoc -// * installing/installing_gcp/installing-gcp-vpc.adoc -// * installing/installing_gcp/installing-gcp-network-customizations.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc -// * installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc -// * installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-network-customizations.adoc -// * architecture/architecture.adoc -// * installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc - -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp-installer-provisioned"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:restricted: -endif::[] - -:_content-type: CONCEPT -[id="cluster-entitlements_{context}"] -ifndef::openshift-origin[] -= Internet access for {product-title} - -In {product-title} {product-version}, you require access to the internet to -ifndef::restricted[] -install -endif::restricted[] -ifdef::restricted[] -obtain the images that are necessary to install -endif::restricted[] -your cluster. 
- -You must have internet access to: - -* Access {cluster-manager-url} to download the installation program and perform subscription management. If the cluster has internet access and you do not disable Telemetry, that service automatically entitles your cluster. -* Access link:http://quay.io[Quay.io] to obtain the packages that are required to install your cluster. -* Obtain the packages that are required to perform cluster updates. -ifdef::openshift-enterprise,openshift-webscale[] - -ifndef::restricted[] -[IMPORTANT] -==== -If your cluster cannot have direct internet access, you can perform a restricted network installation on some types of infrastructure that you provision. During that process, you download the required content and use it to populate a mirror registry with the installation packages. With some installation types, the environment that you install your cluster in will not require internet access. Before you update the cluster, you update the content of the mirror registry. -==== -endif::restricted[] - -endif::openshift-enterprise,openshift-webscale[] -endif::openshift-origin[] - -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp-installer-provisioned"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:!restricted: -endif::[] diff --git a/modules/cluster-image-registry-operator.adoc b/modules/cluster-image-registry-operator.adoc deleted file mode 100644 index 545abba8f2fd..000000000000 --- a/modules/cluster-image-registry-operator.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-image-registry-operator_{context}"] -= Cluster Image Registry Operator - -[discrete] -== Purpose - -The Cluster Image Registry Operator manages a singleton instance of the {product-registry}. It manages all configuration of the registry, including creating storage. - -On initial start up, the Operator creates a default `image-registry` resource instance based on the configuration detected in the cluster. This indicates what cloud storage type to use based on the cloud provider. - -If insufficient information is available to define a complete `image-registry` resource, then an incomplete resource is defined and the Operator updates the resource status with information about what is missing. - -The Cluster Image Registry Operator runs in the `openshift-image-registry` namespace and it also manages the registry instance in that location. 
All configuration and workload resources for the registry reside in that namespace. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-image-registry-operator[cluster-image-registry-operator] diff --git a/modules/cluster-kube-scheduler-operator.adoc b/modules/cluster-kube-scheduler-operator.adoc deleted file mode 100644 index b5e8d14bfd93..000000000000 --- a/modules/cluster-kube-scheduler-operator.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-kube-scheduler-operator_{context}"] -= Kubernetes Scheduler Operator - -[discrete] -== Purpose - -The Kubernetes Scheduler Operator manages and updates the Kubernetes Scheduler deployed on top of {product-title}. The Operator is based on the {product-title} `library-go` framework and it is installed with the Cluster Version Operator (CVO). - -The Kubernetes Scheduler Operator contains the following components: - -* Operator -* Bootstrap manifest renderer -* Installer based on static pods -* Configuration observer - -By default, the Operator exposes Prometheus metrics through the metrics service. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-kube-scheduler-operator[cluster-kube-scheduler-operator] - -[discrete] -== Configuration - -The configuration for the Kubernetes Scheduler is the result of merging: - -* a default configuration. -* an observed configuration from the spec `schedulers.config.openshift.io`. - -All of these are sparse configurations, invalidated JSON snippets which are merged to form a valid configuration at the end. diff --git a/modules/cluster-kube-storage-version-migrator-operator.adoc b/modules/cluster-kube-storage-version-migrator-operator.adoc deleted file mode 100644 index a92f41701369..000000000000 --- a/modules/cluster-kube-storage-version-migrator-operator.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-kube-storage-version-migrator-operator_{context}"] -= Kubernetes Storage Version Migrator Operator - -[discrete] -== Purpose - -The Kubernetes Storage Version Migrator Operator detects changes of the default storage version, creates migration requests for resource types when the storage version changes, and processes migration requests. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-kube-storage-version-migrator-operator[cluster-kube-storage-version-migrator-operator] diff --git a/modules/cluster-limitations-local-zone.adoc b/modules/cluster-limitations-local-zone.adoc deleted file mode 100644 index c2e83896622f..000000000000 --- a/modules/cluster-limitations-local-zone.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing-aws-localzone.adoc - -:_content-type: CONCEPT - -[id="cluster-limitations-local-zone_{context}"] -= Cluster limitations in AWS Local Zones - -Some limitations exist when you attempt to deploy a cluster with a default installation configuration in Amazon Web Services (AWS) Local Zones. - -[IMPORTANT] -==== -The following list details limitations when deploying a cluster in AWS Local Zones: - -- The Maximum Transmission Unit (MTU) between an Amazon EC2 instance in a Local Zone and an Amazon EC2 instance in the Region is `1300`. This causes the cluster-wide network MTU to change according to the network plugin that is used on the deployment. 
-- Network resources such as Network Load Balancer (NLB), Classic Load Balancer, and Network Address Translation (NAT) Gateways are not supported in AWS Local Zones. -- For an {product-title} cluster on AWS, the AWS Elastic Block Storage (EBS) `gp3` type volume is the default for node volumes and the default for the storage class. This volume type is not globally available on Local Zone locations. By default, the nodes running in Local Zones are deployed with the `gp2` EBS volume. The `gp2-csi` `StorageClass` must be set when creating workloads on Local Zone nodes. -==== diff --git a/modules/cluster-logging-Uninstall-logging.adoc b/modules/cluster-logging-Uninstall-logging.adoc deleted file mode 100644 index b7dfe0709216..000000000000 --- a/modules/cluster-logging-Uninstall-logging.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-uninstall-logging-about_{context}"] -= About uninstalling {product-title} Logging - -You can stop log aggregation by deleting the ClusterLogging custom resource (CR). After deleting the CR, there are other cluster logging components that remain, which you can optionally remove. diff --git a/modules/cluster-logging-about-collector.adoc b/modules/cluster-logging-about-collector.adoc deleted file mode 100644 index 4b6251b5841d..000000000000 --- a/modules/cluster-logging-about-collector.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-about-collector_{context}"] -= About the logging collector - -The {logging-title} collects container and node logs. - -By default, the log collector uses the following sources: - -* journald for all system logs -* `/var/log/containers/*.log` for all container logs - -If you configure the log collector to collect audit logs, it gets them from `/var/log/audit/audit.log`. - -The logging collector is a daemon set that deploys pods to each {product-title} node. System and infrastructure logs are generated by journald log messages from the operating system, the container runtime, and {product-title}. Application logs are generated by the CRI-O container engine. Fluentd collects the logs from these sources and forwards them internally or externally as you configure in {product-title}. - -The container runtimes provide minimal information to identify the source of log messages: project, pod name, and container ID. This information is not sufficient to uniquely identify the source of the logs. If a pod with a given name and project is deleted before the log collector begins processing its logs, information from the API server, such as labels and annotations, might not be available. There might not be a way to distinguish the log messages from a similarly named pod and project or trace the logs to their source. This limitation means that log collection and normalization are considered *best effort*. - -[IMPORTANT] -==== -The available container runtimes provide minimal information to identify the -source of log messages and do not guarantee unique individual log -messages or that these messages can be traced to their source. 
-==== diff --git a/modules/cluster-logging-about-components.adoc b/modules/cluster-logging-about-components.adoc deleted file mode 100644 index 14d79d9bfe43..000000000000 --- a/modules/cluster-logging-about-components.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-about-components_{context}"] -= About {logging} components - -The {logging} components include a collector deployed to each node in the {product-title} cluster -that collects all node and container logs and writes them to a log store. You can use a centralized web UI to create rich visualizations and dashboards with the aggregated data. - -The major components of the {logging} are: - -* collection - This is the component that collects logs from the cluster, formats them, and forwards them to the log store. The current implementation is Fluentd. -* log store - This is where the logs are stored. The default implementation is Elasticsearch. You can use the default Elasticsearch log store or forward logs to external log stores. The default log store is optimized and tested for short-term storage. -* visualization - This is the UI component you can use to view logs, graphs, charts, and so forth. The current implementation is Kibana. - diff --git a/modules/cluster-logging-about-crd.adoc b/modules/cluster-logging-about-crd.adoc deleted file mode 100644 index 5c2a76f06048..000000000000 --- a/modules/cluster-logging-about-crd.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-configuring-crd_{context}"] -= About the ClusterLogging custom resource - -To make changes to your {logging} environment, create and modify the `ClusterLogging` custom resource (CR). - -Instructions for creating or modifying a CR are provided in this documentation as appropriate. - -The following example shows a typical custom resource for the {logging}. - -[id="efk-logging-configuring-about-sample_{context}"] -.Sample `ClusterLogging` custom resource (CR) -ifdef::openshift-enterprise,openshift-rosa,openshift-dedicated,openshift-webscale,openshift-origin[] -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" <1> - namespace: "openshift-logging" <2> -spec: - managementState: "Managed" <3> - logStore: - type: "elasticsearch" <4> - retentionPolicy: - application: - maxAge: 1d - infra: - maxAge: 7d - audit: - maxAge: 7d - elasticsearch: - nodeCount: 3 - resources: - limits: - memory: 16Gi - requests: - cpu: "1" - memory: 16Gi - storage: - storageClassName: "gp2" - size: "200G" - redundancyPolicy: "SingleRedundancy" - visualization: <5> - type: "kibana" - kibana: - resources: - limits: - memory: 736Mi - requests: - cpu: 100m - memory: 736Mi - replicas: 1 - collection: <6> - logs: - type: "fluentd" - fluentd: - resources: - limits: - memory: 736Mi - requests: - cpu: 100m - memory: 736Mi ----- -<1> The CR name must be `instance`. -<2> The CR must be installed to the `openshift-logging` namespace. -<3> The Red Hat OpenShift Logging Operator management state. When set to `unmanaged` the operator is in an unsupported state and will not get updates. -<4> Settings for the log store, including retention policy, the number of nodes, the resource requests and limits, and the storage class. 
- -<5> Settings for the visualizer, including the resource requests and limits, and the number of pod replicas. -<6> Settings for the log collector, including the resource requests and limits. -endif::[] diff --git a/modules/cluster-logging-about-logstore.adoc b/modules/cluster-logging-about-logstore.adoc deleted file mode 100644 index d51ab1623e43..000000000000 --- a/modules/cluster-logging-about-logstore.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-about-logstore_{context}"] -= About the log store - -By default, {product-title} uses link:https://www.elastic.co/products/elasticsearch[Elasticsearch (ES)] to store log data. Optionally, you can use the Log Forwarder API to forward logs to an external store. Several types of external store are supported, including Fluentd, rsyslog, Kafka, and others. - -The {logging} Elasticsearch instance is optimized and tested for short-term storage, approximately seven days. If you want to retain your logs over a longer term, it is recommended that you move the data to a third-party storage system. - -Elasticsearch organizes the log data from Fluentd into datastores, or _indices_, then subdivides each index into multiple pieces called _shards_, which it spreads across a set of Elasticsearch nodes in an Elasticsearch cluster. You can configure Elasticsearch to make copies of the shards, called _replicas_, which Elasticsearch also spreads across the Elasticsearch nodes. The `ClusterLogging` custom resource (CR) allows you to specify how the shards are replicated to provide data redundancy and resilience to failure. You can also specify how long the different types of logs are retained by using a retention policy in the `ClusterLogging` CR. - -[NOTE] -==== -The number of primary shards for the index templates is equal to the number of Elasticsearch data nodes. -==== - -The Red Hat OpenShift Logging Operator and companion OpenShift Elasticsearch Operator ensure that each Elasticsearch node is deployed by using a unique deployment that includes its own storage volume. -You can use a `ClusterLogging` custom resource (CR) to increase the number of Elasticsearch nodes, as needed. -See the link:https://www.elastic.co/guide/en/elasticsearch/guide/current/hardware.html[Elasticsearch documentation] for considerations involved in configuring storage. - -[NOTE] -==== -A highly available Elasticsearch environment requires at least three Elasticsearch nodes, each on a different host. -==== - -Role-based access control (RBAC) applied to the Elasticsearch indices controls which logs developers can access. Administrators can access all logs and developers can access only the logs in their projects. diff --git a/modules/cluster-logging-about-visualizer.adoc b/modules/cluster-logging-about-visualizer.adoc deleted file mode 100644 index 607f8a4d912d..000000000000 --- a/modules/cluster-logging-about-visualizer.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-about-visualizer_{context}"] -= About logging visualization - -{product-title} uses Kibana to display the log data collected by Fluentd and indexed by Elasticsearch. - -Kibana is a browser-based console interface to query, discover, and visualize your Elasticsearch data through -histograms, line graphs, pie charts, and other visualizations.
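If the logging deployment created the default `kibana` route in the `openshift-logging` namespace, you can look up the Kibana URL with a command like the following; the route name is an assumption and might differ in your environment:

[source,terminal]
----
$ oc -n openshift-logging get route kibana -o jsonpath='{.spec.host}'
----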
- diff --git a/modules/cluster-logging-about.adoc b/modules/cluster-logging-about.adoc deleted file mode 100644 index 257ad90ea41c..000000000000 --- a/modules/cluster-logging-about.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * virt/support/virt-openshift-cluster-monitoring.adoc -// * logging/cluster-logging.adoc -// * serverless/monitor/cluster-logging-serverless.adoc - -// This module uses conditionalized paragraphs so that the module -// can be re-used in associated products. - -:_content-type: CONCEPT -[id="cluster-logging-about_{context}"] -= About deploying the {logging-title} - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -{product-title} cluster administrators can deploy the {logging} using the {product-title} web console or CLI to install the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator. When the Operators are installed, you create a `ClusterLogging` custom resource (CR) to schedule {logging} pods and other resources necessary to support the {logging}. The Operators are responsible for deploying, upgrading, and maintaining the {logging}. -endif::openshift-enterprise,openshift-webscale,openshift-origin[] - -The `ClusterLogging` CR defines a complete {logging} environment that includes all the components of the logging stack to collect, store and visualize logs. The Red Hat OpenShift Logging Operator watches the {logging} CR and adjusts the logging deployment accordingly. - -Administrators and application developers can view the logs of the projects for which they have view access. diff --git a/modules/cluster-logging-clo-status-comp.adoc b/modules/cluster-logging-clo-status-comp.adoc deleted file mode 100644 index ce35d99ce49f..000000000000 --- a/modules/cluster-logging-clo-status-comp.adoc +++ /dev/null @@ -1,96 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-cluster-status.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-clo-status-example_{context}"] -= Viewing the status of {logging} components - -You can view the status for a number of {logging} components. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Change to the `openshift-logging` project. -+ -[source,terminal] ----- -$ oc project openshift-logging ----- - -. View the status of the {logging-title} environment: -+ -[source,terminal] ----- -$ oc describe deployment cluster-logging-operator ----- -+ -.Example output -[source,terminal] ----- -Name: cluster-logging-operator - -.... - -Conditions: - Type Status Reason - ---- ------ ------ - Available True MinimumReplicasAvailable - Progressing True NewReplicaSetAvailable - -.... - -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal ScalingReplicaSet 62m deployment-controller Scaled up replica set cluster-logging-operator-574b8987df to 1---- ----- - -. View the status of the {logging} replica set: - -.. Get the name of a replica set: -+ -.Example output -[source,terminal] ----- -$ oc get replicaset ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT READY AGE -cluster-logging-operator-574b8987df 1 1 1 159m -elasticsearch-cdm-uhr537yu-1-6869694fb 1 1 1 157m -elasticsearch-cdm-uhr537yu-2-857b6d676f 1 1 1 156m -elasticsearch-cdm-uhr537yu-3-5b6fdd8cfd 1 1 1 155m -kibana-5bd5544f87 1 1 1 157m ----- - -.. 
Get the status of the replica set: -+ -[source,terminal] ----- -$ oc describe replicaset cluster-logging-operator-574b8987df ----- -+ -.Example output -[source,terminal] ----- -Name: cluster-logging-operator-574b8987df - -.... - -Replicas: 1 current / 1 desired -Pods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed - -.... - -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulCreate 66m replicaset-controller Created pod: cluster-logging-operator-574b8987df-qjhqv---- ----- diff --git a/modules/cluster-logging-clo-status.adoc b/modules/cluster-logging-clo-status.adoc deleted file mode 100644 index 14b1b1d9edd4..000000000000 --- a/modules/cluster-logging-clo-status.adoc +++ /dev/null @@ -1,233 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-cluster-status.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-clo-status_{context}"] -= Viewing the status of the Red Hat OpenShift Logging Operator - -You can view the status of your Red Hat OpenShift Logging Operator. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Change to the `openshift-logging` project. -+ -[source,terminal] ----- -$ oc project openshift-logging ----- - -. To view the OpenShift Logging status: - -.. Get the OpenShift Logging status: -+ -[source,terminal] ----- -$ oc get clusterlogging instance -o yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging - -.... - -status: <1> - collection: - logs: - fluentdStatus: - daemonSet: fluentd <2> - nodes: - fluentd-2rhqp: ip-10-0-169-13.ec2.internal - fluentd-6fgjh: ip-10-0-165-244.ec2.internal - fluentd-6l2ff: ip-10-0-128-218.ec2.internal - fluentd-54nx5: ip-10-0-139-30.ec2.internal - fluentd-flpnn: ip-10-0-147-228.ec2.internal - fluentd-n2frh: ip-10-0-157-45.ec2.internal - pods: - failed: [] - notReady: [] - ready: - - fluentd-2rhqp - - fluentd-54nx5 - - fluentd-6fgjh - - fluentd-6l2ff - - fluentd-flpnn - - fluentd-n2frh - logstore: <3> - elasticsearchStatus: - - ShardAllocationEnabled: all - cluster: - activePrimaryShards: 5 - activeShards: 5 - initializingShards: 0 - numDataNodes: 1 - numNodes: 1 - pendingTasks: 0 - relocatingShards: 0 - status: green - unassignedShards: 0 - clusterName: elasticsearch - nodeConditions: - elasticsearch-cdm-mkkdys93-1: - nodeCount: 1 - pods: - client: - failed: - notReady: - ready: - - elasticsearch-cdm-mkkdys93-1-7f7c6-mjm7c - data: - failed: - notReady: - ready: - - elasticsearch-cdm-mkkdys93-1-7f7c6-mjm7c - master: - failed: - notReady: - ready: - - elasticsearch-cdm-mkkdys93-1-7f7c6-mjm7c -visualization: <4> - kibanaStatus: - - deployment: kibana - pods: - failed: [] - notReady: [] - ready: - - kibana-7fb4fd4cc9-f2nls - replicaSets: - - kibana-7fb4fd4cc9 - replicas: 1 ----- -<1> In the output, the cluster status fields appear in the `status` stanza. -<2> Information on the Fluentd pods. -<3> Information on the Elasticsearch pods, including Elasticsearch cluster health, `green`, `yellow`, or `red`. -<4> Information on the Kibana pods. - - -[id="cluster-logging-clo-status-message_{context}"] -== Example condition messages - -The following are examples of some condition messages from the `Status.Nodes` section of the OpenShift Logging instance. 
- - -// https://github.com/openshift/elasticsearch-operator/pull/92 - -A status message similar to the following indicates a node has exceeded the configured low watermark and no shard will be allocated to this node: - -.Example output -[source,yaml] ----- - nodes: - - conditions: - - lastTransitionTime: 2019-03-15T15:57:22Z - message: Disk storage usage for node is 27.5gb (36.74%). Shards will be not - be allocated on this node. - reason: Disk Watermark Low - status: "True" - type: NodeStorage - deploymentName: example-elasticsearch-clientdatamaster-0-1 - upgradeStatus: {} ----- - -A status message similar to the following indicates a node has exceeded the configured high watermark and shards will be relocated to other nodes: - -.Example output -[source,yaml] ----- - nodes: - - conditions: - - lastTransitionTime: 2019-03-15T16:04:45Z - message: Disk storage usage for node is 27.5gb (36.74%). Shards will be relocated - from this node. - reason: Disk Watermark High - status: "True" - type: NodeStorage - deploymentName: cluster-logging-operator - upgradeStatus: {} ----- - -A status message similar to the following indicates the Elasticsearch node selector in the CR does not match any nodes in the cluster: - -.Example output -[source,text] ----- - Elasticsearch Status: - Shard Allocation Enabled: shard allocation unknown - Cluster: - Active Primary Shards: 0 - Active Shards: 0 - Initializing Shards: 0 - Num Data Nodes: 0 - Num Nodes: 0 - Pending Tasks: 0 - Relocating Shards: 0 - Status: cluster health unknown - Unassigned Shards: 0 - Cluster Name: elasticsearch - Node Conditions: - elasticsearch-cdm-mkkdys93-1: - Last Transition Time: 2019-06-26T03:37:32Z - Message: 0/5 nodes are available: 5 node(s) didn't match node selector. - Reason: Unschedulable - Status: True - Type: Unschedulable - elasticsearch-cdm-mkkdys93-2: - Node Count: 2 - Pods: - Client: - Failed: - Not Ready: - elasticsearch-cdm-mkkdys93-1-75dd69dccd-f7f49 - elasticsearch-cdm-mkkdys93-2-67c64f5f4c-n58vl - Ready: - Data: - Failed: - Not Ready: - elasticsearch-cdm-mkkdys93-1-75dd69dccd-f7f49 - elasticsearch-cdm-mkkdys93-2-67c64f5f4c-n58vl - Ready: - Master: - Failed: - Not Ready: - elasticsearch-cdm-mkkdys93-1-75dd69dccd-f7f49 - elasticsearch-cdm-mkkdys93-2-67c64f5f4c-n58vl - Ready: ----- - -A status message similar to the following indicates that the requested PVC could not bind to PV: - -.Example output -[source,text] ----- - Node Conditions: - elasticsearch-cdm-mkkdys93-1: - Last Transition Time: 2019-06-26T03:37:32Z - Message: pod has unbound immediate PersistentVolumeClaims (repeated 5 times) - Reason: Unschedulable - Status: True - Type: Unschedulable ----- - -A status message similar to the following indicates that the Fluentd pods cannot be scheduled because the node selector did not match any nodes: - -.Example output -[source,yaml] ----- -Status: - Collection: - Logs: - Fluentd Status: - Daemon Set: fluentd - Nodes: - Pods: - Failed: - Not Ready: - Ready: ----- diff --git a/modules/cluster-logging-cloudwatch.adoc b/modules/cluster-logging-cloudwatch.adoc deleted file mode 100644 index 618bf932c5b8..000000000000 --- a/modules/cluster-logging-cloudwatch.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc -// -// This module uses conditionalized paragraphs so that the module -// can be re-used in associated products. 
- -:_content-type: CONCEPT -[id="cluster-logging-cloudwatch_{context}"] -= CloudWatch recommendation for {product-title} - -Red Hat recommends that you use the AWS CloudWatch solution for your logging needs. - -[id="cluster-logging-requirements-explained_{context}"] -== Logging requirements - -Hosting your own logging stack requires a large amount of compute resources and storage, which might be dependent on your cloud service quota. The compute resource requirements can start at 48 GB or more, while the storage requirement can be as large as 1600 GB or more. The logging stack runs on your worker nodes, which reduces your available workload resource. With these considerations, hosting your own logging stack increases your cluster operating costs. \ No newline at end of file diff --git a/modules/cluster-logging-collecting-storing-kubernetes-events.adoc b/modules/cluster-logging-collecting-storing-kubernetes-events.adoc deleted file mode 100644 index fb0a9dcb39b5..000000000000 --- a/modules/cluster-logging-collecting-storing-kubernetes-events.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-collecting-storing-kubernetes-events-about_{context}"] -= About collecting and storing Kubernetes events - -The {product-title} Event Router is a pod that watches Kubernetes events and logs them for collection by {product-title} Logging. You must manually deploy the Event Router. diff --git a/modules/cluster-logging-collector-alerts-viewing.adoc b/modules/cluster-logging-collector-alerts-viewing.adoc deleted file mode 100644 index 6d11df002d37..000000000000 --- a/modules/cluster-logging-collector-alerts-viewing.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-collector.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-collector-alerts-viewing_{context}"] -= Viewing logging collector alerts - -Alerts are shown in the -ifndef::openshift-rosa,openshift-dedicated[] -{product-title} web console, -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -{cluster-manager-url}, -endif::[] -on the *Alerts* tab of the Alerting UI. Alerts are in one of the following states: - -* *Firing*. The alert condition is true for the duration of the timeout. Click the *Options* menu at the end of the firing alert to view more information or silence the alert. -* *Pending* The alert condition is currently true, but the timeout has not been reached. -* *Not Firing*. The alert is not currently triggered. - -.Procedure - -To view the {logging} and other {product-title} alerts: - -. In the {product-title} console, click *Observe* → *Alerting*. - -. Click the *Alerts* tab. The alerts are listed, based on the filters selected. diff --git a/modules/cluster-logging-collector-alerts.adoc b/modules/cluster-logging-collector-alerts.adoc deleted file mode 100644 index 453c2076b96c..000000000000 --- a/modules/cluster-logging-collector-alerts.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-collector.adoc - -:_content-type: CONCEPT -[id="cluster-logging-collector-alerts_{context}"] -= About logging collector alerts - -The following alerts are generated by the logging collector. 
You can view these alerts in the -ifndef::openshift-rosa,openshift-dedicated[] -{product-title} web console -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -{cluster-manager-url} -endif::[] -on the *Alerts* page of the Alerting UI. - -.Fluentd Prometheus alerts -[cols="2,2,2,1",options="header"] -|=== -|Alert |Message |Description |Severity - -|`FluentDHighErrorRate` -|` of records have resulted in an error by fluentd .` -|The number of FluentD output errors is high, by default more than 10 in the previous 15 minutes. -|Warning - -|`FluentdNodeDown` -|`Prometheus could not scrape fluentd for more than 10m.` -|Fluentd is reporting that Prometheus could not scrape a specific Fluentd instance. -|Critical - -|`FluentdQueueLengthIncreasing` -|`In the last 12h, fluentd buffer queue length constantly increased more than 1. Current value is .` -|Fluentd is reporting that the queue size is increasing. -|Critical - -|`FluentDVeryHighErrorRate` -|` of records have resulted in an error by fluentd .` -|The number of FluentD output errors is very high, by default more than 25 in the previous 15 minutes. -|Critical - -|=== diff --git a/modules/cluster-logging-collector-collecting-ovn-logs.adoc b/modules/cluster-logging-collector-collecting-ovn-logs.adoc deleted file mode 100644 index 3558b5ee370a..000000000000 --- a/modules/cluster-logging-collector-collecting-ovn-logs.adoc +++ /dev/null @@ -1,89 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-collecting-ovn-audit-logs_{context}"] -= Collecting OVN network policy audit logs - -You can collect the OVN network policy audit logs from the `/var/log/ovn/acl-audit-log.log` file on OVN-Kubernetes pods and forward them to logging servers. - -.Prerequisites - -* You are using {product-title} version 4.8 or later. -* You are using Cluster Logging 5.2 or later. -* You have already set up a `ClusterLogForwarder` custom resource (CR) object. -* The {product-title} cluster is configured for OVN-Kubernetes network policy audit logging. See the following "Additional resources" section. - -[NOTE] -==== -Often, logging servers that store audit data must meet organizational and governmental requirements for compliance and security. -==== - -.Procedure - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object as described in other topics on forwarding logs to third-party systems. - -. In the YAML file, add the `audit` log type to the `inputRefs` element in a pipeline. For example: -+ -[source,yaml] ----- - pipelines: - - name: audit-logs - inputRefs: - - audit <1> - outputRefs: - - secure-logging-server <2> ----- -<1> Specify `audit` as one of the log types to input. -<2> Specify the output that connects to your logging server. - -. Recreate the updated CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -.Verification - -Verify that audit log entries from the nodes that you are monitoring are present among the log data gathered by the logging server. - -Find an original audit log entry in `/var/log/ovn/acl-audit-log.log` and compare it with the corresponding log entry on the logging server. 
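One way to read that file on a node, without opening a shell on it, is a command like `oc adm node-logs`. This is a hedged sketch; `<node_name>` is a placeholder for one of the nodes that generates OVN audit logs:

[source,terminal]
----
$ oc adm node-logs <node_name> --path=ovn/acl-audit-log.log
----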
- -For example, an original log entry in `/var/log/ovn/acl-audit-log.log` might look like this: - -[source,txt] ----- -2021-07-06T08:26:58.687Z|00004|acl_log(ovn_pinctrl0)|INFO|name="verify-audit- -logging_deny-all", verdict=drop, severity=alert: -icmp,vlan_tci=0x0000,dl_src=0a:58:0a:81:02:12,dl_dst=0a:58:0a:81:02:14,nw_src=10 -.129.2.18,nw_dst=10.129.2.20,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 ----- - -And the corresponding OVN audit log entry you find on the logging server might look like this: - -[source,json] ----- -{ - "@timestamp" : "2021-07-06T08:26:58..687000+00:00", - "hostname":"ip.abc.iternal", - "level":"info", - "message" : "2021-07-06T08:26:58.687Z|00004|acl_log(ovn_pinctrl0)|INFO|name=\"verify-audit-logging_deny-all\", verdict=drop, severity=alert: icmp,vlan_tci=0x0000,dl_src=0a:58:0a:81:02:12,dl_dst=0a:58:0a:81:02:14,nw_src=10.129.2.18,nw_dst=10.129.2.20,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0" -} ----- - -Where: - -* `@timestamp` is the timestamp of the log entry. -* `hostname` is the node from which the log originated. -* `level` is the log entry. -* `message` is the original audit log message. - -[NOTE] -==== -On an Elasticsearch server, look for log entries whose indices begin with `audit-00000`. -==== - -.Troubleshooting - -. Verify that your {product-title} cluster meets all the prerequisites. -. Verify that you have completed the procedure. -. Verify that the nodes generating OVN logs are enabled and have `/var/log/ovn/acl-audit-log.log` files. -. Check the Fluentd pod logs for issues. diff --git a/modules/cluster-logging-collector-limits.adoc b/modules/cluster-logging-collector-limits.adoc deleted file mode 100644 index 32de5dd04337..000000000000 --- a/modules/cluster-logging-collector-limits.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-collector.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-collector-limits_{context}"] -= Configure log collector CPU and memory limits - -The log collector allows for adjustments to both the CPU and memory limits. - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc -n openshift-logging edit ClusterLogging instance ----- -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - namespace: openshift-logging - -... - -spec: - collection: - logs: - fluentd: - resources: - limits: <1> - memory: 736Mi - requests: - cpu: 100m - memory: 736Mi ----- -<1> Specify the CPU and memory limits and requests as needed. The values shown are the default values. diff --git a/modules/cluster-logging-collector-log-forward-cloudwatch.adoc b/modules/cluster-logging-collector-log-forward-cloudwatch.adoc deleted file mode 100644 index 45e11be8b076..000000000000 --- a/modules/cluster-logging-collector-log-forward-cloudwatch.adoc +++ /dev/null @@ -1,290 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-cloudwatch_{context}"] -= Forwarding logs to Amazon CloudWatch - -You can forward logs to Amazon CloudWatch, a monitoring and log storage service hosted by Amazon Web Services (AWS). You can forward logs to CloudWatch in addition to, or instead of, the default log store. - -To configure log forwarding to CloudWatch, you must create a `ClusterLogForwarder` custom resource (CR) with an output for CloudWatch, and a pipeline that uses the output. - -.Procedure - -. 
Create a `Secret` YAML file that uses the `aws_access_key_id` and `aws_secret_access_key` fields to specify your base64-encoded AWS credentials. For example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: cw-secret - namespace: openshift-logging -data: - aws_access_key_id: QUtJQUlPU0ZPRE5ON0VYQU1QTEUK - aws_secret_access_key: d0phbHJYVXRuRkVNSS9LN01ERU5HL2JQeFJmaUNZRVhBTVBMRUtFWQo= ----- - -. Create the secret. For example: -+ -[source,terminal] ----- -$ oc apply -f cw-secret.yaml ----- - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object. In the file, specify the name of the secret. For example: -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -metadata: - name: instance <1> - namespace: openshift-logging <2> -spec: - outputs: - - name: cw <3> - type: cloudwatch <4> - cloudwatch: - groupBy: logType <5> - groupPrefix: <6> - region: us-east-2 <7> - secret: - name: cw-secret <8> - pipelines: - - name: infra-logs <9> - inputRefs: <10> - - infrastructure - - audit - - application - outputRefs: - - cw <11> ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> Specify a name for the output. -<4> Specify the `cloudwatch` type. -<5> Optional: Specify how to group the logs: -+ -* `logType` creates log groups for each log type -* `namespaceName` creates a log group for each application name space. It also creates separate log groups for infrastructure and audit logs. -* `namespaceUUID` creates a new log groups for each application namespace UUID. It also creates separate log groups for infrastructure and audit logs. -<6> Optional: Specify a string to replace the default `infrastructureName` prefix in the names of the log groups. -<7> Specify the AWS region. -<8> Specify the name of the secret that contains your AWS credentials. -<9> Optional: Specify a name for the pipeline. -<10> Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -<11> Specify the name of the output to use when forwarding logs with this pipeline. - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -.Example: Using ClusterLogForwarder with Amazon CloudWatch - -Here, you see an example `ClusterLogForwarder` custom resource (CR) and the log data that it outputs to Amazon CloudWatch. - -Suppose that you are running -ifndef::openshift-rosa[] -an {product-title} cluster -endif::[] -ifdef::openshift-rosa[] -a ROSA cluster -endif::[] -named `mycluster`. The following command returns the cluster's `infrastructureName`, which you will use to compose `aws` commands later on: - -[source,terminal] ----- -$ oc get Infrastructure/cluster -ojson | jq .status.infrastructureName -"mycluster-7977k" ----- - -To generate log data for this example, you run a `busybox` pod in a namespace called `app`. The `busybox` pod writes a message to stdout every three seconds: - -[source,terminal] ----- -$ oc run busybox --image=busybox -- sh -c 'while true; do echo "My life is my message"; sleep 3; done' -$ oc logs -f busybox -My life is my message -My life is my message -My life is my message -... 
----- - -You can look up the UUID of the `app` namespace where the `busybox` pod runs: - -[source,terminal] ----- -$ oc get ns/app -ojson | jq .metadata.uid -"794e1e1a-b9f5-4958-a190-e76a9b53d7bf" ----- - -In your `ClusterLogForwarder` custom resource (CR), you configure the `infrastructure`, `audit`, and `application` log types as inputs to the `all-logs` pipeline. You also connect this pipeline to `cw` output, which forwards the logs to a CloudWatch instance in the `us-east-2` region: - -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - outputs: - - name: cw - type: cloudwatch - cloudwatch: - groupBy: logType - region: us-east-2 - secret: - name: cw-secret - pipelines: - - name: all-logs - inputRefs: - - infrastructure - - audit - - application - outputRefs: - - cw ----- - -Each region in CloudWatch contains three levels of objects: - -* log group -** log stream -*** log event - - -With `groupBy: logType` in the `ClusterLogForwarding` CR, the three log types in the `inputRefs` produce three log groups in Amazon Cloudwatch: - -[source,terminal] ----- -$ aws --output json logs describe-log-groups | jq .logGroups[].logGroupName -"mycluster-7977k.application" -"mycluster-7977k.audit" -"mycluster-7977k.infrastructure" ----- - -Each of the log groups contains log streams: - -[source,terminal] ----- -$ aws --output json logs describe-log-streams --log-group-name mycluster-7977k.application | jq .logStreams[].logStreamName -"kubernetes.var.log.containers.busybox_app_busybox-da085893053e20beddd6747acdbaf98e77c37718f85a7f6a4facf09ca195ad76.log" ----- - -[source,terminal] ----- -$ aws --output json logs describe-log-streams --log-group-name mycluster-7977k.audit | jq .logStreams[].logStreamName -"ip-10-0-131-228.us-east-2.compute.internal.k8s-audit.log" -"ip-10-0-131-228.us-east-2.compute.internal.linux-audit.log" -"ip-10-0-131-228.us-east-2.compute.internal.openshift-audit.log" -... ----- - -[source,terminal] ----- -$ aws --output json logs describe-log-streams --log-group-name mycluster-7977k.infrastructure | jq .logStreams[].logStreamName -"ip-10-0-131-228.us-east-2.compute.internal.kubernetes.var.log.containers.apiserver-69f9fd9b58-zqzw5_openshift-oauth-apiserver_oauth-apiserver-453c5c4ee026fe20a6139ba6b1cdd1bed25989c905bf5ac5ca211b7cbb5c3d7b.log" -"ip-10-0-131-228.us-east-2.compute.internal.kubernetes.var.log.containers.apiserver-797774f7c5-lftrx_openshift-apiserver_openshift-apiserver-ce51532df7d4e4d5f21c4f4be05f6575b93196336be0027067fd7d93d70f66a4.log" -"ip-10-0-131-228.us-east-2.compute.internal.kubernetes.var.log.containers.apiserver-797774f7c5-lftrx_openshift-apiserver_openshift-apiserver-check-endpoints-82a9096b5931b5c3b1d6dc4b66113252da4a6472c9fff48623baee761911a9ef.log" -... ----- - -Each log stream contains log events. 
To see a log event from the `busybox` Pod, you specify its log stream from the `application` log group: - -[source,terminal] ----- -$ aws logs get-log-events --log-group-name mycluster-7977k.application --log-stream-name kubernetes.var.log.containers.busybox_app_busybox-da085893053e20beddd6747acdbaf98e77c37718f85a7f6a4facf09ca195ad76.log -{ - "events": [ - { - "timestamp": 1629422704178, - "message": "{\"docker\":{\"container_id\":\"da085893053e20beddd6747acdbaf98e77c37718f85a7f6a4facf09ca195ad76\"},\"kubernetes\":{\"container_name\":\"busybox\",\"namespace_name\":\"app\",\"pod_name\":\"busybox\",\"container_image\":\"docker.io/library/busybox:latest\",\"container_image_id\":\"docker.io/library/busybox@sha256:0f354ec1728d9ff32edcd7d1b8bbdfc798277ad36120dc3dc683be44524c8b60\",\"pod_id\":\"870be234-90a3-4258-b73f-4f4d6e2777c7\",\"host\":\"ip-10-0-216-3.us-east-2.compute.internal\",\"labels\":{\"run\":\"busybox\"},\"master_url\":\"https://kubernetes.default.svc\",\"namespace_id\":\"794e1e1a-b9f5-4958-a190-e76a9b53d7bf\",\"namespace_labels\":{\"kubernetes_io/metadata_name\":\"app\"}},\"message\":\"My life is my message\",\"level\":\"unknown\",\"hostname\":\"ip-10-0-216-3.us-east-2.compute.internal\",\"pipeline_metadata\":{\"collector\":{\"ipaddr4\":\"10.0.216.3\",\"inputname\":\"fluent-plugin-systemd\",\"name\":\"fluentd\",\"received_at\":\"2021-08-20T01:25:08.085760+00:00\",\"version\":\"1.7.4 1.6.0\"}},\"@timestamp\":\"2021-08-20T01:25:04.178986+00:00\",\"viaq_index_name\":\"app-write\",\"viaq_msg_id\":\"NWRjZmUyMWQtZjgzNC00MjI4LTk3MjMtNTk3NmY3ZjU4NDk1\",\"log_type\":\"application\",\"time\":\"2021-08-20T01:25:04+00:00\"}", - "ingestionTime": 1629422744016 - }, -... ----- - -.Example: Customizing the prefix in log group names - -In the log group names, you can replace the default `infrastructureName` prefix, `mycluster-7977k`, with an arbitrary string like `demo-group-prefix`. To make this change, you update the `groupPrefix` field in the `ClusterLogForwarding` CR: - -[source,yaml] ----- -cloudwatch: - groupBy: logType - groupPrefix: demo-group-prefix - region: us-east-2 ----- - -The value of `groupPrefix` replaces the default `infrastructureName` prefix: - -[source,terminal] ----- -$ aws --output json logs describe-log-groups | jq .logGroups[].logGroupName -"demo-group-prefix.application" -"demo-group-prefix.audit" -"demo-group-prefix.infrastructure" ----- - -.Example: Naming log groups after application namespace names - -For each application namespace in your cluster, you can create a log group in CloudWatch whose name is based on the name of the application namespace. - -If you delete an application namespace object and create a new one that has the same name, CloudWatch continues using the same log group as before. - -If you consider successive application namespace objects that have the same name as equivalent to each other, use the approach described in this example. Otherwise, if you need to distinguish the resulting log groups from each other, see the following "Naming log groups for application namespace UUIDs" section instead. - -To create application log groups whose names are based on the names of the application namespaces, you set the value of the `groupBy` field to `namespaceName` in the `ClusterLogForwarder` CR: - -[source,terminal] ----- -cloudwatch: - groupBy: namespaceName - region: us-east-2 ----- - -Setting `groupBy` to `namespaceName` affects the application log group only. It does not affect the `audit` and `infrastructure` log groups. 
- -In Amazon Cloudwatch, the namespace name appears at the end of each log group name. Because there is a single application namespace, "app", the following output shows a new `mycluster-7977k.app` log group instead of `mycluster-7977k.application`: - -[source,terminal] ----- -$ aws --output json logs describe-log-groups | jq .logGroups[].logGroupName -"mycluster-7977k.app" -"mycluster-7977k.audit" -"mycluster-7977k.infrastructure" ----- - -If the cluster in this example had contained multiple application namespaces, the output would show multiple log groups, one for each namespace. - -The `groupBy` field affects the application log group only. It does not affect the `audit` and `infrastructure` log groups. - -.Example: Naming log groups after application namespace UUIDs - -For each application namespace in your cluster, you can create a log group in CloudWatch whose name is based on the UUID of the application namespace. - -If you delete an application namespace object and create a new one, CloudWatch creates a new log group. - -If you consider successive application namespace objects with the same name as different from each other, use the approach described in this example. Otherwise, see the preceding "Example: Naming log groups for application namespace names" section instead. - -To name log groups after application namespace UUIDs, you set the value of the `groupBy` field to `namespaceUUID` in the `ClusterLogForwarder` CR: - -[source,terminal] ----- -cloudwatch: - groupBy: namespaceUUID - region: us-east-2 ----- - -In Amazon Cloudwatch, the namespace UUID appears at the end of each log group name. Because there is a single application namespace, "app", the following output shows a new `mycluster-7977k.794e1e1a-b9f5-4958-a190-e76a9b53d7bf` log group instead of `mycluster-7977k.application`: - -[source,terminal] ----- -$ aws --output json logs describe-log-groups | jq .logGroups[].logGroupName -"mycluster-7977k.794e1e1a-b9f5-4958-a190-e76a9b53d7bf" // uid of the "app" namespace -"mycluster-7977k.audit" -"mycluster-7977k.infrastructure" ----- - -The `groupBy` field affects the application log group only. It does not affect the `audit` and `infrastructure` log groups. \ No newline at end of file diff --git a/modules/cluster-logging-collector-log-forward-es.adoc b/modules/cluster-logging-collector-log-forward-es.adoc deleted file mode 100644 index e95fc93311e7..000000000000 --- a/modules/cluster-logging-collector-log-forward-es.adoc +++ /dev/null @@ -1,139 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-es_{context}"] -= Forwarding logs to an external Elasticsearch instance - -You can optionally forward logs to an external Elasticsearch instance in addition to, or instead of, the internal {product-title} Elasticsearch instance. You are responsible for configuring the external log aggregator to receive log data from {product-title}. - -To configure log forwarding to an external Elasticsearch instance, you must create a `ClusterLogForwarder` custom resource (CR) with an output to that instance, and a pipeline that uses the output. The external Elasticsearch output can use the HTTP (insecure) or HTTPS (secure HTTP) connection. - -To forward logs to both an external and the internal Elasticsearch instance, create outputs and pipelines to the external instance and a pipeline that uses the `default` output to forward logs to the internal instance. You do not need to create a `default` output. 
If you do configure a `default` output, you receive an error message because the `default` output is reserved for the Red Hat OpenShift Logging Operator. - -[NOTE] -==== -If you want to forward logs to *only* the internal {product-title} Elasticsearch instance, you do not need to create a `ClusterLogForwarder` CR. -==== - -.Prerequisites - -* You must have a logging server that is configured to receive the logging data using the specified protocol or format. - -.Procedure - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -metadata: - name: instance <1> - namespace: openshift-logging <2> -spec: - outputs: - - name: elasticsearch-insecure <3> - type: "elasticsearch" <4> - url: http://elasticsearch.insecure.com:9200 <5> - - name: elasticsearch-secure - type: "elasticsearch" - url: https://elasticsearch.secure.com:9200 <6> - secret: - name: es-secret <7> - pipelines: - - name: application-logs <8> - inputRefs: <9> - - application - - audit - outputRefs: - - elasticsearch-secure <10> - - default <11> - parse: json <12> - labels: - myLabel: "myValue" <13> - - name: infrastructure-audit-logs <14> - inputRefs: - - infrastructure - outputRefs: - - elasticsearch-insecure - labels: - logs: "audit-infra" ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> Specify a name for the output. -<4> Specify the `elasticsearch` type. -<5> Specify the URL and port of the external Elasticsearch instance as a valid absolute URL. You can use the `http` (insecure) or `https` (secure HTTP) protocol. If the cluster-wide proxy using the CIDR annotation is enabled, the output must be a server name or FQDN, not an IP Address. -<6> For a secure connection, you can specify an `https` or `http` URL that you authenticate by specifying a `secret`. -<7> For an `https` prefix, specify the name of the secret required by the endpoint for TLS communication. The secret must exist in the `openshift-logging` project, and must have keys of: *tls.crt*, *tls.key*, and *ca-bundle.crt* that point to the respective certificates that they represent. Otherwise, for `http` and `https` prefixes, you can specify a secret that contains a username and password. For more information, see the following "Example: Setting secret that contains a username and password." -<8> Optional: Specify a name for the pipeline. -<9> Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -<10> Specify the name of the output to use when forwarding logs with this pipeline. -<11> Optional: Specify the `default` output to send the logs to the internal Elasticsearch instance. -<12> Optional: Specify whether to forward structured JSON log entries as JSON objects in the `structured` field. The log entry must contain valid structured JSON; otherwise, OpenShift Logging removes the `structured` field and instead sends the log entry to the default index, `app-00000x`. -<13> Optional: String. One or more labels to add to the logs. -<14> Optional: Configure multiple outputs to forward logs to other external log aggregators of any supported type: -** A name to describe the pipeline. -** The `inputRefs` is the log type to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -** The `outputRefs` is the name of the output to use. -** Optional: String. 
One or more labels to add to the logs. - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -.Example: Setting a secret that contains a username and password - -You can use a secret that contains a username and password to authenticate a secure connection to an external Elasticsearch instance. - -For example, if you cannot use mutual TLS (mTLS) keys because a third party operates the Elasticsearch instance, you can use HTTP or HTTPS and set a secret that contains the username and password. - -. Create a `Secret` YAML file similar to the following example. Use base64-encoded values for the `username` and `password` fields. The secret type is opaque by default. -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: openshift-test-secret -data: - username: - password: ----- - -. Create the secret: -+ -[source,terminal] ----- -$ oc create -n openshift-logging -f openshift-test-secret.yaml ----- - -. Specify the name of the secret in the `ClusterLogForwarder` CR: -+ -[source,yaml] ----- -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - outputs: - - name: elasticsearch - type: "elasticsearch" - url: https://elasticsearch.secure.com:9200 - secret: - name: openshift-test-secret ----- -+ -[NOTE] -==== -In the value of the `url` field, the prefix can be `http` or `https`. -==== - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- diff --git a/modules/cluster-logging-collector-log-forward-fluentd.adoc b/modules/cluster-logging-collector-log-forward-fluentd.adoc deleted file mode 100644 index cf1c1f6384b0..000000000000 --- a/modules/cluster-logging-collector-log-forward-fluentd.adoc +++ /dev/null @@ -1,98 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-fluentd_{context}"] -= Forwarding logs using the Fluentd forward protocol - -You can use the Fluentd *forward* protocol to send a copy of your logs to an external log aggregator that is configured to accept the protocol instead of, or in addition to, the default Elasticsearch log store. You are responsible for configuring the external log aggregator to receive the logs from {product-title}. - -To configure log forwarding using the *forward* protocol, you must create a `ClusterLogForwarder` custom resource (CR) with one or more outputs to the Fluentd servers, and pipelines that use those outputs. The Fluentd output can use a TCP (insecure) or TLS (secure TCP) connection. - -[NOTE] -==== -Alternately, you can use a config map to forward logs using the *forward* protocols. However, this method is deprecated in {product-title} and will be removed in a future release. -==== - -.Prerequisites - -* You must have a logging server that is configured to receive the logging data using the specified protocol or format. - -.Procedure - -. 
Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance <1> - namespace: openshift-logging <2> -spec: - outputs: - - name: fluentd-server-secure <3> - type: fluentdForward <4> - url: 'tls://fluentdserver.security.example.com:24224' <5> - secret: <6> - name: fluentd-secret - passphrase: phrase <7> - - name: fluentd-server-insecure - type: fluentdForward - url: 'tcp://fluentdserver.home.example.com:24224' - pipelines: - - name: forward-to-fluentd-secure <8> - inputRefs: <9> - - application - - audit - outputRefs: - - fluentd-server-secure <10> - - default <11> - parse: json <12> - labels: - clusterId: "C1234" <13> - - name: forward-to-fluentd-insecure <14> - inputRefs: - - infrastructure - outputRefs: - - fluentd-server-insecure - labels: - clusterId: "C1234" ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> Specify a name for the output. -<4> Specify the `fluentdForward` type. -<5> Specify the URL and port of the external Fluentd instance as a valid absolute URL. You can use the `tcp` (insecure) or `tls` (secure TCP) protocol. If the cluster-wide proxy using the CIDR annotation is enabled, the output must be a server name or FQDN, not an IP address. -<6> If using a `tls` prefix, you must specify the name of the secret required by the endpoint for TLS communication. The secret must exist in the `openshift-logging` project, and must have keys of: *tls.crt*, *tls.key*, and *ca-bundle.crt* that point to the respective certificates that they represent. -<7> Optional: Specify the password or passphrase that protects the private key file. -<8> Optional: Specify a name for the pipeline. -<9> Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -<10> Specify the name of the output to use when forwarding logs with this pipeline. -<11> Optional: Specify the `default` output to forward logs to the internal Elasticsearch instance. -<12> Optional: Specify whether to forward structured JSON log entries as JSON objects in the `structured` field. The log entry must contain valid structured JSON; otherwise, OpenShift Logging removes the `structured` field and instead sends the log entry to the default index, `app-00000x`. -<13> Optional: String. One or more labels to add to the logs. -<14> Optional: Configure multiple outputs to forward logs to other external log aggregators of any supported type: -** A name to describe the pipeline. -** The `inputRefs` is the log type to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -** The `outputRefs` is the name of the output to use. -** Optional: String. One or more labels to add to the logs. - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -[id="cluster-logging-collector-log-forward-nano-precision"] -== Enabling nanosecond precision for Logstash to ingest data from fluentd -For Logstash to ingest log data from fluentd, you must enable nanosecond precision in the Logstash configuration file. - -.Procedure -* In the Logstash configuration file, set `nanosecond_precision` to `true`. - -.Example Logstash configuration file -[source,terminal] -.... -input { tcp { codec => fluent { nanosecond_precision => true } port => 24114 } } -filter { } -output { stdout { codec => rubydebug } } -.... 
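For reference, the receiving Fluentd server must have a *forward* input listening on the port that you set in the `ClusterLogForwarder` output URL. The following is a minimal sketch of such a receiver configuration, assuming an insecure TCP listener on port 24224 that writes the received records to stdout; a production aggregator would add TLS and its own routing:

[source,text]
----
# Accept records sent with the Fluentd forward protocol.
<source>
  @type forward
  port 24224
  bind 0.0.0.0
</source>

# Print everything that arrives; replace with your own output plugins.
<match **>
  @type stdout
</match>
----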
diff --git a/modules/cluster-logging-collector-log-forward-gcp.adoc b/modules/cluster-logging-collector-log-forward-gcp.adoc deleted file mode 100644 index 9dfe1b9a7689..000000000000 --- a/modules/cluster-logging-collector-log-forward-gcp.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// cluster-logging-external.adoc -// - -:_content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-gcp_{context}"] -= Forwarding logs to Google Cloud Platform (GCP) - -You can forward logs to link:https://cloud.google.com/logging/docs/basic-concepts[Google Cloud Logging] in addition to, or instead of, the internal default {product-title} log store. - -[NOTE] -==== -Using this feature with Fluentd is not supported. -==== - -.Prerequisites -* {logging-title-uc} Operator 5.5.1 and later - -.Procedure - -. Create a secret using your link:https://cloud.google.com/iam/docs/creating-managing-service-account-keys[Google service account key]. -+ -[source,terminal,subs="+quotes"] ----- -$ oc -n openshift-logging create secret generic gcp-secret --from-file google-application-credentials.json=__ ----- -. Create a `ClusterLogForwarder` Custom Resource YAML using the template below: -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogForwarder" -metadata: - name: "instance" - namespace: "openshift-logging" -spec: - outputs: - - name: gcp-1 - type: googleCloudLogging - secret: - name: gcp-secret - googleCloudLogging: - projectId : "openshift-gce-devel" <1> - logId : "app-gcp" <2> - pipelines: - - name: test-app - inputRefs: <3> - - application - outputRefs: - - gcp-1 ----- -<1> Set either a `projectId`, `folderId`, `organizationId`, or `billingAccountId` field and its corresponding value, depending on where you want to store your logs in the link:https://cloud.google.com/resource-manager/docs/cloud-platform-resource-hierarchy[GCP resource hierarchy]. -<2> Set the value to add to the `logName` field of the link:https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry[Log Entry]. -<3> Specify which log types to forward by using the pipeline: `application`, `infrastructure`, or `audit`. - -[role="_additional-resources"] -.Additional resources -* link:https://cloud.google.com/billing/docs/concepts[Google Cloud Billing Documentation] -* link:https://cloud.google.com/logging/docs/view/logging-query-language[Google Cloud Logging Query Language Documentation] diff --git a/modules/cluster-logging-collector-log-forward-kafka.adoc b/modules/cluster-logging-collector-log-forward-kafka.adoc deleted file mode 100644 index a6a611cf2747..000000000000 --- a/modules/cluster-logging-collector-log-forward-kafka.adoc +++ /dev/null @@ -1,105 +0,0 @@ -[id="cluster-logging-collector-log-forward-kafka_{context}"] -= Forwarding logs to a Kafka broker - -You can forward logs to an external Kafka broker in addition to, or instead of, the default Elasticsearch log store. - -To configure log forwarding to an external Kafka instance, you must create a `ClusterLogForwarder` custom resource (CR) with an output to that instance, and a pipeline that uses the output. You can include a specific Kafka topic in the output or use the default. The Kafka output can use a TCP (insecure) or TLS (secure TCP) connection. - -.Procedure - -. 
Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance <1> - namespace: openshift-logging <2> -spec: - outputs: - - name: app-logs <3> - type: kafka <4> - url: tls://kafka.example.devlab.com:9093/app-topic <5> - secret: - name: kafka-secret <6> - - name: infra-logs - type: kafka - url: tcp://kafka.devlab2.example.com:9093/infra-topic <7> - - name: audit-logs - type: kafka - url: tls://kafka.qelab.example.com:9093/audit-topic - secret: - name: kafka-secret-qe - pipelines: - - name: app-topic <8> - inputRefs: <9> - - application - outputRefs: <10> - - app-logs - parse: json <11> - labels: - logType: "application" <12> - - name: infra-topic <13> - inputRefs: - - infrastructure - outputRefs: - - infra-logs - labels: - logType: "infra" - - name: audit-topic - inputRefs: - - audit - outputRefs: - - audit-logs - - default <14> - labels: - logType: "audit" ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> Specify a name for the output. -<4> Specify the `kafka` type. -<5> Specify the URL and port of the Kafka broker as a valid absolute URL, optionally with a specific topic. You can use the `tcp` (insecure) or `tls` (secure TCP) protocol. If the cluster-wide proxy using the CIDR annotation is enabled, the output must be a server name or FQDN, not an IP address. -<6> If using a `tls` prefix, you must specify the name of the secret required by the endpoint for TLS communication. The secret must exist in the `openshift-logging` project, and must have keys of: *tls.crt*, *tls.key*, and *ca-bundle.crt* that point to the respective certificates that they represent. -<7> Optional: To send an insecure output, use a `tcp` prefix in front of the URL. Also omit the `secret` key and its `name` from this output. -<8> Optional: Specify a name for the pipeline. -<9> Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -<10> Specify the name of the output to use when forwarding logs with this pipeline. -<11> Optional: Specify whether to forward structured JSON log entries as JSON objects in the `structured` field. The log entry must contain valid structured JSON; otherwise, OpenShift Logging removes the `structured` field and instead sends the log entry to the default index, `app-00000x`. -<12> Optional: String. One or more labels to add to the logs. -<13> Optional: Configure multiple outputs to forward logs to other external log aggregators of any supported type: -** A name to describe the pipeline. -** The `inputRefs` is the log type to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -** The `outputRefs` is the name of the output to use. -** Optional: String. One or more labels to add to the logs. -<14> Optional: Specify `default` to forward logs to the internal Elasticsearch instance. - -. Optional: To forward a single output to multiple Kafka brokers, specify an array of Kafka brokers as shown in this example: -+ -[source,yaml] ----- -... -spec: - outputs: - - name: app-logs - type: kafka - secret: - name: kafka-secret-dev - kafka: <1> - brokers: <2> - - tls://kafka-broker1.example.com:9093/ - - tls://kafka-broker2.example.com:9093/ - topic: app-topic <3> -... ----- -<1> Specify a `kafka` key that has a `brokers` and `topic` key. 
-<2> Use the `brokers` key to specify an array of one or more brokers. -<3> Use the `topic` key to specify the target topic that will receive the logs. - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- diff --git a/modules/cluster-logging-collector-log-forward-logs-from-application-pods.adoc b/modules/cluster-logging-collector-log-forward-logs-from-application-pods.adoc deleted file mode 100644 index d12fbfe96cd8..000000000000 --- a/modules/cluster-logging-collector-log-forward-logs-from-application-pods.adoc +++ /dev/null @@ -1,73 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-logs-from-application-pods_{context}"] -= Forwarding application logs from specific pods - -As a cluster administrator, you can use Kubernetes pod labels to gather log data from specific pods and forward it to a log collector. - -Suppose that you have an application composed of pods running alongside other pods in various namespaces. If those pods have labels that identify the application, you can gather and output their log data to a specific log collector. - -To specify the pod labels, you use one or more `matchLabels` key-value pairs. If you specify multiple key-value pairs, the pods must match all of them to be selected. - -.Procedure - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object. In the file, specify the pod labels using simple equality-based selectors under `inputs[].name.application.selector.matchLabels`, as shown in the following example. -+ -.Example `ClusterLogForwarder` CR YAML file -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance <1> - namespace: openshift-logging <2> -spec: - pipelines: - - inputRefs: [ myAppLogData ] <3> - outputRefs: [ default ] <4> - parse: json <5> - inputs: <6> - - name: myAppLogData - application: - selector: - matchLabels: <7> - environment: production - app: nginx - namespaces: <8> - - app1 - - app2 - outputs: <9> - - default - ... ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> Specify one or more comma-separated values from `inputs[].name`. -<4> Specify one or more comma-separated values from `outputs[]`. -<5> Optional: Specify whether to forward structured JSON log entries as JSON objects in the `structured` field. The log entry must contain valid structured JSON; otherwise, OpenShift Logging removes the `structured` field and instead sends the log entry to the default index, `app-00000x`. -<6> Define a unique `inputs[].name` for each application that has a unique set of pod labels. -<7> Specify the key-value pairs of pod labels whose log data you want to gather. You must specify both a key and value, not just a key. To be selected, the pods must match all the key-value pairs. -<8> Optional: Specify one or more namespaces. -<9> Specify one or more outputs to forward your log data to. The optional `default` output shown here sends log data to the internal Elasticsearch instance. - -. Optional: To restrict the gathering of log data to specific namespaces, use `inputs[].name.application.namespaces`, as shown in the preceding example. - -. Optional: You can send log data from additional applications that have different pod labels to the same pipeline. -.. For each unique combination of pod labels, create an additional `inputs[].name` section similar to the one shown. -.. 
Update the `selectors` to match the pod labels of this application. -.. Add the new `inputs[].name` value to `inputRefs`. For example: -+ ----- -- inputRefs: [ myAppLogData, myOtherAppLogData ] ----- - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -[role="_additional-resources"] -.Additional resources - -* For more information on `matchLabels` in Kubernetes, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#resources-that-support-set-based-requirements[Resources that support set-based requirements]. diff --git a/modules/cluster-logging-collector-log-forward-loki.adoc b/modules/cluster-logging-collector-log-forward-loki.adoc deleted file mode 100644 index 82845208ad83..000000000000 --- a/modules/cluster-logging-collector-log-forward-loki.adoc +++ /dev/null @@ -1,72 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-loki_{context}"] -= Forwarding logs to Loki - -You can forward logs to an external Loki logging system in addition to, or instead of, the internal default {product-title} Elasticsearch instance. - -To configure log forwarding to Loki, you must create a `ClusterLogForwarder` custom resource (CR) with an output to Loki, and a pipeline that uses the output. The output to Loki can use the HTTP (insecure) or HTTPS (secure HTTP) connection. - -.Prerequisites - -* You must have a Loki logging system running at the URL you specify with the `url` field in the CR. - -.Procedure - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -[source,yaml] ----- - apiVersion: "logging.openshift.io/v1" - kind: ClusterLogForwarder - metadata: - name: instance <1> - namespace: openshift-logging <2> - spec: - outputs: - - name: loki-insecure <3> - type: "loki" <4> - url: http://loki.insecure.com:3100 <5> - loki: - tenantKey: kubernetes.namespace_name - labelKeys: kubernetes.labels.foo - - name: loki-secure <6> - type: "loki" - url: https://loki.secure.com:3100 - secret: - name: loki-secret <7> - loki: - tenantKey: kubernetes.namespace_name <8> - labelKeys: kubernetes.labels.foo <9> - pipelines: - - name: application-logs <10> - inputRefs: <11> - - application - - audit - outputRefs: <12> - - loki-secure ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> Specify a name for the output. -<4> Specify the type as `"loki"`. -<5> Specify the URL and port of the Loki system as a valid absolute URL. You can use the `http` (insecure) or `https` (secure HTTP) protocol. If the cluster-wide proxy using the CIDR annotation is enabled, the output must be a server name or FQDN, not an IP Address. Loki's default port for HTTP(S) communication is 3100. -<6> For a secure connection, you can specify an `https` or `http` URL that you authenticate by specifying a `secret`. -<7> For an `https` prefix, specify the name of the secret required by the endpoint for TLS communication. The secret must exist in the `openshift-logging` project, and must have keys of: *tls.crt*, *tls.key*, and *ca-bundle.crt* that point to the respective certificates that they represent. Otherwise, for `http` and `https` prefixes, you can specify a secret that contains a username and password. For more information, see the following "Example: Setting secret that contains a username and password." -<8> Optional: Specify a meta-data key field to generate values for the `TenantID` field in Loki. 
For example, setting `tenantKey: kubernetes.namespace_name` uses the names of the Kubernetes namespaces as values for tenant IDs in Loki. To see which other log record fields you can specify, see the "Log Record Fields" link in the following "Additional resources" section. -<9> Optional: Specify a list of meta-data field keys to replace the default Loki labels. Loki label names must match the regular expression `[a-zA-Z_:][a-zA-Z0-9_:]*`. Illegal characters in meta-data keys are replaced with `_` to form the label name. For example, the `kubernetes.labels.foo` meta-data key becomes Loki label `kubernetes_labels_foo`. If you do not set `labelKeys`, the default value is: `[log_type, kubernetes.namespace_name, kubernetes.pod_name, kubernetes_host]`. Keep the set of labels small because Loki limits the size and number of labels allowed. See link:https://grafana.com/docs/loki/latest/configuration/#limits_config[Configuring Loki, limits_config]. You can still query based on any log record field using query filters. -<10> Optional: Specify a name for the pipeline. -<11> Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -<12> Specify the name of the output to use when forwarding logs with this pipeline. -+ -[NOTE] -==== -Because Loki requires log streams to be correctly ordered by timestamp, `labelKeys` always includes the `kubernetes_host` label set, even if you do not specify it. This inclusion ensures that each stream originates from a single host, which prevents timestamps from becoming disordered due to clock differences on different hosts. -==== - - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- diff --git a/modules/cluster-logging-collector-log-forward-project.adoc b/modules/cluster-logging-collector-log-forward-project.adoc deleted file mode 100644 index 19f8880edb5b..000000000000 --- a/modules/cluster-logging-collector-log-forward-project.adoc +++ /dev/null @@ -1,83 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-project_{context}"] -= Forwarding application logs from specific projects - -You can use the Cluster Log Forwarder to send a copy of the application logs from specific projects to an external log aggregator. You can do this in addition to, or instead of, using the default Elasticsearch log store. You must also configure the external log aggregator to receive log data from {product-title}. - -To configure forwarding application logs from a project, you must create a `ClusterLogForwarder` custom resource (CR) with at least one input from a project, optional outputs for other log aggregators, and pipelines that use those inputs and outputs. - -.Prerequisites - -* You must have a logging server that is configured to receive the logging data using the specified protocol or format. - -.Procedure - -. 
Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance <1> - namespace: openshift-logging <2> -spec: - outputs: - - name: fluentd-server-secure <3> - type: fluentdForward <4> - url: 'tls://fluentdserver.security.example.com:24224' <5> - secret: <6> - name: fluentd-secret - - name: fluentd-server-insecure - type: fluentdForward - url: 'tcp://fluentdserver.home.example.com:24224' - inputs: <7> - - name: my-app-logs - application: - namespaces: - - my-project - pipelines: - - name: forward-to-fluentd-insecure <8> - inputRefs: <9> - - my-app-logs - outputRefs: <10> - - fluentd-server-insecure - parse: json <11> - labels: - project: "my-project" <12> - - name: forward-to-fluentd-secure <13> - inputRefs: - - application - - audit - - infrastructure - outputRefs: - - fluentd-server-secure - - default - labels: - clusterId: "C1234" ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> Specify a name for the output. -<4> Specify the output type: `elasticsearch`, `fluentdForward`, `syslog`, or `kafka`. -<5> Specify the URL and port of the external log aggregator as a valid absolute URL. If the cluster-wide proxy using the CIDR annotation is enabled, the output must be a server name or FQDN, not an IP address. -<6> If using a `tls` prefix, you must specify the name of the secret required by the endpoint for TLS communication. The secret must exist in the `openshift-logging` project and have *tls.crt*, *tls.key*, and *ca-bundle.crt* keys that each point to the certificates they represent. -<7> Configuration for an input to filter application logs from the specified projects. -<8> Configuration for a pipeline to use the input to send project application logs to an external Fluentd instance. -<9> The `my-app-logs` input. -<10> The name of the output to use. -<11> Optional: Specify whether to forward structured JSON log entries as JSON objects in the `structured` field. The log entry must contain valid structured JSON; otherwise, OpenShift Logging removes the `structured` field and instead sends the log entry to the default index, `app-00000x`. -<12> Optional: String. One or more labels to add to the logs. -<13> Configuration for a pipeline to send logs to other log aggregators. -** Optional: Specify a name for the pipeline. -** Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -** Specify the name of the output to use when forwarding logs with this pipeline. -** Optional: Specify the `default` output to forward logs to the internal Elasticsearch instance. -** Optional: String. One or more labels to add to the logs. - -. 
Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- diff --git a/modules/cluster-logging-collector-log-forward-secret-cloudwatch.adoc b/modules/cluster-logging-collector-log-forward-secret-cloudwatch.adoc deleted file mode 100644 index fc8b131c687a..000000000000 --- a/modules/cluster-logging-collector-log-forward-secret-cloudwatch.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-external.adoc -// - -:_content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-secret-cloudwatch_{context}"] -== Creating a secret for AWS CloudWatch with an existing AWS role -If you have an existing role for AWS, you can create a secret for AWS with STS using the `oc create secret --from-literal` command. - -.Procedure - -* In the CLI, enter the following to generate a secret for AWS: -+ -[source,terminal] ----- -$ oc create secret generic cw-sts-secret -n openshift-logging --from-literal=role_arn=arn:aws:iam::123456789012:role/my-role_with-permissions ----- -+ -.Example Secret -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: openshift-logging - name: my-secret-name -stringData: - role_arn: arn:aws:iam::123456789012:role/my-role_with-permissions ----- diff --git a/modules/cluster-logging-collector-log-forward-syslog.adoc b/modules/cluster-logging-collector-log-forward-syslog.adoc deleted file mode 100644 index d5a72e9c2ef6..000000000000 --- a/modules/cluster-logging-collector-log-forward-syslog.adoc +++ /dev/null @@ -1,194 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-syslog_{context}"] -= Forwarding logs using the syslog protocol - -You can use the *syslog* link:https://tools.ietf.org/html/rfc3164[RFC3164] or link:https://tools.ietf.org/html/rfc5424[RFC5424] protocol to send a copy of your logs to an external log aggregator that is configured to accept the protocol instead of, or in addition to, the default Elasticsearch log store. You are responsible for configuring the external log aggregator, such as a syslog server, to receive the logs from {product-title}. - -To configure log forwarding using the *syslog* protocol, you must create a `ClusterLogForwarder` custom resource (CR) with one or more outputs to the syslog servers, and pipelines that use those outputs. The syslog output can use a UDP, TCP, or TLS connection. - -//SME-Feedback-Req: Is the below note accurate? -[NOTE] -==== -Alternately, you can use a config map to forward logs using the *syslog* RFC3164 protocols. However, this method is deprecated in {product-title} and will be removed in a future release. -==== - -.Prerequisites - -* You must have a logging server that is configured to receive the logging data using the specified protocol or format. - -.Procedure - -. 
Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance <1> - namespace: openshift-logging <2> -spec: - outputs: - - name: rsyslog-east <3> - type: syslog <4> - syslog: <5> - facility: local0 - rfc: RFC3164 - payloadKey: message - severity: informational - url: 'tls://rsyslogserver.east.example.com:514' <6> - secret: <7> - name: syslog-secret - - name: rsyslog-west - type: syslog - syslog: - appName: myapp - facility: user - msgID: mymsg - procID: myproc - rfc: RFC5424 - severity: debug - url: 'udp://rsyslogserver.west.example.com:514' - pipelines: - - name: syslog-east <8> - inputRefs: <9> - - audit - - application - outputRefs: <10> - - rsyslog-east - - default <11> - parse: json <12> - labels: - secure: "true" <13> - syslog: "east" - - name: syslog-west <14> - inputRefs: - - infrastructure - outputRefs: - - rsyslog-west - - default - labels: - syslog: "west" ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> Specify a name for the output. -<4> Specify the `syslog` type. -<5> Optional: Specify the syslog parameters, listed below. -<6> Specify the URL and port of the external syslog instance. You can use the `udp` (insecure), `tcp` (insecure) or `tls` (secure TCP) protocol. If the cluster-wide proxy using the CIDR annotation is enabled, the output must be a server name or FQDN, not an IP address. -<7> If using a `tls` prefix, you must specify the name of the secret required by the endpoint for TLS communication. The secret must exist in the `openshift-logging` project, and must have keys of: *tls.crt*, *tls.key*, and *ca-bundle.crt* that point to the respective certificates that they represent. -<8> Optional: Specify a name for the pipeline. -<9> Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -<10> Specify the name of the output to use when forwarding logs with this pipeline. -<11> Optional: Specify the `default` output to forward logs to the internal Elasticsearch instance. -<12> Optional: Specify whether to forward structured JSON log entries as JSON objects in the `structured` field. The log entry must contain valid structured JSON; otherwise, OpenShift Logging removes the `structured` field and instead sends the log entry to the default index, `app-00000x`. -<13> Optional: String. One or more labels to add to the logs. Quote values like "true" so they are recognized as string values, not as a boolean. -<14> Optional: Configure multiple outputs to forward logs to other external log aggregators of any supported type: -** A name to describe the pipeline. -** The `inputRefs` is the log type to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -** The `outputRefs` is the name of the output to use. -** Optional: String. One or more labels to add to the logs. - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -[id=cluster-logging-collector-log-forward-examples-syslog-log-source] -== Adding log source information to message output - -You can add `namespace_name`, `pod_name`, and `container_name` elements to the `message` field of the record by adding the `AddLogSource` field to your `ClusterLogForwarder` custom resource (CR). 
- -[source,yaml] ----- - spec: - outputs: - - name: syslogout - syslog: - addLogSource: true - facility: user - payloadKey: message - rfc: RFC3164 - severity: debug - tag: mytag - type: syslog - url: tls://syslog-receiver.openshift-logging.svc:24224 - pipelines: - - inputRefs: - - application - name: test-app - outputRefs: - - syslogout ----- - -[NOTE] -==== -This configuration is compatible with both RFC3164 and RFC5424. -==== - -.Example syslog message output without `AddLogSource` -[source, text] ----- -<15>1 2020-11-15T17:06:14+00:00 fluentd-9hkb4 mytag - - - {"msgcontent"=>"Message Contents", "timestamp"=>"2020-11-15 17:06:09", "tag_key"=>"rec_tag", "index"=>56} ----- - -.Example syslog message output with `AddLogSource` - -[source, text] ----- -<15>1 2020-11-16T10:49:37+00:00 crc-j55b9-master-0 mytag - - - namespace_name=clo-test-6327,pod_name=log-generator-ff9746c49-qxm7l,container_name=log-generator,message={"msgcontent":"My life is my message", "timestamp":"2020-11-16 10:49:36", "tag_key":"rec_tag", "index":76} ----- - -[id=cluster-logging-collector-log-forward-examples-syslog-parms] -== Syslog parameters - -You can configure the following for the `syslog` outputs. For more information, see the syslog link:https://tools.ietf.org/html/rfc3164[RFC3164] or link:https://tools.ietf.org/html/rfc5424[RFC5424] RFC. - -* facility: The link:https://tools.ietf.org/html/rfc5424#section-6.2.1[syslog facility]. The value can be a decimal integer or a case-insensitive keyword: -** `0` or `kern` for kernel messages -** `1` or `user` for user-level messages, the default. -** `2` or `mail` for the mail system -** `3` or `daemon` for system daemons -** `4` or `auth` for security/authentication messages -** `5` or `syslog` for messages generated internally by syslogd -** `6` or `lpr` for the line printer subsystem -** `7` or `news` for the network news subsystem -** `8` or `uucp` for the UUCP subsystem -** `9` or `cron` for the clock daemon -** `10` or `authpriv` for security authentication messages -** `11` or `ftp` for the FTP daemon -** `12` or `ntp` for the NTP subsystem -** `13` or `security` for the syslog audit log -** `14` or `console` for the syslog alert log -** `15` or `solaris-cron` for the scheduling daemon -** `16`–`23` or `local0` – `local7` for locally used facilities -* Optional: `payloadKey`: The record field to use as payload for the syslog message. -+ -[NOTE] -==== -Configuring the `payloadKey` parameter prevents other parameters from being forwarded to the syslog. -==== -+ -* rfc: The RFC to be used for sending logs using syslog. The default is RFC5424. -* severity: The link:https://tools.ietf.org/html/rfc5424#section-6.2.1[syslog severity] to set on outgoing syslog records. The value can be a decimal integer or a case-insensitive keyword: -** `0` or `Emergency` for messages indicating the system is unusable -** `1` or `Alert` for messages indicating action must be taken immediately -** `2` or `Critical` for messages indicating critical conditions -** `3` or `Error` for messages indicating error conditions -** `4` or `Warning` for messages indicating warning conditions -** `5` or `Notice` for messages indicating normal but significant conditions -** `6` or `Informational` for messages indicating informational messages -** `7` or `Debug` for messages indicating debug-level messages, the default -* tag: Tag specifies a record field to use as a tag on the syslog message. -* trimPrefix: Remove the specified prefix from the tag. 
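As a cross-check, the priority number at the start of each example syslog message above follows from the `facility` and `severity` values; this is a worked example, not an additional configuration field:

[source,text]
----
PRI = (facility code * 8) + severity code
    = (1 * 8) + 7          # facility "user", severity "debug"
    = 15                   # rendered as the "<15>" prefix in the example messages
----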
- -[id=cluster-logging-collector-log-forward-examples-syslog-5424] -== Additional RFC5424 syslog parameters - -The following parameters apply to RFC5424: - -* appName: The APP-NAME is a free-text string that identifies the application that sent the log. Must be specified for `RFC5424`. -* msgID: The MSGID is a free-text string that identifies the type of message. Must be specified for `RFC5424`. -* procID: The PROCID is a free-text string. A change in the value indicates a discontinuity in syslog reporting. Must be specified for `RFC5424`. diff --git a/modules/cluster-logging-collector-log-forwarding-about.adoc b/modules/cluster-logging-collector-log-forwarding-about.adoc deleted file mode 100644 index 41d98d4df978..000000000000 --- a/modules/cluster-logging-collector-log-forwarding-about.adoc +++ /dev/null @@ -1,197 +0,0 @@ -:_content-type: CONCEPT -[id="cluster-logging-collector-log-forwarding-about_{context}"] -= About forwarding logs to third-party systems - -To send logs to specific endpoints inside and outside your {product-title} cluster, you specify a combination of _outputs_ and _pipelines_ in a `ClusterLogForwarder` custom resource (CR). You can also use _inputs_ to forward the application logs associated with a specific project to an endpoint. Authentication is provided by a Kubernetes _Secret_ object. - -_output_:: The destination for log data that you define, or where you want the logs sent. An output can be one of the following types: -+ --- -* `elasticsearch`. An external Elasticsearch instance. The `elasticsearch` output can use a TLS connection. - -* `fluentdForward`. An external log aggregation solution that supports Fluentd. This option uses the Fluentd *forward* protocols. The `fluentForward` output can use a TCP or TLS connection and supports shared-key authentication by providing a *shared_key* field in a secret. Shared-key authentication can be used with or without TLS. - -* `syslog`. An external log aggregation solution that supports the syslog link:https://tools.ietf.org/html/rfc3164[RFC3164] or link:https://tools.ietf.org/html/rfc5424[RFC5424] protocols. The `syslog` output can use a UDP, TCP, or TLS connection. - -* `cloudwatch`. Amazon CloudWatch, a monitoring and log storage service hosted by Amazon Web Services (AWS). - -* `loki`. Loki, a horizontally scalable, highly available, multi-tenant log aggregation system. - -* `kafka`. A Kafka broker. The `kafka` output can use a TCP or TLS connection. - -* `default`. The internal {product-title} Elasticsearch instance. You are not required to configure the default output. If you do configure a `default` output, you receive an error message because the `default` output is reserved for the Red Hat OpenShift Logging Operator. --- -+ -_pipeline_:: Defines simple routing from one log type to one or more outputs, or which logs you want to send. The log types are one of the following: -+ --- -* `application`. Container logs generated by user applications running in the cluster, except infrastructure container applications. - -* `infrastructure`. Container logs from pods that run in the `openshift*`, `kube*`, or `default` projects and journal logs sourced from node file system. - -* `audit`. Audit logs generated by the node audit system, `auditd`, Kubernetes API server, OpenShift API server, and OVN network. --- -+ -You can add labels to outbound log messages by using `key:value` pairs in the pipeline. For example, you might add a label to messages that are forwarded to other data centers or label the logs by type. 
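Taken together, an RFC5424 output that sets these parameters might look like the following sketch. The output name, URL, secret name, and free-text values are illustrative only:

[source,yaml]
----
spec:
  outputs:
  - name: rsyslog-rfc5424
    type: syslog
    syslog:
      rfc: RFC5424
      facility: local0
      severity: informational
      appName: myapp
      msgID: mymsg
      procID: myproc
    url: 'tls://rsyslogserver.example.com:514'
    secret:
      name: syslog-secret
----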
Labels that are added to objects are also forwarded with the log message. - -_input_:: Forwards the application logs associated with a specific project to a pipeline. -+ --- -In the pipeline, you define which log types to forward using an `inputRef` parameter and where to forward the logs to using an `outputRef` parameter. --- -+ - -_Secret_:: A `key:value map` that contains confidential data such as user credentials. - -Note the following: - -* If a `ClusterLogForwarder` CR object exists, logs are not forwarded to the default Elasticsearch instance, unless there is a pipeline with the `default` output. - -* By default, the {logging} sends container and infrastructure logs to the default internal Elasticsearch log store defined in the `ClusterLogging` custom resource. However, it does not send audit logs to the internal store because it does not provide secure storage. If this default configuration meets your needs, do not configure the Log Forwarding API. - -* If you do not define a pipeline for a log type, the logs of the undefined types are dropped. For example, if you specify a pipeline for the `application` and `audit` types, but do not specify a pipeline for the `infrastructure` type, `infrastructure` logs are dropped. - -* You can use multiple types of outputs in the `ClusterLogForwarder` custom resource (CR) to send logs to servers that support different protocols. - -* The internal {product-title} Elasticsearch instance does not provide secure storage for audit logs. We recommend you ensure that the system to which you forward audit logs is compliant with your organizational and governmental regulations and is properly secured. The {logging} does not comply with those regulations. - -The following example forwards the audit logs to a secure external Elasticsearch instance, the infrastructure logs to an insecure external Elasticsearch instance, the application logs to a Kafka broker, and the application logs from the `my-apps-logs` project to the internal Elasticsearch instance. - -.Sample log forwarding outputs and pipelines -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -metadata: - name: instance <1> - namespace: openshift-logging <2> -spec: - outputs: - - name: elasticsearch-secure <3> - type: "elasticsearch" - url: https://elasticsearch.secure.com:9200 - secret: - name: elasticsearch - - name: elasticsearch-insecure <4> - type: "elasticsearch" - url: http://elasticsearch.insecure.com:9200 - - name: kafka-app <5> - type: "kafka" - url: tls://kafka.secure.com:9093/app-topic - inputs: <6> - - name: my-app-logs - application: - namespaces: - - my-project - pipelines: - - name: audit-logs <7> - inputRefs: - - audit - outputRefs: - - elasticsearch-secure - - default - parse: json <8> - labels: - secure: "true" <9> - datacenter: "east" - - name: infrastructure-logs <10> - inputRefs: - - infrastructure - outputRefs: - - elasticsearch-insecure - labels: - datacenter: "west" - - name: my-app <11> - inputRefs: - - my-app-logs - outputRefs: - - default - - inputRefs: <12> - - application - outputRefs: - - kafka-app - labels: - datacenter: "south" ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> Configuration for an secure Elasticsearch output using a secret with a secure URL. -** A name to describe the output. -** The type of output: `elasticsearch`. 
-** The secure URL and port of the Elasticsearch instance as a valid absolute URL, including the prefix. -** The secret required by the endpoint for TLS communication. The secret must exist in the `openshift-logging` project. -<4> Configuration for an insecure Elasticsearch output: -** A name to describe the output. -** The type of output: `elasticsearch`. -** The insecure URL and port of the Elasticsearch instance as a valid absolute URL, including the prefix. -<5> Configuration for a Kafka output using a client-authenticated TLS communication over a secure URL -** A name to describe the output. -** The type of output: `kafka`. -** Specify the URL and port of the Kafka broker as a valid absolute URL, including the prefix. -<6> Configuration for an input to filter application logs from the `my-project` namespace. -<7> Configuration for a pipeline to send audit logs to the secure external Elasticsearch instance: -** A name to describe the pipeline. -** The `inputRefs` is the log type, in this example `audit`. -** The `outputRefs` is the name of the output to use, in this example `elasticsearch-secure` to forward to the secure Elasticsearch instance and `default` to forward to the internal Elasticsearch instance. -** Optional: Labels to add to the logs. -<8> Optional: Specify whether to forward structured JSON log entries as JSON objects in the `structured` field. The log entry must contain valid structured JSON; otherwise, OpenShift Logging removes the `structured` field and instead sends the log entry to the default index, `app-00000x`. -<9> Optional: String. One or more labels to add to the logs. Quote values like "true" so they are recognized as string values, not as a boolean. -<10> Configuration for a pipeline to send infrastructure logs to the insecure external Elasticsearch instance. -<11> Configuration for a pipeline to send logs from the `my-project` project to the internal Elasticsearch instance. -** A name to describe the pipeline. -** The `inputRefs` is a specific input: `my-app-logs`. -** The `outputRefs` is `default`. -** Optional: String. One or more labels to add to the logs. -<12> Configuration for a pipeline to send logs to the Kafka broker, with no pipeline name: -** The `inputRefs` is the log type, in this example `application`. -** The `outputRefs` is the name of the output to use. -** Optional: String. One or more labels to add to the logs. - -[discrete] -[id="cluster-logging-external-fluentd_{context}"] -== Fluentd log handling when the external log aggregator is unavailable - -If your external logging aggregator becomes unavailable and cannot receive logs, Fluentd continues to collect logs and stores them in a buffer. When the log aggregator becomes available, log forwarding resumes, including the buffered logs. If the buffer fills completely, Fluentd stops collecting logs. {product-title} rotates the logs and deletes them. You cannot adjust the buffer size or add a persistent volume claim (PVC) to the Fluentd daemon set or pods. - -[discrete] -== Supported Authorization Keys -Common key types are provided here. Some output types support additional specialized keys, documented with the output-specific configuration field. All secret keys are optional. Enable the security features you want by setting the relevant keys. You are responsible for creating and maintaining any additional configurations that external destinations might require, such as keys and secrets, service accounts, port openings, or global proxy configuration. 
OpenShift Logging does not attempt to verify mismatched combinations of authorization keys. - -Transport Layer Security (TLS):: Using a TLS URL ('https://...' or 'ssl://...') without a Secret enables basic TLS server-side authentication. Additional TLS features are enabled by including a Secret and setting the following optional fields: - -* `tls.crt`: (string) File name containing a client certificate. Enables mutual authentication. Requires `tls.key`. - -* `tls.key`: (string) File name containing the private key to unlock the client certificate. Requires `tls.crt`. - -* `passphrase`: (string) Passphrase to decode an encoded TLS private key. Requires `tls.key`. - -* `ca-bundle.crt`: (string) File name of a customer CA for server authentication. - -Username and Password:: -* `username`: (string) Authentication user name. Requires `password`. -* `password`: (string) Authentication password. Requires `username`. - -Simple Authentication Security Layer (SASL):: -* `sasl.enable`: (boolean) Explicitly enable or disable SASL. -If missing, SASL is automatically enabled when any of the other `sasl.` keys are set. -* `sasl.mechanisms`: (array) List of allowed SASL mechanism names. -If missing or empty, the system defaults are used. -* `sasl.allow-insecure`: (boolean) Allow mechanisms that send clear-text passwords. Defaults to false. - -== Creating a Secret - -You can create a secret in the directory that contains your certificate and key files by using the following command: -[subs="+quotes"] ----- -$ oc create secret generic <secret_name> -n openshift-logging \ - --from-file=tls.key=<your_key_file> \ - --from-file=tls.crt=<your_certificate_file> \ - --from-file=ca-bundle.crt=<your_ca_bundle_file> \ - --from-literal=username=<your_user_name> \ - --from-literal=password=<your_password> ----- - -[NOTE] -==== -Generic or opaque secrets are recommended for best results. -==== diff --git a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-1.adoc b/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-1.adoc deleted file mode 100644 index 39c1385383fa..000000000000 --- a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-1.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// logging/cluster-logging-external.adoc - -[id="cluster-logging-collector-log-forwarding-supported-plugins-5-1_{context}"] - -= Supported log data output types in OpenShift Logging 5.1 - -Red Hat OpenShift Logging 5.1 provides the following output types and protocols for sending log data to target log collectors. - -Red Hat tests each of the combinations shown in the following table. However, you should be able to send log data to a wider range of target log collectors that ingest these protocols. - -[options="header"] -|==== -| Output types | Protocols | Tested with - -| elasticsearch -| elasticsearch -a| Elasticsearch 6.8.1 - -Elasticsearch 6.8.4 - -Elasticsearch 7.12.2 - -| fluentdForward -| fluentd forward v1 -a| fluentd 1.7.4 - -logstash 7.10.1 - -| kafka -| kafka 0.11 -a| kafka 2.4.1 - -kafka 2.7.0 - -| syslog -| RFC-3164, RFC-5424 -| rsyslog-8.39.0 - -|==== - -// Note to tech writer, validate these items against the corresponding line of the test configuration file that Red Hat OpenShift Logging 5.0 uses: https://github.com/openshift/origin-aggregated-logging/blob/release-5.0/fluentd/Gemfile.lock -// This file is the authoritative source of information about which items and versions Red Hat tests and supports.
-// According to this link:https://github.com/zendesk/ruby-kafka#compatibility[Zendesk compatibility list for ruby-kafka], the fluent-plugin-kafka plugin supports Kafka version 0.11. -// Logstash support is according to https://github.com/openshift/cluster-logging-operator/blob/master/test/functional/outputs/forward_to_logstash_test.go#L37 - -[NOTE] -==== -Previously, the syslog output supported only RFC-3164. The current syslog output adds support for RFC-5424. -==== - -//ENG-Feedback: How can we reformat this to accurately reflect 5.4? diff --git a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-2.adoc b/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-2.adoc deleted file mode 100644 index aec5cc9350b6..000000000000 --- a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-2.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// logging/cluster-logging-external.adoc - -[id="cluster-logging-collector-log-forwarding-supported-plugins-5-2_{context}"] - -= Supported log data output types in OpenShift Logging 5.2 - -Red Hat OpenShift Logging 5.2 provides the following output types and protocols for sending log data to target log collectors. - -Red Hat tests each of the combinations shown in the following table. However, you should be able to send log data to a wider range target log collectors that ingest these protocols. - -[options="header"] -|==== -| Output types | Protocols | Tested with - -| Amazon CloudWatch -| REST over HTTPS -| The current version of Amazon CloudWatch - - -| elasticsearch -| elasticsearch -a| Elasticsearch 6.8.1 - -Elasticsearch 6.8.4 - -Elasticsearch 7.12.2 - -| fluentdForward -| fluentd forward v1 -a| fluentd 1.7.4 - -logstash 7.10.1 - -| Loki -| REST over HTTP and HTTPS -| Loki 2.3.0 deployed on OCP and Grafana labs - -| kafka -| kafka 0.11 -a| kafka 2.4.1 - -kafka 2.7.0 - -| syslog -| RFC-3164, RFC-5424 -| rsyslog-8.39.0 - -|==== - -// Note to tech writer, validate these items against the corresponding line of the test configuration file that Red Hat OpenShift Logging 5.0 uses: https://github.com/openshift/origin-aggregated-logging/blob/release-5.0/fluentd/Gemfile.lock -// This file is the authoritative source of information about which items and versions Red Hat tests and supports. -// According to this link:https://github.com/zendesk/ruby-kafka#compatibility[Zendesk compatibility list for ruby-kafka], the fluent-plugin-kafka plugin supports Kafka version 0.11. -// Logstash support is according to https://github.com/openshift/cluster-logging-operator/blob/master/test/functional/outputs/forward_to_logstash_test.go#L37 diff --git a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-3.adoc b/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-3.adoc deleted file mode 100644 index 72ec71c764b7..000000000000 --- a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-3.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// logging/cluster-logging-external.adoc - -[id="cluster-logging-collector-log-forwarding-supported-plugins-5-3_{context}"] - -= Supported log data output types in OpenShift Logging 5.3 - -Red Hat OpenShift Logging 5.3 provides the following output types and protocols for sending log data to target log collectors. - -Red Hat tests each of the combinations shown in the following table. 
However, you should be able to send log data to a wider range target log collectors that ingest these protocols. - -[options="header"] -|==== -| Output types | Protocols | Tested with - -| Amazon CloudWatch -| REST over HTTPS -| The current version of Amazon CloudWatch - - -| elasticsearch -| elasticsearch -a| Elasticsearch 7.10.1 - -| fluentdForward -| fluentd forward v1 -a| fluentd 1.7.4 - -logstash 7.10.1 - -| Loki -| REST over HTTP and HTTPS -| Loki 2.2.1 deployed on OCP - -| kafka -| kafka 0.11 -a| kafka 2.7.0 - -| syslog -| RFC-3164, RFC-5424 -| rsyslog-8.39.0 - -|==== - -// Note: validate these items against the corresponding line of the test configuration files that Red Hat OpenShift Logging uses: -// -// cloudwatch https://github.com/openshift/cluster-logging-operator/blob/release-5.3/test/functional/outputs/forward_to_cloudwatch_test.go#L18 -// elasticsearch https://github.com/openshift/cluster-logging-operator/blob/release-5.3/test/functional/outputs/forward_to_elasticsearch_index_test.go#L17 -// es fluentd https://github.com/ViaQ/logging-fluentd/blob/release-5.5/fluentd/Gemfile.lock#L55 -// fluentd https://github.com/openshift/cluster-logging-operator/blob/release-5.3/Makefile#L23 -// kafka https://github.com/openshift/cluster-logging-operator/blob/release-5.3/test/helpers/kafka/constants.go#L17 -// kafka fluentd https://github.com/zendesk/ruby-kafka/tree/v1.4.0#compatibility -// logstash https://github.com/openshift/cluster-logging-operator/blob/release-5.3/test/functional/outputs/forward_to_logstash_test.go#L30 -// loki https://github.com/openshift/cluster-logging-operator/blob/release-5.3/test/helpers/loki/receiver.go#L25 -// syslog protocols https://github.com/openshift/cluster-logging-operator/tree/release-5.3/test/functional/outputs/syslog -// syslog version https://github.com/openshift/cluster-logging-operator/blob/release-5.3/test/framework/functional/output_syslog.go#L13 diff --git a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-4.adoc b/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-4.adoc deleted file mode 100644 index c41053c98de8..000000000000 --- a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-4.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// logging/cluster-logging-external.adoc - -[id="cluster-logging-collector-log-forwarding-supported-plugins-5-4_{context}"] - -= Supported log data output types in OpenShift Logging 5.4 - -Red Hat OpenShift Logging 5.4 provides the following output types and protocols for sending log data to target log collectors. - -Red Hat tests each of the combinations shown in the following table. However, you should be able to send log data to a wider range target log collectors that ingest these protocols. 
- -[options="header"] -|==== -| Output types | Protocols | Tested with - -| Amazon CloudWatch -| REST over HTTPS -| The current version of Amazon CloudWatch - - -| elasticsearch -| elasticsearch -a| Elasticsearch 7.10.1 - -| fluentdForward -| fluentd forward v1 -a| fluentd 1.14.5 - -logstash 7.10.1 - -| Loki -| REST over HTTP and HTTPS -| Loki 2.2.1 deployed on OCP - -| kafka -| kafka 0.11 -a| kafka 2.7.0 - -| syslog -| RFC-3164, RFC-5424 -| rsyslog-8.39.0 - -|==== - -// Note: validate these items against the corresponding line of the test configuration files that Red Hat OpenShift Logging uses: -// -// cloudwatch https://github.com/openshift/cluster-logging-operator/blob/release-5.4/test/functional/outputs/forward_to_cloudwatch_test.go#L18 -// elasticsearch https://github.com/openshift/cluster-logging-operator/blob/release-5.4/test/functional/outputs/forward_to_elasticsearch_index_test.go#L17 -// es fluentd https://github.com/ViaQ/logging-fluentd/blob/release-5.5/fluentd/Gemfile.lock#L55 -// fluentd https://github.com/openshift/cluster-logging-operator/blob/release-5.4/Makefile#L23 -// kafka https://github.com/openshift/cluster-logging-operator/blob/release-5.4/test/helpers/kafka/constants.go#L17 -// kafka fluentd https://github.com/zendesk/ruby-kafka/tree/v1.4.0#compatibility -// logstash https://github.com/openshift/cluster-logging-operator/blob/release-5.4/test/functional/outputs/forward_to_logstash_test.go#L30 -// loki https://github.com/openshift/cluster-logging-operator/blob/release-5.4/test/helpers/loki/receiver.go#L26 -// syslog protocols https://github.com/openshift/cluster-logging-operator/tree/release-5.4/test/functional/outputs/syslog -// syslog version https://github.com/openshift/cluster-logging-operator/blob/release-5.4/test/framework/functional/output_syslog.go#L13 diff --git a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-5.adoc b/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-5.adoc deleted file mode 100644 index 0ee234a1d603..000000000000 --- a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-5.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// logging/cluster-logging-external.adoc - -[id="cluster-logging-collector-log-forwarding-supported-plugins-5-5_{context}"] - -= Supported log data output types in OpenShift Logging 5.5 - -Red Hat OpenShift Logging 5.5 provides the following output types and protocols for sending log data to target log collectors. - -Red Hat tests each of the combinations shown in the following table. However, you should be able to send log data to a wider range target log collectors that ingest these protocols. 
- -[options="header"] -|==== -| Output types | Protocols | Tested with - -| Amazon CloudWatch -| REST over HTTPS -| The current version of Amazon CloudWatch - - -| elasticsearch -| elasticsearch -a| Elasticsearch 7.10.1 - -| fluentdForward -| fluentd forward v1 -a| fluentd 1.14.6 - -logstash 7.10.1 - -| Loki -| REST over HTTP and HTTPS -| Loki 2.5.0 deployed on OCP - -| kafka -| kafka 0.11 -a| kafka 2.7.0 - -| syslog -| RFC-3164, RFC-5424 -| rsyslog-8.39.0 - -|==== - -// Note: validate these items against the corresponding line of the test configuration files that Red Hat OpenShift Logging uses: -// -// cloudwatch https://github.com/openshift/cluster-logging-operator/blob/release-5.5/test/functional/outputs/forward_to_cloudwatch_test.go#L18 -// elasticsearch https://github.com/openshift/cluster-logging-operator/blob/release-5.5/test/functional/outputs/elasticsearch/forward_to_elasticsearch_index_test.go#L24 -// elasticsearch https://github.com/openshift/cluster-logging-operator/blob/release-5.5/test/framework/functional/output_elasticsearch7.go#L13 -// es fluentd https://github.com/ViaQ/logging-fluentd/blob/release-5.5/fluentd/Gemfile.lock#L55 -// fluentd https://github.com/openshift/cluster-logging-operator/blob/release-5.5/Makefile#L24 -// kafka https://github.com/openshift/cluster-logging-operator/blob/release-5.5/test/helpers/kafka/constants.go#L17 -// kafka fluentd https://github.com/zendesk/ruby-kafka/tree/v1.4.0#compatibility -// logstash https://github.com/openshift/cluster-logging-operator/blob/release-5.5/test/functional/outputs/forward_to_logstash_test.go#L30 -// loki https://github.com/openshift/cluster-logging-operator/blob/release-5.5/test/helpers/loki/receiver.go#L26 -// syslog protocols https://github.com/openshift/cluster-logging-operator/tree/release-5.5/test/functional/outputs/syslog -// syslog version https://github.com/openshift/cluster-logging-operator/blob/release-5.5/test/framework/functional/output_syslog.go#L14 \ No newline at end of file diff --git a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-6.adoc b/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-6.adoc deleted file mode 100644 index f93995221cb7..000000000000 --- a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-6.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// logging/cluster-logging-external.adoc - -[id="cluster-logging-collector-log-forwarding-supported-plugins-5-6_{context}"] - -= Supported log data output types in OpenShift Logging 5.6 - -Red Hat OpenShift Logging 5.6 provides the following output types and protocols for sending log data to target log collectors. - -Red Hat tests each of the combinations shown in the following table. However, you should be able to send log data to a wider range target log collectors that ingest these protocols. - -[options="header"] -|==== -| Output types | Protocols | Tested with - -| Amazon CloudWatch -| REST over HTTPS -| The current version of Amazon CloudWatch - - -| elasticsearch -| elasticsearch -a| Elasticsearch 6.8.23 - -Elasticsearch 7.10.1 - -Elasticsearch 8.6.1 - -| fluentdForward -| fluentd forward v1 -a| fluentd 1.14.6 - -logstash 7.10.1 - -| Loki -| REST over HTTP and HTTPS -| Loki 2.5.0 deployed on OCP - -| kafka -| kafka 0.11 -a| kafka 2.7.0 - -| syslog -| RFC-3164, RFC-5424 -| rsyslog-8.39.0 - -|==== - -[IMPORTANT] -==== -Fluentd doesn't support Elasticsearch 8 as of 5.6.2. -Vector doesn't support fluentd/logstash/rsyslog before 5.7.0. 
-==== - -// Note: validate these items against the corresponding line of the test configuration files that Red Hat OpenShift Logging uses: -// -// cloudwatch https://github.com/openshift/cluster-logging-operator/blob/release-5.6/test/functional/outputs/cloudwatch/forward_to_cloudwatch_test.go#L13 -// elasticsearch https://github.com/openshift/cluster-logging-operator/blob/release-5.6/test/framework/functional/output_elasticsearch.go#L16-L18 -// es fluentd https://github.com/ViaQ/logging-fluentd/blob/release-5.6/fluentd/Gemfile.lock#L55 -// fluentd https://github.com/openshift/cluster-logging-operator/blob/release-5.6/Makefile#L50 -// kafka https://github.com/openshift/cluster-logging-operator/blob/release-5.6/test/helpers/kafka/constants.go#L17 -// kafka fluentd https://github.com/zendesk/ruby-kafka/tree/v1.4.0#compatibility -// logstash https://github.com/openshift/cluster-logging-operator/blob/release-5.6/test/functional/outputs/forward_to_logstash_test.go#L30 -// loki https://github.com/openshift/cluster-logging-operator/blob/release-5.6/test/helpers/loki/receiver.go#L27 -// syslog protocols https://github.com/openshift/cluster-logging-operator/tree/release-5.6/test/functional/outputs/syslog -// syslog version https://github.com/openshift/cluster-logging-operator/blob/release-5.6/test/framework/functional/output_syslog.go#L14 diff --git a/modules/cluster-logging-collector-pod-location.adoc b/modules/cluster-logging-collector-pod-location.adoc deleted file mode 100644 index 47d350339f6d..000000000000 --- a/modules/cluster-logging-collector-pod-location.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-collector.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-collector-pod-location_{context}"] -= Viewing logging collector pods - -You can view the Fluentd logging collector pods and the corresponding nodes that they are running on. The Fluentd logging collector pods run only in the `openshift-logging` project. 
- -.Procedure - -* Run the following command in the `openshift-logging` project to view the Fluentd logging collector pods and their details: - -[source,terminal] ----- -$ oc get pods --selector component=collector -o wide -n openshift-logging ----- - -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -fluentd-8d69v 1/1 Running 0 134m 10.130.2.30 master1.example.com -fluentd-bd225 1/1 Running 0 134m 10.131.1.11 master2.example.com -fluentd-cvrzs 1/1 Running 0 134m 10.130.0.21 master3.example.com -fluentd-gpqg2 1/1 Running 0 134m 10.128.2.27 worker1.example.com -fluentd-l9j7j 1/1 Running 0 134m 10.129.2.31 worker2.example.com ----- - -//// -[source,terminal] ----- -$ oc get pods -o wide | grep rsyslog ----- - -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -rsyslog-5gtfs 1/1 Running 0 3m6s 10.130.0.40 ip-10-0-148-139.ec2.internal -rsyslog-67rv6 1/1 Running 0 3m6s 10.128.2.13 ip-10-0-158-206.ec2.internal -rsyslog-bqgjn 1/1 Running 0 3m6s 10.131.0.11 ip-10-0-132-167.ec2.internal -rsyslog-cjmdp 1/1 Running 0 3m6s 10.129.2.16 ip-10-0-139-191.ec2.internal -rsyslog-kqlzh 1/1 Running 0 3m6s 10.129.0.37 ip-10-0-141-243.ec2.internal -rsyslog-nhshr 1/1 Running 0 3m6s 10.128.0.41 ip-10-0-143-38.ec2.internal ----- -//// diff --git a/modules/cluster-logging-collector-tolerations.adoc b/modules/cluster-logging-collector-tolerations.adoc deleted file mode 100644 index 1738a57067bb..000000000000 --- a/modules/cluster-logging-collector-tolerations.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-collector.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-collector-tolerations_{context}"] -= Using tolerations to control the log collector pod placement - -You can use tolerations on the logging collector pods to control which nodes the pods run on and to prevent -other workloads from using those nodes. - -You apply tolerations to logging collector pods through the `ClusterLogging` custom resource (CR) -and apply taints to a node through the node specification. You can use taints and tolerations -to ensure that the pods are not evicted because of issues such as memory and CPU pressure. - -By default, the logging collector pods have the following toleration: - -[source,yaml] ----- -tolerations: -- key: "node-role.kubernetes.io/master" - operator: "Exists" - effect: "NoExecute" ----- - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Use the following command to add a taint to a node where you want to schedule the logging collector pods: -+ -[source,terminal] ----- -$ oc adm taint nodes <node_name> <key>=<value>:<effect> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc adm taint nodes node1 collector=node:NoExecute ----- -+ -This example places a taint on `node1` that has key `collector`, value `node`, and taint effect `NoExecute`. -You must use the `NoExecute` taint effect. `NoExecute` schedules only pods that match the taint and removes existing pods -that do not match. - -. Edit the `collection` stanza of the `ClusterLogging` custom resource (CR) to configure a toleration for the logging collector pods: -+ -[source,yaml] ----- - collection: - logs: - type: "fluentd" - fluentd: - tolerations: - - key: "collector" <1> - operator: "Exists" <2> - effect: "NoExecute" <3> - tolerationSeconds: 6000 <4> ----- -<1> Specify the key that you added to the node.
-<2> Specify the `Exists` operator to require the `key`/`value`/`effect` parameters to match. -<3> Specify the `NoExecute` effect. -<4> Optionally, specify the `tolerationSeconds` parameter to set how long a pod can remain bound to a node before being evicted. - -This toleration matches the taint created by the `oc adm taint` command. A pod with this toleration would be able to schedule onto `node1`. diff --git a/modules/cluster-logging-collector-tuning.adoc b/modules/cluster-logging-collector-tuning.adoc deleted file mode 100644 index a0217dd494aa..000000000000 --- a/modules/cluster-logging-collector-tuning.adoc +++ /dev/null @@ -1,173 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-collector.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-collector-tuning_{context}"] -= Advanced configuration for the log forwarder - -The {logging-title} includes multiple Fluentd parameters that you can use for tuning the performance of the Fluentd log forwarder. With these parameters, you can change the following Fluentd behaviors: - -* Chunk and chunk buffer sizes -* Chunk flushing behavior -* Chunk forwarding retry behavior - -Fluentd collects log data in a single blob called a _chunk_. When Fluentd creates a chunk, the chunk is considered to be in the _stage_, where the chunk gets filled with data. When the chunk is full, Fluentd moves the chunk to the _queue_, where chunks are held before being flushed, or written out to their destination. Fluentd can fail to flush a chunk for a number of reasons, such as network issues or capacity issues at the destination. If a chunk cannot be flushed, Fluentd retries flushing as configured. - -By default in {product-title}, Fluentd uses the _exponential backoff_ method to retry flushing, where Fluentd doubles the time it waits between attempts to retry flushing again, which helps reduce connection requests to the destination. You can disable exponential backoff and use the _periodic_ retry method instead, which retries flushing the chunks at a specified interval. - -These parameters can help you determine the trade-offs between latency and throughput. - -* To optimize Fluentd for throughput, you could use these parameters to reduce network packet count by configuring larger buffers and queues, delaying flushes, and setting longer times between retries. Be aware that larger buffers require more space on the node file system. - -* To optimize for low latency, you could use the parameters to send data as soon as possible, avoid the build-up of batches, have shorter queues and buffers, and use more frequent flush and retries. - -You can configure the chunking and flushing behavior using the following parameters in the `ClusterLogging` custom resource (CR). The parameters are then automatically added to the Fluentd config map for use by Fluentd. - -[NOTE] -==== -These parameters are: - -* Not relevant to most users. The default settings should give good general performance. -* Only for advanced users with detailed knowledge of Fluentd configuration and performance. -* Only for performance tuning. They have no effect on functional aspects of logging. -==== - -.Advanced Fluentd Configuration Parameters -[options="header"] -|=== - -|Parameter |Description |Default - -|`chunkLimitSize` -|The maximum size of each chunk. Fluentd stops writing data to a chunk when it reaches this size. Then, Fluentd sends the chunk to the queue and opens a new chunk. 
-|`8m` - -|`totalLimitSize` -|The maximum size of the buffer, which is the total size of the stage and the queue. If the buffer size exceeds this value, Fluentd stops adding data to chunks and fails with an error. All data not in chunks is lost. -|`8G` - -|`flushInterval` -|The interval between chunk flushes. You can use `s` (seconds), `m` (minutes), `h` (hours), or `d` (days). -|`1s` - -|`flushMode` -a| The method to perform flushes: - -* `lazy`: Flush chunks based on the `timekey` parameter. You cannot modify the `timekey` parameter. -* `interval`: Flush chunks based on the `flushInterval` parameter. -* `immediate`: Flush chunks immediately after data is added to a chunk. -|`interval` - -|`flushThreadCount` -|The number of threads that perform chunk flushing. Increasing the number of threads improves the flush throughput, which hides network latency. -|`2` - -|`overflowAction` -a|The chunking behavior when the queue is full: - -* `throw_exception`: Raise an exception to show in the log. -* `block`: Stop data chunking until the full buffer issue is resolved. -* `drop_oldest_chunk`: Drop the oldest chunk to accept new incoming chunks. Older chunks have less value than newer chunks. -|`block` - -|`retryMaxInterval` -|The maximum time in seconds for the `exponential_backoff` retry method. -|`300s` - -|`retryType` -a|The retry method when flushing fails: - -* `exponential_backoff`: Increase the time between flush retries. Fluentd doubles the time it waits until the next retry until the `retry_max_interval` parameter is reached. -* `periodic`: Retries flushes periodically, based on the `retryWait` parameter. -|`exponential_backoff` - -|`retryTimeOut` -|The maximum time interval to attempt retries before the record is discarded. -|`60m` - -|`retryWait` -|The time in seconds before the next chunk flush. -|`1s` - -|=== - -For more information on the Fluentd chunk lifecycle, see link:https://docs.fluentd.org/buffer[Buffer Plugins] in the Fluentd documentation. - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] -+ ----- -$ oc edit ClusterLogging instance ----- - -. Add or modify any of the following parameters: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: - name: instance - namespace: openshift-logging -spec: - forwarder: - fluentd: - buffer: - chunkLimitSize: 8m <1> - flushInterval: 5s <2> - flushMode: interval <3> - flushThreadCount: 3 <4> - overflowAction: throw_exception <5> - retryMaxInterval: "300s" <6> - retryType: periodic <7> - retryWait: 1s <8> - totalLimitSize: 32m <9> -... ----- -<1> Specify the maximum size of each chunk before it is queued for flushing. -<2> Specify the interval between chunk flushes. -<3> Specify the method to perform chunk flushes: `lazy`, `interval`, or `immediate`. -<4> Specify the number of threads to use for chunk flushes. -<5> Specify the chunking behavior when the queue is full: `throw_exception`, `block`, or `drop_oldest_chunk`. -<6> Specify the maximum interval in seconds for the `exponential_backoff` chunk flushing method. -<7> Specify the retry type when chunk flushing fails: `exponential_backoff` or `periodic`. -<8> Specify the time in seconds before the next chunk flush. -<9> Specify the maximum size of the chunk buffer. - -. Verify that the Fluentd pods are redeployed: -+ -[source,terminal] ----- -$ oc get pods -l component=collector -n openshift-logging ----- - -. 
Check that the new values are in the `fluentd` config map: -+ -[source,terminal] ----- -$ oc extract configmap/fluentd --confirm ----- -+ -.Example fluentd.conf -[source,terminal] ----- - - @type file - path '/var/lib/fluentd/default' - flush_mode interval - flush_interval 5s - flush_thread_count 3 - retry_type periodic - retry_wait 1s - retry_max_interval 300s - retry_timeout 60m - queued_chunks_limit_size "#{ENV['BUFFER_QUEUE_LIMIT'] || '32'}" - total_limit_size 32m - chunk_limit_size 8m - overflow_action throw_exception - ----- diff --git a/modules/cluster-logging-configuration-of-json-log-data-for-default-elasticsearch.adoc b/modules/cluster-logging-configuration-of-json-log-data-for-default-elasticsearch.adoc deleted file mode 100644 index 44ef382c5ed0..000000000000 --- a/modules/cluster-logging-configuration-of-json-log-data-for-default-elasticsearch.adoc +++ /dev/null @@ -1,109 +0,0 @@ -[id="cluster-logging-configuration-of-json-log-data-for-default-elasticsearch_{context}"] -= Configuring JSON log data for Elasticsearch - -If your JSON logs follow more than one schema, storing them in a single index might cause type conflicts and cardinality problems. To avoid that, you must configure the `ClusterLogForwarder` custom resource (CR) to group each schema into a single output definition. This way, each schema is forwarded to a separate index. - -[IMPORTANT] -==== -If you forward JSON logs to the default Elasticsearch instance managed by OpenShift Logging, it generates new indices based on your configuration. To avoid performance issues associated with having too many indices, consider keeping the number of possible schemas low by standardizing to common schemas. -==== - -.Structure types - -You can use the following structure types in the `ClusterLogForwarder` CR to construct index names for the Elasticsearch log store: - -* `structuredTypeKey` (string, optional) is the name of a message field. The value of that field, if present, is used to construct the index name. -** `kubernetes.labels.` is the Kubernetes pod label whose value is used to construct the index name. -** `openshift.labels.` is the `pipeline.label.` element in the `ClusterLogForwarder` CR whose value is used to construct the index name. -** `kubernetes.container_name` uses the container name to construct the index name. -* `structuredTypeName`: (string, optional) If `structuredTypeKey` is not set or its key is not present, OpenShift Logging uses the value of `structuredTypeName` as the structured type. When you use both `structuredTypeKey` and `structuredTypeName` together, `structuredTypeName` provides a fallback index name if the key in `structuredTypeKey` is missing from the JSON log data. - -[NOTE] -==== -Although you can set the value of `structuredTypeKey` to any field shown in the "Log Record Fields" topic, the most useful fields are shown in the preceding list of structure types. -==== - -.A structuredTypeKey: kubernetes.labels. example - -Suppose the following: - -* Your cluster is running application pods that produce JSON logs in two different formats, "apache" and "google". -* The user labels these application pods with `logFormat=apache` and `logFormat=google`. -* You use the following snippet in your `ClusterLogForwarder` CR YAML file. 
- -[source,yaml] ----- -outputDefaults: - elasticsearch: - structuredTypeKey: kubernetes.labels.logFormat <1> - structuredTypeName: nologformat -pipelines: -- inputRefs: - outputRefs: default - parse: json <2> ----- -<1> Uses the value of the key-value pair that is formed by the Kubernetes `logFormat` label. -<2> Enables parsing JSON logs. - -In that case, the following structured log record goes to the `app-apache-write` index: - -[source] ----- -{ - "structured":{"name":"fred","home":"bedrock"}, - "kubernetes":{"labels":{"logFormat": "apache", ...}} -} ----- - -And the following structured log record goes to the `app-google-write` index: - -[source] ----- -{ - "structured":{"name":"wilma","home":"bedrock"}, - "kubernetes":{"labels":{"logFormat": "google", ...}} -} ----- - -.A structuredTypeKey: openshift.labels. example - -Suppose that you use the following snippet in your `ClusterLogForwarder` CR YAML file. - -[source,yaml] ----- -outputDefaults: - elasticsearch: - structuredTypeKey: openshift.labels.myLabel <1> - structuredTypeName: nologformat -pipelines: - - name: application-logs - inputRefs: - - application - - audit - outputRefs: - - elasticsearch-secure - - default - parse: json - labels: - myLabel: myValue <2> ----- -<1> Uses the value of the key-value pair that is formed by the OpenShift `myLabel` label. -<2> The `myLabel` element gives its string value, `myValue`, to the structured log record. - -In that case, the following structured log record goes to the `app-myValue-write` index: - -[source] ----- -{ - "structured":{"name":"fred","home":"bedrock"}, - "openshift":{"labels":{"myLabel": "myValue", ...}} -} ----- - -.Additional considerations - -* The Elasticsearch _index_ for structured records is formed by prepending "app-" to the structured type and appending "-write". -* Unstructured records are not sent to the structured index. They are indexed as usual in the application, infrastructure, or audit indices. -* If there is no non-empty structured type, forward an _unstructured_ record with no `structured` field. - -It is important not to overload Elasticsearch with too many indices. Only use distinct structured types for distinct log _formats_, *not* for each application or namespace. For example, most Apache applications use the same JSON log format and structured type, such as `LogApache`. diff --git a/modules/cluster-logging-configuring-image-about.adoc b/modules/cluster-logging-configuring-image-about.adoc deleted file mode 100644 index f0b6522dba2c..000000000000 --- a/modules/cluster-logging-configuring-image-about.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-configuring.adoc - -[id="cluster-logging-configuring-image-about_{context}"] -= Understanding {logging} component images - -There are several components in the {logging-title}, each one implemented with one or more images. Each image is specified by an environment variable -defined in the *cluster-logging-operator* deployment in the *openshift-logging* project and should not be changed. 
- -You can view the images by running the following command: - -[source,terminal] ----- -$ oc -n openshift-logging set env deployment/cluster-logging-operator --list | grep _IMAGE ----- -// logging test command and update the example output - -.Example output -[source,terminal] ----- -FLUENTD_IMAGE=registry.redhat.io/openshift-logging/fluentd-rhel8:latest@sha256:ffdf79da7386871d2bc24cd937e02284b30f85a9979dc8c635fb73021cbca2f3 <1> ----- -<1> *FLUENTD_IMAGE* deploys Fluentd. - -[NOTE] -==== -Promtail is not officially supported at this time. -==== - -The values might be different depending on your environment. diff --git a/modules/cluster-logging-cpu-memory.adoc b/modules/cluster-logging-cpu-memory.adoc deleted file mode 100644 index 0971d084f6af..000000000000 --- a/modules/cluster-logging-cpu-memory.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-collector.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-memory-limits_{context}"] -= Configuring CPU and memory limits - -The {logging} components allow for adjustments to both the CPU and memory limits. - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc -n openshift-logging edit ClusterLogging instance ----- -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - namespace: openshift-logging - -... - -spec: - managementState: "Managed" - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - resources: <1> - limits: - memory: 16Gi - requests: - cpu: 200m - memory: 16Gi - storage: - storageClassName: "gp2" - size: "200G" - redundancyPolicy: "SingleRedundancy" - visualization: - type: "kibana" - kibana: - resources: <2> - limits: - memory: 1Gi - requests: - cpu: 500m - memory: 1Gi - proxy: - resources: <2> - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi - replicas: 2 - collection: - logs: - type: "fluentd" - fluentd: - resources: <3> - limits: - memory: 736Mi - requests: - cpu: 200m - memory: 736Mi ----- -<1> Specify the CPU and memory limits and requests for the log store as needed. For Elasticsearch, you must adjust both the request value and the limit value. -<2> Specify the CPU and memory limits and requests for the log visualizer as needed. -<3> Specify the CPU and memory limits and requests for the log collector as needed. diff --git a/modules/cluster-logging-dashboards-access.adoc b/modules/cluster-logging-dashboards-access.adoc deleted file mode 100644 index 9f1b1effaca1..000000000000 --- a/modules/cluster-logging-dashboards-access.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// -// * logging/cluster-logging-dashboards.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-dashboards-access_{context}"] -= Accessing the Elasticsearch and OpenShift Logging dashboards - -You can view the *Logging/Elasticsearch Nodes* and *OpenShift Logging* dashboards in the -ifndef::openshift-rosa,openshift-dedicated[] -{product-title} web console. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -{cluster-manager-url}. -endif::[] - -.Procedure - -To launch the dashboards: - -ifndef::openshift-rosa,openshift-dedicated[] -. In the {product-title} web console, click *Observe* -> *Dashboards*. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -. In the {product-title} {hybrid-console}, click *Observe* -> *Dashboards*. -endif::[] - -. 
On the *Dashboards* page, select *Logging/Elasticsearch Nodes* or *OpenShift Logging* from the *Dashboard* menu. -+ -For the *Logging/Elasticsearch Nodes* dashboard, you can select the Elasticsearch node you want to view and set the data resolution. -+ -The appropriate dashboard is displayed, showing multiple charts of data. - -. Optional: Select a different time range to display or refresh rate for the data from the *Time Range* and *Refresh Interval* menus. diff --git a/modules/cluster-logging-dashboards-es.adoc b/modules/cluster-logging-dashboards-es.adoc deleted file mode 100644 index 7db01c18985b..000000000000 --- a/modules/cluster-logging-dashboards-es.adoc +++ /dev/null @@ -1,196 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-dashboards.adoc - -[id="cluster-logging-dashboards-es_{context}"] -= Charts on the Logging/Elasticsearch nodes dashboard - -The *Logging/Elasticsearch Nodes* dashboard contains charts that show details about your Elasticsearch instance, many at node-level, for further diagnostics. - -Elasticsearch status:: - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about the status of your Elasticsearch instance. - -.Elasticsearch status fields -[options="header"] -|=== -|Metric|Description - -|Cluster status -a|The cluster health status during the selected time period, using the Elasticsearch green, yellow, and red statuses: - -* 0 - Indicates that the Elasticsearch instance is in green status, which means that all shards are allocated. -* 1 - Indicates that the Elasticsearch instance is in yellow status, which means that replica shards for at least one shard are not allocated. -* 2 - Indicates that the Elasticsearch instance is in red status, which means that at least one primary shard and its replicas are not allocated. - -|Cluster nodes -|The total number of Elasticsearch nodes in the cluster. - -|Cluster data nodes -|The number of Elasticsearch data nodes in the cluster. - -|Cluster pending tasks -|The number of cluster state changes that are not finished and are waiting in a cluster queue, for example, index creation, index deletion, or shard allocation. A growing trend indicates that the cluster is not able to keep up with changes. - -|=== - -Elasticsearch cluster index shard status:: - -Each Elasticsearch index is a logical group of one or more shards, which are basic units of persisted data. There are two types of index shards: primary shards, and replica shards. When a document is indexed into an index, it is stored in one of its primary shards and copied into every replica of that shard. The number of primary shards is specified when the index is created, and the number cannot change during index lifetime. You can change the number of replica shards at any time. - -The index shard can be in several states depending on its lifecycle phase or events occurring in the cluster. When the shard is able to perform search and indexing requests, the shard is active. If the shard cannot perform these requests, the shard is non–active. A shard might be non-active if the shard is initializing, reallocating, unassigned, and so forth. - -Index shards consist of a number of smaller internal blocks, called index segments, which are physical representations of the data. An index segment is a relatively small, immutable Lucene index that is created when Lucene commits newly-indexed data. 
Lucene, a search library used by Elasticsearch, merges index segments into larger segments in the background to keep the total number of segments low. If the process of merging segments is slower than the speed at which new segments are created, it could indicate a problem. - -When Lucene performs data operations, such as a search operation, Lucene performs the operation against the index segments in the relevant index. For that purpose, each segment contains specific data structures that are loaded in the memory and mapped. Index mapping can have a significant impact on the memory used by segment data structures. - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about the Elasticsearch index shards. - -.Elasticsearch cluster shard status charts -[options="header"] - -|=== -|Metric|Description - -|Cluster active shards -|The number of active primary shards and the total number of shards, including replicas, in the cluster. If the number of shards grows higher, the cluster performance can start degrading. - -|Cluster initializing shards -|The number of non-active shards in the cluster. A non-active shard is one that is initializing, being reallocated to a different node, or is unassigned. A cluster typically has non–active shards for short periods. A growing number of non–active shards over longer periods could indicate a problem. - -|Cluster relocating shards -|The number of shards that Elasticsearch is relocating to a new node. Elasticsearch relocates nodes for multiple reasons, such as high memory use on a node or after a new node is added to the cluster. - -|Cluster unassigned shards -|The number of unassigned shards. Elasticsearch shards might be unassigned for reasons such as a new index being added or the failure of a node. - -|=== - -Elasticsearch node metrics:: - -Each Elasticsearch node has a finite amount of resources that can be used to process tasks. When all the resources are being used and Elasticsearch attempts to perform a new task, Elasticsearch put the tasks into a queue until some resources become available. - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about resource usage for a selected node and the number of tasks waiting in the Elasticsearch queue. - -.Elasticsearch node metric charts -[options="header"] -|=== -|Metric|Description - -|ThreadPool tasks -|The number of waiting tasks in individual queues, shown by task type. A long–term accumulation of tasks in any queue could indicate node resource shortages or some other problem. - -|CPU usage -|The amount of CPU being used by the selected Elasticsearch node as a percentage of the total CPU allocated to the host container. - -|Memory usage -|The amount of memory being used by the selected Elasticsearch node. - -|Disk usage -|The total disk space being used for index data and metadata on the selected Elasticsearch node. - -|Documents indexing rate -|The rate that documents are indexed on the selected Elasticsearch node. - -|Indexing latency -|The time taken to index the documents on the selected Elasticsearch node. Indexing latency can be affected by many factors, such as JVM Heap memory and overall load. A growing latency indicates a resource capacity shortage in the instance. - -|Search rate -|The number of search requests run on the selected Elasticsearch node. - -|Search latency -|The time taken to complete search requests on the selected Elasticsearch node. Search latency can be affected by many factors. 
A growing latency indicates a resource capacity shortage in the instance. - -|Documents count (with replicas) -|The number of Elasticsearch documents stored on the selected Elasticsearch node, including documents stored in both the primary shards and replica shards that are allocated on the node. - -|Documents deleting rate -|The number of Elasticsearch documents being deleted from any of the index shards that are allocated to the selected Elasticsearch node. - -|Documents merging rate -|The number of Elasticsearch documents being merged in any of index shards that are allocated to the selected Elasticsearch node. - -|=== - -Elasticsearch node fielddata:: - -link:https://www.elastic.co/guide/en/elasticsearch/reference/6.8/fielddata.html[_Fielddata_] is an Elasticsearch data structure that holds lists of terms in an index and is kept in the JVM Heap. Because fielddata building is an expensive operation, Elasticsearch caches the fielddata structures. Elasticsearch can evict a fielddata cache when the underlying index segment is deleted or merged, or if there is not enough JVM HEAP memory for all the fielddata caches. - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about Elasticsearch fielddata. - -.Elasticsearch node fielddata charts -[options="header"] -|=== -|Metric|Description - -|Fielddata memory size -|The amount of JVM Heap used for the fielddata cache on the selected Elasticsearch node. - -|Fielddata evictions -|The number of fielddata structures that were deleted from the selected Elasticsearch node. - -|=== - -Elasticsearch node query cache:: - -If the data stored in the index does not change, search query results are cached in a node-level query cache for reuse by Elasticsearch. - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about the Elasticsearch node query cache. - -.Elasticsearch node query charts -[options="header"] -|=== -|Metric|Description - -|Query cache size -|The total amount of memory used for the query cache for all the shards allocated to the selected Elasticsearch node. - -|Query cache evictions -|The number of query cache evictions on the selected Elasticsearch node. - -|Query cache hits -|The number of query cache hits on the selected Elasticsearch node. - -|Query cache misses -|The number of query cache misses on the selected Elasticsearch node. - -|=== - -Elasticsearch index throttling:: - -When indexing documents, Elasticsearch stores the documents in index segments, which are physical representations of the data. At the same time, Elasticsearch periodically merges smaller segments into a larger segment as a way to optimize resource use. If the indexing is faster then the ability to merge segments, the merge process does not complete quickly enough, which can lead to issues with searches and performance. To prevent this situation, Elasticsearch throttles indexing, typically by reducing the number of threads allocated to indexing down to a single thread. - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about Elasticsearch index throttling. - -.Index throttling charts -[options="header"] -|=== -|Metric|Description - -|Indexing throttling -|The amount of time that Elasticsearch has been throttling the indexing operations on the selected Elasticsearch node. - -|Merging throttling -|The amount of time that Elasticsearch has been throttling the segment merge operations on the selected Elasticsearch node. 
- -|=== - -Node JVM Heap statistics:: - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about JVM Heap operations. - -.JVM Heap statistic charts -[options="header"] -|=== -|Metric|Description - -|Heap used -|The amount of the total allocated JVM Heap space that is used on the selected Elasticsearch node. - -|GC count -|The number of garbage collection operations that have been run on the selected Elasticsearch node, by old and young garbage collection. - -|GC time -|The amount of time that the JVM spent running garbage collection operations on the selected Elasticsearch node, by old and young garbage collection. - -|=== diff --git a/modules/cluster-logging-dashboards-logging.adoc b/modules/cluster-logging-dashboards-logging.adoc deleted file mode 100644 index 214d1677c6dc..000000000000 --- a/modules/cluster-logging-dashboards-logging.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-dashboards.adoc - -:_content-type: CONCEPT -[id="cluster-logging-dashboards-logging_{context}"] -= About the OpenShift Logging dashboard - -The *OpenShift Logging* dashboard contains charts that show details about your Elasticsearch instance at a cluster-level that you can use to diagnose and anticipate problems. - -.OpenShift Logging charts -[options="header"] -|=== -|Metric|Description - -|Elastic Cluster Status -a|The current Elasticsearch status: - -* ONLINE - Indicates that the Elasticsearch instance is online. -* OFFLINE - Indicates that the Elasticsearch instance is offline. - -|Elastic Nodes -|The total number of Elasticsearch nodes in the Elasticsearch instance. - -|Elastic Shards -|The total number of Elasticsearch shards in the Elasticsearch instance. - -|Elastic Documents -|The total number of Elasticsearch documents in the Elasticsearch instance. - -|Total Index Size on Disk -|The total disk space that is being used for the Elasticsearch indices. - -|Elastic Pending Tasks -|The total number of Elasticsearch changes that have not been completed, such as index creation, index mapping, shard allocation, or shard failure. - -|Elastic JVM GC time -|The amount of time that the JVM spent executing Elasticsearch garbage collection operations in the cluster. - -|Elastic JVM GC Rate -|The total number of times that JVM executed garbage activities per second. - -|Elastic Query/Fetch Latency Sum -a|* Query latency: The average time each Elasticsearch search query takes to execute. -* Fetch latency: The average time each Elasticsearch search query spends fetching data. - -Fetch latency typically takes less time than query latency. If fetch latency is consistently increasing, it might indicate slow disks, data enrichment, or large requests with too many results. - -|Elastic Query Rate -|The total queries executed against the Elasticsearch instance per second for each Elasticsearch node. - -|CPU -|The amount of CPU used by Elasticsearch, Fluentd, and Kibana, shown for each component. - -|Elastic JVM Heap Used -|The amount of JVM memory used. In a healthy cluster, the graph shows regular drops as memory is freed by JVM garbage collection. - -|Elasticsearch Disk Usage -|The total disk space used by the Elasticsearch instance for each Elasticsearch node. - -|File Descriptors In Use -|The total number of file descriptors used by Elasticsearch, Fluentd, and Kibana. - -|FluentD emit count -|The total number of Fluentd messages per second for the Fluentd default output, and the retry count for the default output. 
- -|FluentD Buffer Availability -|The percent of the Fluentd buffer that is available for chunks. A full buffer might indicate that Fluentd is not able to process the number of logs received. - -|Elastic rx bytes -|The total number of bytes that Elasticsearch has received from FluentD, the Elasticsearch nodes, and other sources. - -|Elastic Index Failure Rate -|The total number of times per second that an Elasticsearch index fails. A high rate might indicate an issue with indexing. - -|FluentD Output Error Rate -|The total number of times per second that FluentD is not able to output logs. - -|=== diff --git a/modules/cluster-logging-deploy-certificates.adoc b/modules/cluster-logging-deploy-certificates.adoc deleted file mode 100644 index c6c25b166b50..000000000000 --- a/modules/cluster-logging-deploy-certificates.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-deploy.adoc - -[id="cluster-logging-deploy-certificates_{context}"] -= Deploying custom certificates - -You can specify custom certificates using the following variables -instead of relying on those generated during the deployment process. These -certificates are used to encrypt and secure communication between a user's -browser and Kibana. The security-related files will be generated if they are not -supplied. - -[cols="3,7",options="header"] -|=== -|File Name -|Description - -|`openshift_logging_kibana_cert` -|A browser-facing certificate for the Kibana server. - -|`openshift_logging_kibana_key` -|A key to be used with the browser-facing Kibana certificate. - -|`openshift_logging_kibana_ca` -|The absolute path on the control node to the CA file to use -for the browser facing Kibana certs. - -|=== diff --git a/modules/cluster-logging-deploy-cli.adoc b/modules/cluster-logging-deploy-cli.adoc deleted file mode 100644 index 50fa7c12deee..000000000000 --- a/modules/cluster-logging-deploy-cli.adoc +++ /dev/null @@ -1,416 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-deploying.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-deploy-cli_{context}"] -= Installing the {logging-title} using the CLI - -You can use the {product-title} CLI to install the OpenShift Elasticsearch and Red Hat OpenShift Logging Operators. - -.Prerequisites - -* Ensure that you have the necessary persistent storage for Elasticsearch. Note that each Elasticsearch node requires its own storage volume. -+ -[NOTE] -==== -If you use a local volume for persistent storage, do not use a raw block volume, which is described with `volumeMode: block` in the `LocalVolume` object. Elasticsearch cannot use raw block volumes. -==== -+ -Elasticsearch is a memory-intensive application. By default, {product-title} installs three Elasticsearch nodes with memory requests and limits of 16 GB. This initial set of three {product-title} nodes might not have enough memory to run Elasticsearch within your cluster. If you experience memory issues that are related to Elasticsearch, add more Elasticsearch nodes to your cluster rather than increasing the memory on existing nodes. - -ifdef::openshift-origin[] -* Ensure that you have downloaded the {cluster-manager-url-pull} as shown in _Obtaining the installation program_ in the installation documentation for your platform. -+ -If you have the pull secret, add the `redhat-operators` catalog to the OperatorHub custom resource (CR) as shown in *Configuring {product-title} to use Red Hat Operators*. 
-endif::[] - -.Procedure - -To install the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator using the CLI: - -. Create a namespace for the OpenShift Elasticsearch Operator. - -.. Create a namespace object YAML file (for example, `eo-namespace.yaml`) for the OpenShift Elasticsearch Operator: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: openshift-operators-redhat <1> - annotations: - openshift.io/node-selector: "" - labels: - openshift.io/cluster-monitoring: "true" <2> ----- -<1> You must specify the `openshift-operators-redhat` namespace. To prevent possible conflicts with metrics, you should configure the Prometheus Cluster Monitoring stack to scrape metrics from the `openshift-operators-redhat` namespace and not the `openshift-operators` namespace. The `openshift-operators` namespace might contain community Operators, which are untrusted and could publish a metric with the same name as -ifdef::openshift-rosa[] - a ROSA -endif::[] -ifdef::openshift-dedicated[] - an {product-title} -endif::[] -metric, which would cause conflicts. -<2> String. You must specify this label as shown to ensure that cluster monitoring scrapes the `openshift-operators-redhat` namespace. - -.. Create the namespace: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f eo-namespace.yaml ----- - -. Create a namespace for the Red Hat OpenShift Logging Operator: - -.. Create a namespace object YAML file (for example, `olo-namespace.yaml`) for the Red Hat OpenShift Logging Operator: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: openshift-logging - annotations: - openshift.io/node-selector: "" - labels: - openshift.io/cluster-monitoring: "true" ----- - -.. Create the namespace: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f olo-namespace.yaml ----- - -. Install the OpenShift Elasticsearch Operator by creating the following objects: - -.. Create an Operator Group object YAML file (for example, `eo-og.yaml`) for the OpenShift Elasticsearch Operator: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: openshift-operators-redhat - namespace: openshift-operators-redhat <1> -spec: {} ----- -<1> You must specify the `openshift-operators-redhat` namespace. - -.. Create an Operator Group object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f eo-og.yaml ----- - -.. Create a Subscription object YAML file (for example, `eo-sub.yaml`) to -subscribe a namespace to the OpenShift Elasticsearch Operator. -+ -.Example Subscription -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: "elasticsearch-operator" - namespace: "openshift-operators-redhat" <1> -spec: - channel: "stable-5.5" <2> - installPlanApproval: "Automatic" <3> - source: "redhat-operators" <4> - sourceNamespace: "openshift-marketplace" - name: "elasticsearch-operator" ----- -<1> You must specify the `openshift-operators-redhat` namespace. -<2> Specify `stable`, or `stable-5.` as the channel. See the following note. -<3> `Automatic` allows the Operator Lifecycle Manager (OLM) to automatically update the Operator when a new version is available. `Manual` requires a user with appropriate credentials to approve the Operator update. -<4> Specify `redhat-operators`. 
If your {product-title} cluster is installed on a restricted network, also known as a disconnected cluster, -specify the name of the CatalogSource object created when you configured the Operator Lifecycle Manager (OLM). -+ -[NOTE] -==== -Specifying `stable` installs the current version of the latest stable release. Using `stable` with `installPlanApproval: "Automatic"`, will automatically upgrade your operators to the latest stable major and minor release. - -Specifying `stable-5.` installs the current minor version of a specific major release. Using `stable-5.` with `installPlanApproval: "Automatic"`, will automatically upgrade your operators to the latest stable minor release within the major release you specify with `x`. -==== - - -.. Create the Subscription object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f eo-sub.yaml ----- -+ -The OpenShift Elasticsearch Operator is installed to the `openshift-operators-redhat` namespace and copied to each project in the cluster. - -.. Verify the Operator installation: -+ -[source,terminal] ----- -$ oc get csv --all-namespaces ----- -+ -.Example output -[source,terminal] ----- -NAMESPACE NAME DISPLAY VERSION REPLACES PHASE -default elasticsearch-operator.5.1.0-202007012112.p0 OpenShift Elasticsearch Operator 5.5.0-202007012112.p0 Succeeded -kube-node-lease elasticsearch-operator.5.5.0-202007012112.p0 OpenShift Elasticsearch Operator 5.5.0-202007012112.p0 Succeeded -kube-public elasticsearch-operator.5.5.0-202007012112.p0 OpenShift Elasticsearch Operator 5.5.0-202007012112.p0 Succeeded -kube-system elasticsearch-operator.5.5.0-202007012112.p0 OpenShift Elasticsearch Operator 5.5.0-202007012112.p0 Succeeded -openshift-apiserver-operator elasticsearch-operator.5.5.0-202007012112.p0 OpenShift Elasticsearch Operator 5.5.0-202007012112.p0 Succeeded -openshift-apiserver elasticsearch-operator.5.5.0-202007012112.p0 OpenShift Elasticsearch Operator 5.5.0-202007012112.p0 Succeeded -openshift-authentication-operator elasticsearch-operator.5.5.0-202007012112.p0 OpenShift Elasticsearch Operator 5.5.0-202007012112.p0 Succeeded -openshift-authentication elasticsearch-operator.5.5.0-202007012112.p0 OpenShift Elasticsearch Operator 5.5.0-202007012112.p0 Succeeded -... ----- -+ -There should be an OpenShift Elasticsearch Operator in each namespace. The version number might be different than shown. - -. Install the Red Hat OpenShift Logging Operator by creating the following objects: - -.. Create an Operator Group object YAML file (for example, `olo-og.yaml`) for the Red Hat OpenShift Logging Operator: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: cluster-logging - namespace: openshift-logging <1> -spec: - targetNamespaces: - - openshift-logging <1> ----- -<1> You must specify the `openshift-logging` namespace. - -.. Create an Operator Group object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f olo-og.yaml ----- - -.. Create a Subscription object YAML file (for example, `olo-sub.yaml`) to -subscribe a namespace to the Red Hat OpenShift Logging Operator. 
-+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: cluster-logging - namespace: openshift-logging <1> -spec: - channel: "stable" <2> - name: cluster-logging - source: redhat-operators <3> - sourceNamespace: openshift-marketplace ----- -<1> You must specify the `openshift-logging` namespace. -<2> Specify `stable`, or `stable-5.` as the channel. -<3> Specify `redhat-operators`. If your {product-title} cluster is installed on a restricted network, also known as a disconnected cluster, specify the name of the CatalogSource object you created when you configured the Operator Lifecycle Manager (OLM). -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f olo-sub.yaml ----- -+ -The Red Hat OpenShift Logging Operator is installed to the `openshift-logging` namespace. - -.. Verify the Operator installation. -+ -There should be a Red Hat OpenShift Logging Operator in the `openshift-logging` namespace. The Version number might be different than shown. -+ -[source,terminal] ----- -$ oc get csv -n openshift-logging ----- -+ -.Example output -[source,terminal] ----- -NAMESPACE NAME DISPLAY VERSION REPLACES PHASE -... -openshift-logging clusterlogging.5.1.0-202007012112.p0 OpenShift Logging 5.1.0-202007012112.p0 Succeeded -... ----- - -. Create an OpenShift Logging instance: - -.. Create an instance object YAML file (for example, `olo-instance.yaml`) for the Red Hat OpenShift Logging Operator: -+ -[NOTE] -==== -This default OpenShift Logging configuration should support a wide array of environments. Review the topics on tuning and -configuring {logging} components for information about modifications you can make to your OpenShift Logging cluster. -==== -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" <1> - namespace: "openshift-logging" -spec: - managementState: "Managed" <2> - logStore: - type: "elasticsearch" <3> - retentionPolicy: <4> - application: - maxAge: 1d - infra: - maxAge: 7d - audit: - maxAge: 7d - elasticsearch: - nodeCount: 3 <5> - storage: - storageClassName: "" <6> - size: 200G - resources: <7> - limits: - memory: "16Gi" - requests: - memory: "16Gi" - proxy: <8> - resources: - limits: - memory: 256Mi - requests: - memory: 256Mi - redundancyPolicy: "SingleRedundancy" - visualization: - type: "kibana" <9> - kibana: - replicas: 1 - collection: - logs: - type: "fluentd" <10> - fluentd: {} ----- -<1> The name must be `instance`. -<2> The OpenShift Logging management state. In some cases, if you change the OpenShift Logging defaults, you must set this to `Unmanaged`. -However, an unmanaged deployment does not receive updates until OpenShift Logging is placed back into a managed state. Placing a deployment back into a managed state might revert any modifications you made. -<3> Settings for configuring Elasticsearch. Using the custom resource (CR), you can configure shard replication policy and persistent storage. -<4> Specify the length of time that Elasticsearch should retain each log source. Enter an integer and a time designation: weeks(w), hours(h/H), minutes(m) and seconds(s). For example, `7d` for seven days. Logs older than the `maxAge` are deleted. You must specify a retention policy for each log source or the Elasticsearch indices will not be created for that source. -<5> Specify the number of Elasticsearch nodes. See the note that follows this list. 
-<6> Enter the name of an existing storage class for Elasticsearch storage. For best performance, specify a storage class that allocates block storage. If you do not specify a storage class, {product-title} deploys OpenShift Logging with ephemeral storage only. -<7> Specify the CPU and memory requests for Elasticsearch as needed. If you leave these values blank, the OpenShift Elasticsearch Operator sets default values that are sufficient for most deployments. The default values are `16Gi` for the memory request and `1` for the CPU request. -<8> Specify the CPU and memory requests for the Elasticsearch proxy as needed. If you leave these values blank, the OpenShift Elasticsearch Operator sets default values that should be sufficient for most deployments. The default values are `256Mi` for the memory request and `100m` for the CPU request. -<9> Settings for configuring Kibana. Using the CR, you can scale Kibana for redundancy and configure the CPU and memory for your Kibana pods. For more information, see *Configuring the log visualizer*. -<10> Settings for configuring Fluentd. Using the CR, you can configure Fluentd CPU and memory limits. For more information, see *Configuring Fluentd*. -+ -[NOTE] -+ -==== -The maximum number of Elasticsearch control plane nodes is three. If you specify a `nodeCount` greater than `3`, {product-title} creates three Elasticsearch nodes that are Master-eligible nodes, with the master, client, and data roles. The additional Elasticsearch nodes are created as Data-only nodes, using client and data roles. Control plane nodes perform cluster-wide actions such as creating or deleting an index, shard allocation, and tracking nodes. Data nodes hold the shards and perform data-related operations such as CRUD, search, and aggregations. Data-related operations are I/O-, memory-, and CPU-intensive. It is important to monitor these resources and to add more Data nodes if the current nodes are overloaded. - -For example, if `nodeCount=4`, the following nodes are created: - -[source,terminal] ----- -$ oc get deployment ----- - -.Example output -[source,terminal] ----- -cluster-logging-operator 1/1 1 1 18h -elasticsearch-cd-x6kdekli-1 1/1 1 0 6m54s -elasticsearch-cdm-x6kdekli-1 1/1 1 1 18h -elasticsearch-cdm-x6kdekli-2 1/1 1 0 6m49s -elasticsearch-cdm-x6kdekli-3 1/1 1 0 6m44s ----- - -The number of primary shards for the index templates is equal to the number of Elasticsearch data nodes. -==== - -.. Create the instance: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f olo-instance.yaml ----- -+ -This creates the {logging} components, the `Elasticsearch` custom resource and components, and the Kibana interface. - -. Verify the installation by listing the pods in the *openshift-logging* project. 
-+ -You should see several pods for components of the Logging subsystem, similar to the following list: -+ -[source,terminal] ----- -$ oc get pods -n openshift-logging ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cluster-logging-operator-66f77ffccb-ppzbg 1/1 Running 0 7m -elasticsearch-cdm-ftuhduuw-1-ffc4b9566-q6bhp 2/2 Running 0 2m40s -elasticsearch-cdm-ftuhduuw-2-7b4994dbfc-rd2gc 2/2 Running 0 2m36s -elasticsearch-cdm-ftuhduuw-3-84b5ff7ff8-gqnm2 2/2 Running 0 2m4s -collector-587vb 1/1 Running 0 2m26s -collector-7mpb9 1/1 Running 0 2m30s -collector-flm6j 1/1 Running 0 2m33s -collector-gn4rn 1/1 Running 0 2m26s -collector-nlgb6 1/1 Running 0 2m30s -collector-snpkt 1/1 Running 0 2m28s -kibana-d6d5668c5-rppqm 2/2 Running 0 2m39s ----- diff --git a/modules/cluster-logging-deploy-console.adoc b/modules/cluster-logging-deploy-console.adoc deleted file mode 100644 index c5af639bf08f..000000000000 --- a/modules/cluster-logging-deploy-console.adoc +++ /dev/null @@ -1,244 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-deploying.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-deploy-console_{context}"] -= Installing the {logging-title} using the web console - -ifndef::openshift-rosa,openshift-dedicated[] -You can use the {product-title} web console to install the OpenShift Elasticsearch and Red Hat OpenShift Logging Operators. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -You can install the OpenShift Elasticsearch and Red Hat OpenShift Logging Operators by using the {product-title} {cluster-manager-url}. -endif::[] - -[NOTE] -==== -If you do not want to use the default Elasticsearch log store, you can remove the internal Elasticsearch `logStore` and Kibana `visualization` components from the `ClusterLogging` custom resource (CR). Removing these components is optional but saves resources. For more information, see the additional resources of this section. -==== - -.Prerequisites - -* Ensure that you have the necessary persistent storage for Elasticsearch. Note that each Elasticsearch node -requires its own storage volume. -+ -[NOTE] -==== -If you use a local volume for persistent storage, do not use a raw block volume, which is described with `volumeMode: block` in the `LocalVolume` object. Elasticsearch cannot use raw block volumes. -==== -+ -Elasticsearch is a memory-intensive application. By default, {product-title} installs three Elasticsearch nodes with memory requests and limits of 16 GB. This initial set of three {product-title} nodes might not have enough memory to run Elasticsearch within your cluster. If you experience memory issues that are related to Elasticsearch, add more Elasticsearch nodes to your cluster rather than increasing the memory on existing nodes. - -ifdef::openshift-origin[] -* Ensure that you have downloaded the {cluster-manager-url-pull} as shown in _Obtaining the installation program_ in the installation documentation for your platform. -+ -If you have the pull secret, add the `redhat-operators` catalog to the OperatorHub custom resource (CR) as shown in _Configuring {product-title} to use Red Hat Operators_. 
-endif::[] - -.Procedure - -ifndef::openshift-rosa,openshift-dedicated[] -To install the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator by using the {product-title} web console: -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -To install the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator by using the {product-title} {cluster-manager-url}: -endif::[] - -. Install the OpenShift Elasticsearch Operator: - -ifndef::openshift-rosa,openshift-dedicated[] -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -.. In the {hybrid-console}, click *Operators* -> *OperatorHub*. -endif::[] - -.. Choose *OpenShift Elasticsearch Operator* from the list of available Operators, and click *Install*. - -.. Ensure that the *All namespaces on the cluster* is selected under *Installation Mode*. - -.. Ensure that *openshift-operators-redhat* is selected under *Installed Namespace*. -+ -You must specify the `openshift-operators-redhat` namespace. The `openshift-operators` namespace might contain Community Operators, which are untrusted and could publish a metric with the same name as -ifdef::openshift-rosa[] - a ROSA -endif::[] -ifdef::openshift-dedicated[] - an {product-title} -endif::[] -metric, which would cause conflicts. - -.. Select *Enable operator recommended cluster monitoring on this namespace*. -+ -This option sets the `openshift.io/cluster-monitoring: "true"` label in the Namespace object. You must select this option to ensure that cluster monitoring scrapes the `openshift-operators-redhat` namespace. - -.. Select *stable-5.x* as the *Update Channel*. - -.. Select an *Approval Strategy*. -+ -* The *Automatic* strategy allows Operator Lifecycle Manager (OLM) to automatically update the Operator when a new version is available. -+ -* The *Manual* strategy requires a user with appropriate credentials to approve the Operator update. - -.. Click *Install*. - -.. Verify that the OpenShift Elasticsearch Operator installed by switching to the *Operators* → *Installed Operators* page. - -.. Ensure that *OpenShift Elasticsearch Operator* is listed in all projects with a *Status* of *Succeeded*. - -. Install the Red Hat OpenShift Logging Operator: - -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. - -.. Choose *Red Hat OpenShift Logging* from the list of available Operators, and click *Install*. - -.. Ensure that the *A specific namespace on the cluster* is selected under *Installation Mode*. - -.. Ensure that *Operator recommended namespace* is *openshift-logging* under *Installed Namespace*. - -.. Select *Enable operator recommended cluster monitoring on this namespace*. -+ -This option sets the `openshift.io/cluster-monitoring: "true"` label in the Namespace object. You must select this option to ensure that cluster monitoring scrapes the `openshift-logging` namespace. - -.. Select *stable-5.x* as the *Update Channel*. - -.. Select an *Approval Strategy*. -+ -* The *Automatic* strategy allows Operator Lifecycle Manager (OLM) to automatically update the Operator when a new version is available. -+ -* The *Manual* strategy requires a user with appropriate credentials to approve the Operator update. - -.. Click *Install*. - -.. Verify that the Red Hat OpenShift Logging Operator installed by switching to the *Operators* → *Installed Operators* page. - -.. 
Ensure that *Red Hat OpenShift Logging* is listed in the *openshift-logging* project with a *Status* of *Succeeded*. -+ -If the Operator does not appear as installed, to troubleshoot further: -+ -* Switch to the *Operators* → *Installed Operators* page and inspect the *Status* column for any errors or failures. -* Switch to the *Workloads* → *Pods* page and check the logs in any pods in the `openshift-logging` project that are reporting issues. - -. Create an OpenShift Logging instance: - -.. Switch to the *Administration* -> *Custom Resource Definitions* page. - -.. On the *Custom Resource Definitions* page, click *ClusterLogging*. - -.. On the *Custom Resource Definition details* page, select *View Instances* from the *Actions* menu. - -.. On the *ClusterLoggings* page, click *Create ClusterLogging*. -+ -You might have to refresh the page to load the data. - -.. In the YAML field, replace the code with the following: -+ -[NOTE] -==== -This default OpenShift Logging configuration should support a wide array of environments. Review the topics on tuning and -configuring {logging} components for information on modifications you can make to your OpenShift Logging cluster. -==== -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" <1> - namespace: "openshift-logging" -spec: - managementState: "Managed" <2> - logStore: - type: "elasticsearch" <3> - retentionPolicy: <4> - application: - maxAge: 1d - infra: - maxAge: 7d - audit: - maxAge: 7d - elasticsearch: - nodeCount: 3 <5> - storage: - storageClassName: "" <6> - size: 200G - resources: <7> - limits: - memory: "16Gi" - requests: - memory: "16Gi" - proxy: <8> - resources: - limits: - memory: 256Mi - requests: - memory: 256Mi - redundancyPolicy: "SingleRedundancy" - visualization: - type: "kibana" <9> - kibana: - replicas: 1 - collection: - logs: - type: "fluentd" <10> - fluentd: {} ----- -<1> The name must be `instance`. -<2> The OpenShift Logging management state. In some cases, if you change the OpenShift Logging defaults, you must set this to `Unmanaged`. -However, an unmanaged deployment does not receive updates until OpenShift Logging is placed back into a managed state. -<3> Settings for configuring Elasticsearch. Using the CR, you can configure shard replication policy and persistent storage. -<4> Specify the length of time that Elasticsearch should retain each log source. Enter an integer and a time designation: weeks(w), hours(h/H), minutes(m) and seconds(s). For example, `7d` for seven days. Logs older than the `maxAge` are deleted. You must specify a retention policy for each log source or the Elasticsearch indices will not be created for that source. -<5> Specify the number of Elasticsearch nodes. See the note that follows this list. -<6> Enter the name of an existing storage class for Elasticsearch storage. For best performance, specify a storage class that allocates block storage. If you do not specify a storage class, OpenShift Logging uses ephemeral storage. -<7> Specify the CPU and memory requests for Elasticsearch as needed. If you leave these values blank, the OpenShift Elasticsearch Operator sets default values that should be sufficient for most deployments. The default values are `16Gi` for the memory request and `1` for the CPU request. -<8> Specify the CPU and memory requests for the Elasticsearch proxy as needed. If you leave these values blank, the OpenShift Elasticsearch Operator sets default values that should be sufficient for most deployments. 
The default values are `256Mi` for the memory request and `100m` for the CPU request. -<9> Settings for configuring Kibana. Using the CR, you can scale Kibana for redundancy and configure the CPU and memory for your Kibana nodes. For more information, see *Configuring the log visualizer*. -<10> Settings for configuring Fluentd. Using the CR, you can configure Fluentd CPU and memory limits. For more information, see *Configuring Fluentd*. -+ -[NOTE] -+ -==== -The maximum number of Elasticsearch control plane nodes is three. If you specify a `nodeCount` greater than `3`, {product-title} creates three Elasticsearch nodes that are Master-eligible nodes, with the master, client, and data roles. The additional Elasticsearch nodes are created as Data-only nodes, using client and data roles. Control plane nodes perform cluster-wide actions such as creating or deleting an index, shard allocation, and tracking nodes. Data nodes hold the shards and perform data-related operations such as CRUD, search, and aggregations. Data-related operations are I/O-, memory-, and CPU-intensive. It is important to monitor these resources and to add more Data nodes if the current nodes are overloaded. - -For example, if `nodeCount=4`, the following nodes are created: - -[source,terminal] ----- -$ oc get deployment ----- - -.Example output -[source,terminal] ----- -cluster-logging-operator 1/1 1 1 18h -elasticsearch-cd-x6kdekli-1 0/1 1 0 6m54s -elasticsearch-cdm-x6kdekli-1 1/1 1 1 18h -elasticsearch-cdm-x6kdekli-2 0/1 1 0 6m49s -elasticsearch-cdm-x6kdekli-3 0/1 1 0 6m44s ----- - -The number of primary shards for the index templates is equal to the number of Elasticsearch data nodes. -==== - -.. Click *Create*. This creates the {logging} components, the `Elasticsearch` custom resource and components, and the Kibana interface. - -. Verify the installation: - -.. Switch to the *Workloads* -> *Pods* page. - -.. Select the *openshift-logging* project. -+ -You should see several pods for OpenShift Logging, Elasticsearch, Fluentd, and Kibana similar to the following list: -+ -* cluster-logging-operator-cb795f8dc-xkckc -* collector-pb2f8 -* elasticsearch-cdm-b3nqzchd-1-5c6797-67kfz -* elasticsearch-cdm-b3nqzchd-2-6657f4-wtprv -* elasticsearch-cdm-b3nqzchd-3-588c65-clg7g -* fluentd-2c7dg -* fluentd-9z7kk -* fluentd-br7r2 -* fluentd-fn2sb -* fluentd-zqgqx -* kibana-7fb4fd4cc9-bvt4p \ No newline at end of file diff --git a/modules/cluster-logging-deploy-label.adoc b/modules/cluster-logging-deploy-label.adoc deleted file mode 100644 index 38a3aa28704e..000000000000 --- a/modules/cluster-logging-deploy-label.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-deploy.adoc - -[id="cluster-logging-deploy-label_{context}"] -= Labeling nodes - -At 100 nodes or more, pre-pull the logging images from the registry. After deploying the logging pods, such as Elasticsearch and Kibana, node labeling should be done in steps of 20 nodes at a time. For example: - -Using a simple loop: - -[source,terminal] ----- -$ while read node; do oc label nodes $node elasticsearch-fluentd=true; done < 20_fluentd.lst ----- - -The following also works: - -[source,terminal] ----- -$ oc label nodes 10.10.0.{100..119} elasticsearch-fluentd=true ----- - -Labeling nodes in groups paces the daemon sets used by the {logging}, helping to avoid contention on shared resources such as the image registry. - -[NOTE] -==== -Check for the occurrence of any "CrashLoopBackOff | ImagePullFailed | Error" issues. 
-The `oc logs`, `oc describe pod`, and `oc get event` commands are helpful for diagnosing these issues. -==== diff --git a/modules/cluster-logging-deploy-memory.adoc b/modules/cluster-logging-deploy-memory.adoc deleted file mode 100644 index accf34ab0172..000000000000 --- a/modules/cluster-logging-deploy-memory.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-deploy.adoc - -[id="cluster-logging-deploy-memory_{context}"] -= Configuring memory for Elasticsearch instances - -By default, the amount of RAM allocated to each Elasticsearch instance is 16 GB. You can change this value as needed. - -Keep in mind that *half* of this value is passed to each Elasticsearch pod's Java process as the link:https://www.elastic.co/guide/en/elasticsearch/guide/current/heap-sizing.html#_give_half_your_memory_to_lucene[heap size]. - -.Procedure - diff --git a/modules/cluster-logging-deploy-multitenant.adoc b/modules/cluster-logging-deploy-multitenant.adoc deleted file mode 100644 index 7a190bff3fc0..000000000000 --- a/modules/cluster-logging-deploy-multitenant.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-deploying.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-deploy-multitenant_{context}"] -= Allowing traffic between projects when network isolation is enabled - -Your cluster network plugin might enforce network isolation. If so, you must allow network traffic between the projects that contain the operators deployed by OpenShift Logging. - -Network isolation blocks network traffic between pods or services that are in different projects. The {logging} installs the _OpenShift Elasticsearch Operator_ in the `openshift-operators-redhat` project and the _Red Hat OpenShift Logging Operator_ in the `openshift-logging` project. Therefore, you must allow traffic between these two projects. - -{product-title} offers two supported choices for the network plugin, OpenShift SDN and OVN-Kubernetes. These two plugins implement network isolation in different ways. - -OpenShift SDN has three modes: - -network policy:: This is the default mode. If no policy is defined, it allows all traffic. However, if a user defines a policy, they typically start by denying all traffic and then adding exceptions. This process might break applications that are running in different projects. Therefore, explicitly configure the policy to allow traffic to egress from one logging-related project to the other. - -ifdef::openshift-enterprise,openshift-origin[] -multitenant:: This mode enforces network isolation. You must join the two logging-related projects to allow traffic between them. -endif::[] - -subnet:: This mode allows all traffic. It does not enforce network isolation. No action is needed. - -OVN-Kubernetes always uses a *network policy*. Therefore, as with OpenShift SDN, you must configure the policy to allow traffic to egress from one logging-related project to the other. - -.Procedure - -* If you are using OpenShift SDN in *multitenant* mode, join the two projects. For example: -+ -[source,terminal] ----- -$ oc adm pod-network join-projects --to=openshift-operators-redhat openshift-logging ----- - -* Otherwise, for OpenShift SDN in *network policy* mode and OVN-Kubernetes, perform the following actions: - -.. Set a label on the `openshift-operators-redhat` namespace. For example: -+ -[source,terminal] ----- -$ oc label namespace openshift-operators-redhat project=openshift-operators-redhat ----- - -.. 
Create a network policy object in the `openshift-logging` namespace that allows ingress from the `openshift-operators-redhat`, `openshift-monitoring` and `openshift-ingress` projects to the openshift-logging project. For example: -+ -[source,yaml] ----- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: allow-from-openshift-monitoring-ingress-operators-redhat -spec: - ingress: - - from: - - podSelector: {} - - from: - - namespaceSelector: - matchLabels: - project: "openshift-operators-redhat" - - from: - - namespaceSelector: - matchLabels: - name: "openshift-monitoring" - - from: - - namespaceSelector: - matchLabels: - network.openshift.io/policy-group: ingress - podSelector: {} - policyTypes: - - Ingress ----- diff --git a/modules/cluster-logging-deploy-storage-considerations.adoc b/modules/cluster-logging-deploy-storage-considerations.adoc deleted file mode 100644 index 220e59386b62..000000000000 --- a/modules/cluster-logging-deploy-storage-considerations.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-deploy.adoc - -[id="cluster-logging-deploy-storage-considerations_{context}"] -= Storage considerations for the {logging-title} - -//// -An Elasticsearch index is a collection of primary shards and their corresponding replica shards. This is how Elasticsearch implements high availability internally, so there is little requirement to use hardware based mirroring RAID variants. RAID 0 can still be used to increase overall disk performance. -//// - -A persistent volume is required for each Elasticsearch deployment configuration. On {product-title} this is achieved using persistent volume claims. - -[NOTE] -==== -If you use a local volume for persistent storage, do not use a raw block volume, which is described with `volumeMode: block` in the `LocalVolume` object. Elasticsearch cannot use raw block volumes. -==== - -The OpenShift Elasticsearch Operator names the PVCs using the Elasticsearch resource name. - -//// -Below are capacity planning guidelines for {product-title} aggregate logging. - -*Example scenario* - -Assumptions: - -. Which application: Apache -. Bytes per line: 256 -. Lines per second load on application: 1 -. Raw text data -> JSON - -Baseline (256 characters per minute -> 15KB/min) - -[cols="3,4",options="header"] -|=== -|Logging pods -|Storage Throughput - -|3 es -1 kibana -1 fluentd -| 6 pods total: 90000 x 86400 = 7,7 GB/day - -|3 es -1 kibana -11 fluentd -| 16 pods total: 225000 x 86400 = 24,0 GB/day - -|3 es -1 kibana -20 fluentd -|25 pods total: 225000 x 86400 = 32,4 GB/day -|=== - - -Calculating the total logging throughput and disk space required for your {product-title} cluster requires knowledge of your applications. For example, if one of your applications on average logs 10 lines-per-second, each 256 bytes-per-line, calculate per-application throughput and disk space as follows: - ----- - (bytes-per-line * (lines-per-second) = 2560 bytes per app per second - (2560) * (number-of-pods-per-node,100) = 256,000 bytes per second per node - 256k * (number-of-nodes) = total logging throughput per cluster per second ----- -//// - -Fluentd ships any logs from *systemd journal* and **/var/log/containers/*.log** to Elasticsearch. - -Elasticsearch requires sufficient memory to perform large merge operations. If it does not have enough memory, it becomes unresponsive. 
To avoid this problem, evaluate how much application log data you need, and allocate approximately double that amount of free storage capacity. - -By default, when storage capacity is 85% full, Elasticsearch stops allocating new data to the node. At 90%, Elasticsearch attempts to relocate existing shards from that node to other nodes if possible. But if no nodes have a free capacity below 85%, Elasticsearch effectively rejects creating new indices and becomes RED. - -[NOTE] -==== -These low and high watermark values are Elasticsearch defaults in the current release. You can modify these default values. Although the alerts use the same default values, you cannot change these values in the alerts. -==== diff --git a/modules/cluster-logging-deploying-about.adoc b/modules/cluster-logging-deploying-about.adoc deleted file mode 100644 index 1fb3db28544e..000000000000 --- a/modules/cluster-logging-deploying-about.adoc +++ /dev/null @@ -1,187 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-deploying-about.adoc -// * serverless/monitor/cluster-logging-serverless.adoc - -:_content-type: CONCEPT -[id="cluster-logging-deploying-about_{context}"] -= About deploying and configuring the {logging-title} - -The {logging} is designed to be used with the default configuration, which is tuned for small to medium sized {product-title} clusters. - -The installation instructions that follow include a sample `ClusterLogging` custom resource (CR), which you can use to create a {logging} instance and configure your {logging} environment. - -If you want to use the default {logging} install, you can use the sample CR directly. - -If you want to customize your deployment, make changes to the sample CR as needed. The following describes the configurations you can make when installing your OpenShift Logging instance or modify after installation. See the Configuring sections for more information on working with each component, including modifications you can make outside of the `ClusterLogging` custom resource. - -[id="cluster-logging-deploy-about-config_{context}"] -== Configuring and Tuning the {logging} - -You can configure your {logging} by modifying the `ClusterLogging` custom resource deployed -in the `openshift-logging` project. - -You can modify any of the following components upon install or after install: - -Memory and CPU:: -You can adjust both the CPU and memory limits for each component by modifying the `resources` -block with valid memory and CPU values: - -[source,yaml] ----- -spec: - logStore: - elasticsearch: - resources: - limits: - cpu: - memory: 16Gi - requests: - cpu: 500m - memory: 16Gi - type: "elasticsearch" - collection: - logs: - fluentd: - resources: - limits: - cpu: - memory: - requests: - cpu: - memory: - type: "fluentd" - visualization: - kibana: - resources: - limits: - cpu: - memory: - requests: - cpu: - memory: - type: kibana ----- - -Elasticsearch storage:: -You can configure a persistent storage class and size for the Elasticsearch cluster using the `storageClass` `name` and `size` parameters. The Red Hat OpenShift Logging Operator creates a persistent volume claim (PVC) for each data node in the Elasticsearch cluster based on these parameters. - -[source,yaml] ----- - spec: - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - storage: - storageClassName: "gp2" - size: "200G" ----- - -This example specifies each data node in the cluster will be bound to a PVC that -requests "200G" of "gp2" storage. 
Each primary shard will be backed by a single replica. - -[NOTE] -==== -Omitting the `storage` block results in a deployment that includes ephemeral storage only. - -[source,yaml] ----- - spec: - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - storage: {} ----- -==== - -Elasticsearch replication policy:: -You can set the policy that defines how Elasticsearch shards are replicated across data nodes in the cluster: - -* `FullRedundancy`. The shards for each index are fully replicated to every data node. -* `MultipleRedundancy`. The shards for each index are spread over half of the data nodes. -* `SingleRedundancy`. A single copy of each shard. Logs are always available and recoverable as long as at least two data nodes exist. -* `ZeroRedundancy`. No copies of any shards. Logs may be unavailable (or lost) in the event a node is down or fails. - -//// -Log collectors:: -You can select which log collector is deployed as a daemon set to each node in the {product-title} cluster, either: - -* Fluentd - The default log collector based on Fluentd. -* Rsyslog - Alternate log collector supported as **Tech Preview** only. - ----- - spec: - collection: - logs: - fluentd: - resources: - limits: - cpu: - memory: - requests: - cpu: - memory: - type: "fluentd" ----- -//// - -[id="cluster-logging-deploy-about-sample_{context}"] -== Sample modified ClusterLogging custom resource - -The following is an example of a `ClusterLogging` custom resource modified using the options previously described. - -.Sample modified `ClusterLogging` custom resource -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - namespace: "openshift-logging" -spec: - managementState: "Managed" - logStore: - type: "elasticsearch" - retentionPolicy: - application: - maxAge: 1d - infra: - maxAge: 7d - audit: - maxAge: 7d - elasticsearch: - nodeCount: 3 - resources: - limits: - cpu: 200m - memory: 16Gi - requests: - cpu: 200m - memory: 16Gi - storage: - storageClassName: "gp2" - size: "200G" - redundancyPolicy: "SingleRedundancy" - visualization: - type: "kibana" - kibana: - resources: - limits: - memory: 1Gi - requests: - cpu: 500m - memory: 1Gi - replicas: 1 - collection: - logs: - type: "fluentd" - fluentd: - resources: - limits: - memory: 1Gi - requests: - cpu: 200m - memory: 1Gi ----- diff --git a/modules/cluster-logging-elasticsearch-audit.adoc b/modules/cluster-logging-elasticsearch-audit.adoc deleted file mode 100644 index d08144f52214..000000000000 --- a/modules/cluster-logging-elasticsearch-audit.adoc +++ /dev/null @@ -1,93 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-elasticsearch-audit_{context}"] -= Forwarding audit logs to the log store - -By default, OpenShift Logging does not store audit logs in the internal {product-title} Elasticsearch log store. You can send audit logs to this log store so, for example, you can view them in Kibana. - -To send the audit logs to the default internal Elasticsearch log store, for example to view the audit logs in Kibana, you must use the Log Forwarding API. - -[IMPORTANT] -==== -The internal {product-title} Elasticsearch log store does not provide secure storage for audit logs. Verify that the system to which you forward audit logs complies with your organizational and governmental regulations and is properly secured. The {logging-title} does not comply with those regulations. 
-==== - -.Procedure - -To use the Log Forwarding API to forward audit logs to the internal Elasticsearch instance: - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -* Create a CR to send all log types to the internal Elasticsearch instance. You can use the following example without making any changes: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - pipelines: <1> - - name: all-to-default - inputRefs: - - infrastructure - - application - - audit - outputRefs: - - default ----- -<1> A pipeline defines the type of logs to forward using the specified output. The default output forwards logs to the internal Elasticsearch instance. -+ -[NOTE] -==== -You must specify all three types of logs in the pipeline: application, infrastructure, and audit. If you do not specify a log type, those logs are not stored and will be lost. -==== -+ -* If you have an existing `ClusterLogForwarder` CR, add a pipeline to the default output for the audit logs. You do not need to define the default output. For example: -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - outputs: - - name: elasticsearch-insecure - type: "elasticsearch" - url: http://elasticsearch-insecure.messaging.svc.cluster.local - insecure: true - - name: elasticsearch-secure - type: "elasticsearch" - url: https://elasticsearch-secure.messaging.svc.cluster.local - secret: - name: es-audit - - name: secureforward-offcluster - type: "fluentdForward" - url: https://secureforward.offcluster.com:24224 - secret: - name: secureforward - pipelines: - - name: container-logs - inputRefs: - - application - outputRefs: - - secureforward-offcluster - - name: infra-logs - inputRefs: - - infrastructure - outputRefs: - - elasticsearch-insecure - - name: audit-logs - inputRefs: - - audit - outputRefs: - - elasticsearch-secure - - default <1> ----- -<1> This pipeline sends the audit logs to the internal Elasticsearch instance in addition to an external instance. diff --git a/modules/cluster-logging-elasticsearch-exposing.adoc b/modules/cluster-logging-elasticsearch-exposing.adoc deleted file mode 100644 index d8df4bfaa19b..000000000000 --- a/modules/cluster-logging-elasticsearch-exposing.adoc +++ /dev/null @@ -1,178 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-elasticsearch-exposing_{context}"] -= Exposing the log store service as a route - -By default, the log store that is deployed with the {logging-title} is not accessible from outside the logging cluster. You can enable a route with re-encryption termination for external access to the log store service for those tools that access its data. - -Externally, you can access the log store by creating a reencrypt route, your {product-title} token and the installed log store CA certificate. Then, access a node that hosts the log store service with a cURL request that contains: - -* The `Authorization: Bearer ${token}` -* The Elasticsearch reencrypt route and an link:https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html[Elasticsearch API request]. 
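For reference, after the route exists and you have a token, an external request takes roughly the following shape. This is a minimal sketch only: the `routeES` and `token` variables are set later in this procedure, and the `_cat/health` path is just one example of an Elasticsearch API request:

[source,terminal]
----
$ curl --insecure \
  -H "Authorization: Bearer ${token}" \
  "https://${routeES}/_cat/health"
----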
- -Internally, you can access the log store service using the log store cluster IP, which you can get by using either of the following commands: - -[source,terminal] ----- -$ oc get service elasticsearch -o jsonpath={.spec.clusterIP} -n openshift-logging ----- - -.Example output -[source,terminal] ----- -172.30.183.229 ----- - -[source,terminal] ----- -$ oc get service elasticsearch -n openshift-logging ----- - -.Example output -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -elasticsearch ClusterIP 172.30.183.229 9200/TCP 22h ----- - -You can check the cluster IP address with a command similar to the following: - -[source,terminal] ----- -$ oc exec elasticsearch-cdm-oplnhinv-1-5746475887-fj2f8 -n openshift-logging -- curl -tlsv1.2 --insecure -H "Authorization: Bearer ${token}" "https://172.30.183.229:9200/_cat/health" ----- - -.Example output -[source,terminal] ----- - % Total % Received % Xferd Average Speed Time Time Time Current - Dload Upload Total Spent Left Speed -100 29 100 29 0 0 108 0 --:--:-- --:--:-- --:--:-- 108 ----- - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -* You must have access to the project to be able to access the logs. - -.Procedure - -To expose the log store externally: - -. Change to the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc project openshift-logging ----- - -. Extract the CA certificate from the log store and write to the *_admin-ca_* file: -+ -[source,terminal] ----- -$ oc extract secret/elasticsearch --to=. --keys=admin-ca ----- -+ -.Example output -[source,terminal] ----- -admin-ca ----- - -. Create the route for the log store service as a YAML file: -+ -.. Create a YAML file with the following: -+ -[source,yaml] ----- -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - name: elasticsearch - namespace: openshift-logging -spec: - host: - to: - kind: Service - name: elasticsearch - tls: - termination: reencrypt - destinationCACertificate: | <1> ----- -<1> Add the log store CA certificate or use the command in the next step. You do not have to set the `spec.tls.key`, `spec.tls.certificate`, and `spec.tls.caCertificate` parameters required by some reencrypt routes. - -.. Run the following command to add the log store CA certificate to the route YAML you created in the previous step: -+ -[source,terminal] ----- -$ cat ./admin-ca | sed -e "s/^/ /" >> <file-name>.yaml ----- - -.. Create the route: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -.Example output -[source,terminal] ----- -route.route.openshift.io/elasticsearch created ----- -+ -//For an example reencrypt route object, see Re-encryption Termination. -//+ -//This line ^^ will be linked when the topic is available. - -. Check that the Elasticsearch service is exposed: - -.. Get the token of this service account to be used in the request: -+ -[source,terminal] ----- -$ token=$(oc whoami -t) ----- - -.. Set the *elasticsearch* route you created as an environment variable. -+ -[source,terminal] ----- -$ routeES=`oc get route elasticsearch -o jsonpath={.spec.host}` ----- - -.. 
To verify the route was successfully created, run the following command that accesses Elasticsearch through the exposed route: -+ -[source,terminal] ----- -curl -tlsv1.2 --insecure -H "Authorization: Bearer ${token}" "https://${routeES}" ----- -+ -The response appears similar to the following: -+ -.Example output -[source,json] ----- -{ - "name" : "elasticsearch-cdm-i40ktba0-1", - "cluster_name" : "elasticsearch", - "cluster_uuid" : "0eY-tJzcR3KOdpgeMJo-MQ", - "version" : { - "number" : "6.8.1", - "build_flavor" : "oss", - "build_type" : "zip", - "build_hash" : "Unknown", - "build_date" : "Unknown", - "build_snapshot" : true, - "lucene_version" : "7.7.0", - "minimum_wire_compatibility_version" : "5.6.0", - "minimum_index_compatibility_version" : "5.0.0" -}, - "" : "" -} ----- diff --git a/modules/cluster-logging-elasticsearch-ha.adoc b/modules/cluster-logging-elasticsearch-ha.adoc deleted file mode 100644 index 779ada7aaddb..000000000000 --- a/modules/cluster-logging-elasticsearch-ha.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-elasticsearch-ha_{context}"] -= Configuring replication policy for the log store - -You can define how Elasticsearch shards are replicated across data nodes in the cluster. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc edit clusterlogging instance ----- -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - -.... - -spec: - logStore: - type: "elasticsearch" - elasticsearch: - redundancyPolicy: "SingleRedundancy" <1> ----- -<1> Specify a redundancy policy for the shards. The change is applied upon saving the changes. -+ -* *FullRedundancy*. Elasticsearch fully replicates the primary shards for each index -to every data node. This provides the highest safety, but at the cost of the highest amount of disk required and the poorest performance. -* *MultipleRedundancy*. Elasticsearch fully replicates the primary shards for each index to half of the data nodes. -This provides a good tradeoff between safety and performance. -* *SingleRedundancy*. Elasticsearch makes one copy of the primary shards for each index. -Logs are always available and recoverable as long as at least two data nodes exist. -Better performance than MultipleRedundancy, when using 5 or more nodes. You cannot -apply this policy on deployments of single Elasticsearch node. -* *ZeroRedundancy*. Elasticsearch does not make copies of the primary shards. -Logs might be unavailable or lost in the event a node is down or fails. -Use this mode when you are more concerned with performance than safety, or have -implemented your own disk/PVC backup/restore strategy. - -[NOTE] -==== -The number of primary shards for the index templates is equal to the number of Elasticsearch data nodes. 
-==== - diff --git a/modules/cluster-logging-elasticsearch-persistent-storage-empty.adoc b/modules/cluster-logging-elasticsearch-persistent-storage-empty.adoc deleted file mode 100644 index 45744345a89c..000000000000 --- a/modules/cluster-logging-elasticsearch-persistent-storage-empty.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch-storage.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-elasticsearch-persistent-storage-empty_{context}"] -= Configuring the log store for emptyDir storage - -You can use emptyDir with your log store, which creates an ephemeral -deployment in which all of a pod's data is lost upon restart. - -[NOTE] -==== -When using emptyDir, if log storage is restarted or redeployed, you will lose data. -==== - -.Prerequisites -//Find & replace the below according to SME feedback. -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Edit the `ClusterLogging` CR to specify emptyDir: -+ -[source,yaml] ----- - spec: - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - storage: {} ----- diff --git a/modules/cluster-logging-elasticsearch-retention.adoc b/modules/cluster-logging-elasticsearch-retention.adoc deleted file mode 100644 index 2673a9bfe041..000000000000 --- a/modules/cluster-logging-elasticsearch-retention.adoc +++ /dev/null @@ -1,102 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-elasticsearch-retention_{context}"] -= Configuring log retention time - -You can configure a _retention policy_ that specifies how long the default Elasticsearch log store keeps indices for each of the three log sources: infrastructure logs, application logs, and audit logs. - -To configure the retention policy, you set a `maxAge` parameter for each log source in the `ClusterLogging` custom resource (CR). The CR applies these values to the Elasticsearch rollover schedule, which determines when Elasticsearch deletes the rolled-over indices. - -Elasticsearch rolls over an index, moving the current index and creating a new index, when an index matches any of the following conditions: - -* The index is older than the `rollover.maxAge` value in the `Elasticsearch` CR. -* The index size is greater than 40 GB × the number of primary shards. -* The index doc count is greater than 40960 KB × the number of primary shards. - -Elasticsearch deletes the rolled-over indices based on the retention policy you configure. If you do not create a retention policy for any log sources, logs are deleted after seven days by default. - -.Prerequisites -//SME Feedback Req: There are a few instances of these for prereqs. Should OpenShift Logging here be the Red Hat OpenShift Logging Operator or the logging product name? -* The {logging-title} and the OpenShift Elasticsearch Operator must be installed. - -.Procedure - -To configure the log retention time: - -. Edit the `ClusterLogging` CR to add or modify the `retentionPolicy` parameter: -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -... -spec: - managementState: "Managed" - logStore: - type: "elasticsearch" - retentionPolicy: <1> - application: - maxAge: 1d - infra: - maxAge: 7d - audit: - maxAge: 7d - elasticsearch: - nodeCount: 3 -... ----- -<1> Specify the time that Elasticsearch should retain each log source. 
Enter an integer and a time designation: weeks(w), hours(h/H), minutes(m) and seconds(s). For example, `1d` for one day. Logs older than the `maxAge` are deleted. By default, logs are retained for seven days. - -. You can verify the settings in the `Elasticsearch` custom resource (CR). -+ -For example, the Red Hat OpenShift Logging Operator updated the following `Elasticsearch` CR to configure a retention policy that includes settings to roll over active indices for the infrastructure logs every eight hours and the rolled-over indices are deleted seven days after rollover. {product-title} checks every 15 minutes to determine if the indices need to be rolled over. -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "Elasticsearch" -metadata: - name: "elasticsearch" -spec: -... - indexManagement: - policies: <1> - - name: infra-policy - phases: - delete: - minAge: 7d <2> - hot: - actions: - rollover: - maxAge: 8h <3> - pollInterval: 15m <4> -... ----- -<1> For each log source, the retention policy indicates when to delete and roll over logs for that source. -<2> When {product-title} deletes the rolled-over indices. This setting is the `maxAge` you set in the `ClusterLogging` CR. -<3> The index age for {product-title} to consider when rolling over the indices. This value is determined from the `maxAge` you set in the `ClusterLogging` CR. -<4> When {product-title} checks if the indices should be rolled over. This setting is the default and cannot be changed. -+ -[NOTE] -==== -Modifying the `Elasticsearch` CR is not supported. All changes to the retention policies must be made in the `ClusterLogging` CR. -==== -+ -The OpenShift Elasticsearch Operator deploys a cron job to roll over indices for each mapping using the defined policy, scheduled using the `pollInterval`. -+ -[source,terminal] ----- -$ oc get cronjob ----- -+ -.Example output -[source,terminal] ----- -NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE -elasticsearch-im-app */15 * * * * False 0 4s -elasticsearch-im-audit */15 * * * * False 0 4s -elasticsearch-im-infra */15 * * * * False 0 4s ----- diff --git a/modules/cluster-logging-elasticsearch-rules.adoc b/modules/cluster-logging-elasticsearch-rules.adoc deleted file mode 100644 index 1e4ba49ad24e..000000000000 --- a/modules/cluster-logging-elasticsearch-rules.adoc +++ /dev/null @@ -1,64 +0,0 @@ -:_content-type: CONCEPT -[id="cluster-logging-elasticsearch-rules_{context}"] -= About Elasticsearch alerting rules - -You can view these alerting rules in Prometheus. - -.Alerting rules -[cols="3,6,1",options="header"] -|=== -|Alert -|Description -|Severity - - -|`ElasticsearchClusterNotHealthy` -|The cluster health status has been RED for at least 2 minutes. The cluster does not accept writes, shards may be missing, or the master - node hasn't been elected yet. -|Critical - -|`ElasticsearchClusterNotHealthy` -|The cluster health status has been YELLOW for at least 20 minutes. Some shard replicas are not allocated. -|Warning - -|`ElasticsearchDiskSpaceRunningLow` -|The cluster is expected to be out of disk space within the next 6 hours. -|Critical - -|`ElasticsearchHighFileDescriptorUsage` -|The cluster is predicted to be out of file descriptors within the next hour. -|Warning - -|`ElasticsearchJVMHeapUseHigh` -|The JVM Heap usage on the specified node is high. -|Alert - -|`ElasticsearchNodeDiskWatermarkReached` -|The specified node has hit the low watermark due to low free disk space. Shards can not be allocated to this node anymore. 
You should consider adding more disk space to the node. -|Info - -|`ElasticsearchNodeDiskWatermarkReached` -|The specified node has hit the high watermark due to low free disk space. Some shards will be re-allocated to different -nodes if possible. Make sure more disk space is added to the node or drop old indices allocated to this node. -|Warning - -|`ElasticsearchNodeDiskWatermarkReached` -|The specified node has hit the flood watermark due to low free disk space. Every index that has a shard allocated on this node is enforced a read-only block. The index block must be manually released when the disk use falls below the high watermark. -|Critical - -|`ElasticsearchJVMHeapUseHigh` -|The JVM Heap usage on the specified node is too high. -|Alert - -|`ElasticsearchWriteRequestsRejectionJumps` -|Elasticsearch is experiencing an increase in write rejections on the specified node. This node might not be keeping up with the indexing speed. -|Warning - -|`AggregatedLoggingSystemCPUHigh` -|The CPU used by the system on the specified node is too high. -|Alert - -|`ElasticsearchProcessCPUHigh` -|The CPU used by Elasticsearch on the specified node is too high. -|Alert -|=== diff --git a/modules/cluster-logging-elasticsearch-scaledown.adoc b/modules/cluster-logging-elasticsearch-scaledown.adoc deleted file mode 100644 index dea23e5cc746..000000000000 --- a/modules/cluster-logging-elasticsearch-scaledown.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-log-store.adoc - -[id="cluster-logging-elasticsearch-scaledown_{context}"] -= Scaling down Elasticsearch pods - -Reducing the number of Elasticsearch pods in your cluster can result in data loss or Elasticsearch performance degradation. - -If you scale down, you should scale down by one pod at a time and allow the cluster to re-balance the shards and replicas. After the Elasticsearch health status returns to `green`, you can scale down by another pod. - -[NOTE] -==== -If your Elasticsearch cluster is set to `ZeroRedundancy`, you should not scale down your Elasticsearch pods. -==== diff --git a/modules/cluster-logging-elasticsearch-storage.adoc b/modules/cluster-logging-elasticsearch-storage.adoc deleted file mode 100644 index b850964b9441..000000000000 --- a/modules/cluster-logging-elasticsearch-storage.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-elasticsearch-storage_{context}"] -= Configuring persistent storage for the log store - -Elasticsearch requires persistent storage. The faster the storage, the faster the Elasticsearch performance. - -[WARNING] -==== -Using NFS storage as a volume or a persistent volume (or via NAS such as -Gluster) is not supported for Elasticsearch storage, as Lucene relies on file -system behavior that NFS does not supply. Data corruption and other problems can -occur. -==== - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Edit the `ClusterLogging` CR to specify that each data node in the cluster is bound to a Persistent Volume Claim. -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" -# ... 
-spec: - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - storage: - storageClassName: "gp2" - size: "200G" ----- - -This example specifies each data node in the cluster is bound to a Persistent Volume Claim that requests "200G" of AWS General Purpose SSD (gp2) storage. - -[NOTE] -==== -If you use a local volume for persistent storage, do not use a raw block volume, which is described with `volumeMode: block` in the `LocalVolume` object. Elasticsearch cannot use raw block volumes. -==== diff --git a/modules/cluster-logging-elasticsearch-tolerations.adoc b/modules/cluster-logging-elasticsearch-tolerations.adoc deleted file mode 100644 index e64a8339de5a..000000000000 --- a/modules/cluster-logging-elasticsearch-tolerations.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-elasticsearch-tolerations_{context}"] -= Using tolerations to control the log store pod placement - -You can control which nodes the log store pods runs on and prevent -other workloads from using those nodes by using tolerations on the pods. - -You apply tolerations to the log store pods through the `ClusterLogging` custom resource (CR) -and apply taints to a node through the node specification. A taint on a node is a `key:value pair` that -instructs the node to repel all pods that do not tolerate the taint. Using a specific `key:value` pair -that is not on other pods ensures only the log store pods can run on that node. - -By default, the log store pods have the following toleration: - -[source,yaml] ----- -tolerations: -- effect: "NoExecute" - key: "node.kubernetes.io/disk-pressure" - operator: "Exists" ----- - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Use the following command to add a taint to a node where you want to schedule the OpenShift Logging pods: -+ -[source,terminal] ----- -$ oc adm taint nodes =: ----- -+ -For example: -+ -[source,terminal] ----- -$ oc adm taint nodes node1 elasticsearch=node:NoExecute ----- -+ -This example places a taint on `node1` that has key `elasticsearch`, value `node`, and taint effect `NoExecute`. -Nodes with the `NoExecute` effect schedule only pods that match the taint and remove existing pods -that do not match. - -. Edit the `logstore` section of the `ClusterLogging` CR to configure a toleration for the Elasticsearch pods: -+ -[source,yaml] ----- - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 1 - tolerations: - - key: "elasticsearch" <1> - operator: "Exists" <2> - effect: "NoExecute" <3> - tolerationSeconds: 6000 <4> ----- -<1> Specify the key that you added to the node. -<2> Specify the `Exists` operator to require a taint with the key `elasticsearch` to be present on the Node. -<3> Specify the `NoExecute` effect. -<4> Optionally, specify the `tolerationSeconds` parameter to set how long a pod can remain bound to a node before being evicted. - -This toleration matches the taint created by the `oc adm taint` command. A pod with this toleration could be scheduled onto `node1`. 
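
As a quick check, you can confirm that the taint is present on the node and see where the log store pods are scheduled. The following commands are a sketch that assumes the example `node1` taint and the `openshift-logging` namespace used above:

[source,terminal]
----
# Confirm the taint is set on the node (assumes the example node1 taint above)
$ oc describe node node1 | grep Taints

# List the Elasticsearch pods and the nodes they are scheduled on
$ oc get pods -n openshift-logging --selector component=elasticsearch -o wide
----
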
diff --git a/modules/cluster-logging-eventrouter-about.adoc b/modules/cluster-logging-eventrouter-about.adoc deleted file mode 100644 index 690662b9f531..000000000000 --- a/modules/cluster-logging-eventrouter-about.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-eventrouter-about_{context}"] -= About event routing - -The Event Router is a pod that watches {product-title} events so they can be collected by the {logging-title}. -The Event Router collects events from all projects and writes them to `STDOUT`. Fluentd collects those events and forwards them into the {product-title} Elasticsearch instance. Elasticsearch indexes the events to the `infra` index. - -You must manually deploy the Event Router. diff --git a/modules/cluster-logging-eventrouter-deploy.adoc b/modules/cluster-logging-eventrouter-deploy.adoc deleted file mode 100644 index f9392832eac4..000000000000 --- a/modules/cluster-logging-eventrouter-deploy.adoc +++ /dev/null @@ -1,190 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-eventrouter.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-eventrouter-deploy_{context}"] -= Deploying and configuring the Event Router - -Use the following steps to deploy the Event Router into your cluster. You should always deploy the Event Router to the `openshift-logging` project to ensure it collects events from across the cluster. - -The following Template object creates the service account, cluster role, and cluster role binding required for the Event Router. The template also configures and deploys the Event Router pod. You can use this template without making changes, or change the deployment object CPU and memory requests. - -.Prerequisites - -* You need proper permissions to create service accounts and update cluster role bindings. For example, you can run the following template with a user that has the *cluster-admin* role. - -* The {logging-title} must be installed. - -.Procedure - -. Create a template for the Event Router: -+ -[source,yaml] ----- -kind: Template -apiVersion: template.openshift.io/v1 -metadata: - name: eventrouter-template - annotations: - description: "A pod forwarding kubernetes events to OpenShift Logging stack." 
- tags: "events,EFK,logging,cluster-logging" -objects: - - kind: ServiceAccount <1> - apiVersion: v1 - metadata: - name: eventrouter - namespace: ${NAMESPACE} - - kind: ClusterRole <2> - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: event-reader - rules: - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "watch", "list"] - - kind: ClusterRoleBinding <3> - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: event-reader-binding - subjects: - - kind: ServiceAccount - name: eventrouter - namespace: ${NAMESPACE} - roleRef: - kind: ClusterRole - name: event-reader - - kind: ConfigMap <4> - apiVersion: v1 - metadata: - name: eventrouter - namespace: ${NAMESPACE} - data: - config.json: |- - { - "sink": "stdout" - } - - kind: Deployment <5> - apiVersion: apps/v1 - metadata: - name: eventrouter - namespace: ${NAMESPACE} - labels: - component: "eventrouter" - logging-infra: "eventrouter" - provider: "openshift" - spec: - selector: - matchLabels: - component: "eventrouter" - logging-infra: "eventrouter" - provider: "openshift" - replicas: 1 - template: - metadata: - labels: - component: "eventrouter" - logging-infra: "eventrouter" - provider: "openshift" - name: eventrouter - spec: - serviceAccount: eventrouter - containers: - - name: kube-eventrouter - image: ${IMAGE} - imagePullPolicy: IfNotPresent - resources: - requests: - cpu: ${CPU} - memory: ${MEMORY} - volumeMounts: - - name: config-volume - mountPath: /etc/eventrouter - volumes: - - name: config-volume - configMap: - name: eventrouter -parameters: - - name: IMAGE <6> - displayName: Image - value: "registry.redhat.io/openshift-logging/eventrouter-rhel8:v0.4" - - name: CPU <7> - displayName: CPU - value: "100m" - - name: MEMORY <8> - displayName: Memory - value: "128Mi" - - name: NAMESPACE - displayName: Namespace - value: "openshift-logging" <9> ----- -<1> Creates a Service Account in the `openshift-logging` project for the Event Router. -<2> Creates a ClusterRole to monitor for events in the cluster. -<3> Creates a ClusterRoleBinding to bind the ClusterRole to the service account. -<4> Creates a config map in the `openshift-logging` project to generate the required `config.json` file. -<5> Creates a deployment in the `openshift-logging` project to generate and configure the Event Router pod. -<6> Specifies the image, identified by a tag such as `v0.4`. -<7> Specifies the minimum amount of CPU to allocate to the Event Router pod. Defaults to `100m`. -<8> Specifies the minimum amount of memory to allocate to the Event Router pod. Defaults to `128Mi`. -<9> Specifies the `openshift-logging` project to install objects in. - -. Use the following command to process and apply the template: -+ -[source,terminal] ----- -$ oc process -f | oc apply -n openshift-logging -f - ----- -+ -For example: -+ -[source,terminal] ----- -$ oc process -f eventrouter.yaml | oc apply -n openshift-logging -f - ----- -+ -.Example output -[source,terminal] ----- -serviceaccount/eventrouter created -clusterrole.authorization.openshift.io/event-reader created -clusterrolebinding.authorization.openshift.io/event-reader-binding created -configmap/eventrouter created -deployment.apps/eventrouter created ----- - -. Validate that the Event Router installed in the `openshift-logging` project: -+ -.. View the new Event Router pod: -+ -[source,terminal] ----- -$ oc get pods --selector component=eventrouter -o name -n openshift-logging ----- -+ -.Example output -[source,terminal] ----- -pod/cluster-logging-eventrouter-d649f97c8-qvv8r ----- - -.. 
View the events collected by the Event Router: -+ -[source,terminal] ----- -$ oc logs -n openshift-logging ----- -+ -For example: -+ -[source,terminal] ----- -$ oc logs cluster-logging-eventrouter-d649f97c8-qvv8r -n openshift-logging ----- -+ -.Example output -[source,terminal] ----- -{"verb":"ADDED","event":{"metadata":{"name":"openshift-service-catalog-controller-manager-remover.1632d931e88fcd8f","namespace":"openshift-service-catalog-removed","selfLink":"/api/v1/namespaces/openshift-service-catalog-removed/events/openshift-service-catalog-controller-manager-remover.1632d931e88fcd8f","uid":"787d7b26-3d2f-4017-b0b0-420db4ae62c0","resourceVersion":"21399","creationTimestamp":"2020-09-08T15:40:26Z"},"involvedObject":{"kind":"Job","namespace":"openshift-service-catalog-removed","name":"openshift-service-catalog-controller-manager-remover","uid":"fac9f479-4ad5-4a57-8adc-cb25d3d9cf8f","apiVersion":"batch/v1","resourceVersion":"21280"},"reason":"Completed","message":"Job completed","source":{"component":"job-controller"},"firstTimestamp":"2020-09-08T15:40:26Z","lastTimestamp":"2020-09-08T15:40:26Z","count":1,"type":"Normal"}} ----- -+ -You can also use Kibana to view events by creating an index pattern using the Elasticsearch `infra` index. diff --git a/modules/cluster-logging-export-fields.adoc b/modules/cluster-logging-export-fields.adoc deleted file mode 100644 index aed1b489be72..000000000000 --- a/modules/cluster-logging-export-fields.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-export-fields-about_{context}"] -= About exporting fields - -The logging system exports fields. Exported fields are present in the log records and are available for searching from Elasticsearch and Kibana. diff --git a/modules/cluster-logging-exported-fields-kubernetes.adoc b/modules/cluster-logging-exported-fields-kubernetes.adoc deleted file mode 100644 index 691a0900e551..000000000000 --- a/modules/cluster-logging-exported-fields-kubernetes.adoc +++ /dev/null @@ -1,272 +0,0 @@ -[id="cluster-logging-exported-fields-kubernetes_{context}"] - -// Normally, the following title would be an H1 prefixed with an `=`. However, because the following content is auto-generated at https://github.com/ViaQ/documentation/blob/main/src/data_model/public/kubernetes.part.adoc and pasted here, it is more efficient to use it as-is with no modifications. Therefore, to "realign" the content, I am going to prefix the title with `==` and use `include::modules/cluster-logging-exported-fields-kubernetes.adoc[leveloffset=0]` in the assembly file. - -// DO NOT MODIFY THE FOLLOWING CONTENT. 
Instead, update https://github.com/ViaQ/documentation/blob/main/src/data_model/model/kubernetes.yaml and run `make` as instructed here: https://github.com/ViaQ/documentation - - -== kubernetes - -The namespace for Kubernetes-specific metadata - -[horizontal] -Data type:: group - -=== kubernetes.pod_name - -The name of the pod - -[horizontal] -Data type:: keyword - - -=== kubernetes.pod_id - -The Kubernetes ID of the pod - -[horizontal] -Data type:: keyword - - -=== kubernetes.namespace_name - -The name of the namespace in Kubernetes - -[horizontal] -Data type:: keyword - - -=== kubernetes.namespace_id - -The ID of the namespace in Kubernetes - -[horizontal] -Data type:: keyword - - -=== kubernetes.host - -The Kubernetes node name - -[horizontal] -Data type:: keyword - - - -=== kubernetes.container_name - -The name of the container in Kubernetes - -[horizontal] -Data type:: keyword - - - -=== kubernetes.annotations - -Annotations associated with the Kubernetes object - -[horizontal] -Data type:: group - - -=== kubernetes.labels - -Labels present on the original Kubernetes Pod - -[horizontal] -Data type:: group - - - - - - -=== kubernetes.event - -The Kubernetes event obtained from the Kubernetes master API. This event description loosely follows `type Event` in link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#event-v1-core[Event v1 core]. - -[horizontal] -Data type:: group - -==== kubernetes.event.verb - -The type of event, `ADDED`, `MODIFIED`, or `DELETED` - -[horizontal] -Data type:: keyword -Example value:: `ADDED` - - -==== kubernetes.event.metadata - -Information related to the location and time of the event creation - -[horizontal] -Data type:: group - -===== kubernetes.event.metadata.name - -The name of the object that triggered the event creation - -[horizontal] -Data type:: keyword -Example value:: `java-mainclass-1.14d888a4cfc24890` - - -===== kubernetes.event.metadata.namespace - -The name of the namespace where the event originally occurred. Note that it differs from `kubernetes.namespace_name`, which is the namespace where the `eventrouter` application is deployed. - -[horizontal] -Data type:: keyword -Example value:: `default` - - -===== kubernetes.event.metadata.selfLink - -A link to the event - -[horizontal] -Data type:: keyword -Example value:: `/api/v1/namespaces/javaj/events/java-mainclass-1.14d888a4cfc24890` - - -===== kubernetes.event.metadata.uid - -The unique ID of the event - -[horizontal] -Data type:: keyword -Example value:: `d828ac69-7b58-11e7-9cf5-5254002f560c` - - -===== kubernetes.event.metadata.resourceVersion - -A string that identifies the server's internal version of the event. Clients can use this string to determine when objects have changed. - -[horizontal] -Data type:: integer -Example value:: `311987` - - - -==== kubernetes.event.involvedObject - -The object that the event is about. - -[horizontal] -Data type:: group - -===== kubernetes.event.involvedObject.kind - -The type of object - -[horizontal] -Data type:: keyword -Example value:: `ReplicationController` - - -===== kubernetes.event.involvedObject.namespace - -The namespace name of the involved object. Note that it may differ from `kubernetes.namespace_name`, which is the namespace where the `eventrouter` application is deployed. 
- -[horizontal] -Data type:: keyword -Example value:: `default` - - -===== kubernetes.event.involvedObject.name - -The name of the object that triggered the event - -[horizontal] -Data type:: keyword -Example value:: `java-mainclass-1` - - -===== kubernetes.event.involvedObject.uid - -The unique ID of the object - -[horizontal] -Data type:: keyword -Example value:: `e6bff941-76a8-11e7-8193-5254002f560c` - - -===== kubernetes.event.involvedObject.apiVersion - -The version of kubernetes master API - -[horizontal] -Data type:: keyword -Example value:: `v1` - - -===== kubernetes.event.involvedObject.resourceVersion - -A string that identifies the server's internal version of the pod that triggered the event. Clients can use this string to determine when objects have changed. - -[horizontal] -Data type:: keyword -Example value:: `308882` - - - -==== kubernetes.event.reason - -A short machine-understandable string that gives the reason for generating this event - -[horizontal] -Data type:: keyword -Example value:: `SuccessfulCreate` - - -==== kubernetes.event.source_component - -The component that reported this event - -[horizontal] -Data type:: keyword -Example value:: `replication-controller` - - -==== kubernetes.event.firstTimestamp - -The time at which the event was first recorded - -[horizontal] -Data type:: date -Example value:: `2017-08-07 10:11:57.000000000 Z` - - -==== kubernetes.event.count - -The number of times this event has occurred - -[horizontal] -Data type:: integer -Example value:: `1` - - -==== kubernetes.event.type - -The type of event, `Normal` or `Warning`. New types could be added in the future. - -[horizontal] -Data type:: keyword -Example value:: `Normal` - -== OpenShift - -The namespace for openshift-logging specific metadata - -[horizontal] -Data type:: group - -=== openshift.labels - -Labels added by the Cluster Log Forwarder configuration - -[horizontal] -Data type:: group diff --git a/modules/cluster-logging-exported-fields-top-level-fields.adoc b/modules/cluster-logging-exported-fields-top-level-fields.adoc deleted file mode 100644 index 882c12ec5bfd..000000000000 --- a/modules/cluster-logging-exported-fields-top-level-fields.adoc +++ /dev/null @@ -1,116 +0,0 @@ -[id="cluster-logging-exported-fields-top-level-fields_{context}"] - -// Normally, the following title would be an H1 prefixed with an `=`. However, because the following content is auto-generated at https://github.com/ViaQ/documentation/blob/main/src/data_model/public/top-level.part.adoc and pasted here, it is more efficient to use it as-is with no modifications. Therefore, to "realign" the content, I am going to prefix the title with `==` and use `include::modules/cluster-logging-exported-fields-top-level-fields.adoc[leveloffset=0]` in the assembly file. - -// DO NOT MODIFY THE FOLLOWING CONTENT. Instead, update https://github.com/ViaQ/documentation/blob/main/src/data_model/model/top-level.yaml and run `make` as instructed here: https://github.com/ViaQ/documentation - -//The top-level fields can be present in every record. The descriptions for fields that are optional begin with "Optional:" - - -The top level fields may be present in every record. - -== message - -The original log entry text, UTF-8 encoded. This field may be absent or empty if a non-empty `structured` field is present. See the description of `structured` for more. - -[horizontal] -Data type:: text -Example value:: `HAPPY` - -== structured - -Original log entry as a structured object. 
This field may be present if the forwarder was configured to parse structured JSON logs. If the original log entry was a valid structured log, this field will contain an equivalent JSON structure. Otherwise this field will be empty or absent, and the `message` field will contain the original log message. The `structured` field can have any subfields that are included in the log message, there are no restrictions defined here. - -[horizontal] -Data type:: group -Example value:: map[message:starting fluentd worker pid=21631 ppid=21618 worker=0 pid:21631 ppid:21618 worker:0] - -== @timestamp - -A UTC value that marks when the log payload was created or, if the creation time is not known, when the log payload was first collected. The “@” prefix denotes a field that is reserved for a particular use. By default, most tools look for “@timestamp” with ElasticSearch. - -[horizontal] -Data type:: date -Example value:: `2015-01-24 14:06:05.071000000 Z` - -== hostname - -The name of the host where this log message originated. In a Kubernetes cluster, this is the same as `kubernetes.host`. - -[horizontal] -Data type:: keyword - -== ipaddr4 - -The IPv4 address of the source server. Can be an array. - -[horizontal] -Data type:: ip - -== ipaddr6 - -The IPv6 address of the source server, if available. Can be an array. - -[horizontal] -Data type:: ip - -== level - -The logging level from various sources, including `rsyslog(severitytext property)`, a Python logging module, and others. - -The following values come from link:http://sourceware.org/git/?p=glibc.git;a=blob;f=misc/sys/syslog.h;h=ee01478c4b19a954426a96448577c5a76e6647c0;hb=HEAD#l74[`syslog.h`], and are preceded by their http://sourceware.org/git/?p=glibc.git;a=blob;f=misc/sys/syslog.h;h=ee01478c4b19a954426a96448577c5a76e6647c0;hb=HEAD#l51[numeric equivalents]: - -* `0` = `emerg`, system is unusable. -* `1` = `alert`, action must be taken immediately. -* `2` = `crit`, critical conditions. -* `3` = `err`, error conditions. -* `4` = `warn`, warning conditions. -* `5` = `notice`, normal but significant condition. -* `6` = `info`, informational. -* `7` = `debug`, debug-level messages. - -The two following values are not part of `syslog.h` but are widely used: - -* `8` = `trace`, trace-level messages, which are more verbose than `debug` messages. -* `9` = `unknown`, when the logging system gets a value it doesn't recognize. - -Map the log levels or priorities of other logging systems to their nearest match in the preceding list. For example, from link:https://docs.python.org/2.7/library/logging.html#logging-levels[python logging], you can match `CRITICAL` with `crit`, `ERROR` with `err`, and so on. - -[horizontal] -Data type:: keyword -Example value:: `info` - -== pid - -The process ID of the logging entity, if available. - -[horizontal] -Data type:: keyword - -== service - -The name of the service associated with the logging entity, if available. For example, syslog's `APP-NAME` and rsyslog's `programname` properties are mapped to the service field. - -[horizontal] -Data type:: keyword - -== tags - -Optional. An operator-defined list of tags placed on each log by the collector or normalizer. The payload can be a string with whitespace-delimited string tokens or a JSON list of string tokens. - -[horizontal] -Data type:: text - -== file - -The path to the log file from which the collector reads this log entry. Normally, this is a path in the `/var/log` file system of a cluster node. - -[horizontal] -Data type:: text - -== offset - -The offset value. 
Can represent bytes to the start of the log line in the file (zero- or one-based), or log line numbers (zero- or one-based), so long as the values are strictly monotonically increasing in the context of a single log file. The values are allowed to wrap, representing a new version of the log file (rotation). - -[horizontal] -Data type:: long diff --git a/modules/cluster-logging-feature-reference.adoc b/modules/cluster-logging-feature-reference.adoc deleted file mode 100644 index 1d0dbaba9d00..000000000000 --- a/modules/cluster-logging-feature-reference.adoc +++ /dev/null @@ -1,170 +0,0 @@ -// Module is included in the following assemblies: -//cluster-logging-loki.adoc -:_content-type: REFERENCE -[id="cluster-logging-about-vector_{context}"] -= About Vector -Vector is a log collector offered as an alternative to Fluentd for the {logging}. - -The following outputs are supported: - -* `elasticsearch`. An external Elasticsearch instance. The `elasticsearch` output can use a TLS connection. - -* `kafka`. A Kafka broker. The `kafka` output can use an unsecured or TLS connection. - -* `loki`. Loki, a horizontally scalable, highly available, multitenant log aggregation system. - - -[id="cluster-logging-vector-enable_{context}"] -== Enabling Vector -Vector is not enabled by default. Use the following steps to enable Vector on your {product-title} cluster. - -[IMPORTANT] -==== -Vector does not support FIPS Enabled Clusters. -==== - -.Prerequisites - -* {product-title}: {product-version} -* {logging-title-uc}: 5.4 -* FIPS disabled - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc -n openshift-logging edit ClusterLogging instance ----- - -. Add a `logging.openshift.io/preview-vector-collector: enabled` annotation to the `ClusterLogging` custom resource (CR). - -. Add `vector` as a collection type to the `ClusterLogging` custom resource (CR). 
- -[source,yaml] ----- - apiVersion: "logging.openshift.io/v1" - kind: "ClusterLogging" - metadata: - name: "instance" - namespace: "openshift-logging" - annotations: - logging.openshift.io/preview-vector-collector: enabled - spec: - collection: - logs: - type: "vector" - vector: {} ----- - -[role="_additional-resources"] -.Additional resources -* link:https://vector.dev/docs/about/what-is-vector/[Vector Documentation] - -== Collector features - -.Log Sources -[options="header"] -|=============================================================== -| Feature | Fluentd | Vector -| App container logs | ✓ | ✓ -| App-specific routing | ✓ | ✓ -| App-specific routing by namespace | ✓ | ✓ -| Infra container logs | ✓ | ✓ -| Infra journal logs | ✓ | ✓ -| Kube API audit logs | ✓ | ✓ -| OpenShift API audit logs | ✓ | ✓ -| Open Virtual Network (OVN) audit logs| ✓ | ✓ -|=============================================================== - -.Outputs -[options="header"] -|========================================================== -| Feature | Fluentd | Vector -| Elasticsearch v5-v7 | ✓ | ✓ -| Fluent forward | ✓ | -| Syslog RFC3164 | ✓ | ✓ (Logging 5.7+) -| Syslog RFC5424 | ✓ | ✓ (Logging 5.7+) -| Kafka | ✓ | ✓ -| Cloudwatch | ✓ | ✓ -| Loki | ✓ | ✓ -| HTTP | ✓ | ✓ (Logging 5.7+) -|========================================================== - -.Authorization and Authentication -[options="header"] -|================================================================= -| Feature | Fluentd | Vector -| Elasticsearch certificates | ✓ | ✓ -| Elasticsearch username / password | ✓ | ✓ -| Cloudwatch keys | ✓ | ✓ -| Cloudwatch STS | ✓ | ✓ -| Kafka certificates | ✓ | ✓ -| Kafka username / password | ✓ | ✓ -| Kafka SASL | ✓ | ✓ -| Loki bearer token | ✓ | ✓ -|================================================================= - -.Normalizations and Transformations -[options="header"] -|============================================================================ -| Feature | Fluentd | Vector -| Viaq data model - app | ✓ | ✓ -| Viaq data model - infra | ✓ | ✓ -| Viaq data model - infra(journal) | ✓ | ✓ -| Viaq data model - Linux audit | ✓ | ✓ -| Viaq data model - kube-apiserver audit | ✓ | ✓ -| Viaq data model - OpenShift API audit | ✓ | ✓ -| Viaq data model - OVN | ✓ | ✓ -| Loglevel Normalization | ✓ | ✓ -| JSON parsing | ✓ | ✓ -| Structured Index | ✓ | ✓ -| Multiline error detection | ✓ | ✓ -| Multicontainer / split indices | ✓ | ✓ -| Flatten labels | ✓ | ✓ -| CLF static labels | ✓ | ✓ -|============================================================================ - -.Tuning -[options="header"] -|========================================================== -| Feature | Fluentd | Vector -| Fluentd readlinelimit | ✓ | -| Fluentd buffer | ✓ | -| - chunklimitsize | ✓ | -| - totallimitsize | ✓ | -| - overflowaction | ✓ | -| - flushthreadcount | ✓ | -| - flushmode | ✓ | -| - flushinterval | ✓ | -| - retrywait | ✓ | -| - retrytype | ✓ | -| - retrymaxinterval | ✓ | -| - retrytimeout | ✓ | -|========================================================== - -.Visibility -[options="header"] -|===================================================== -| Feature | Fluentd | Vector -| Metrics | ✓ | ✓ -| Dashboard | ✓ | ✓ -| Alerts | ✓ | -|===================================================== - -.Miscellaneous -[options="header"] -|=========================================================== -| Feature | Fluentd | Vector -| Global proxy support | ✓ | ✓ -| x86 support | ✓ | ✓ -| ARM support | ✓ | ✓ -ifndef::openshift-rosa[] -| {ibmpowerProductName} support | ✓ 
| ✓ -| {ibmzProductName} support | ✓ | ✓ -endif::openshift-rosa[] -| IPv6 support | ✓ | ✓ -| Log event buffering | ✓ | -| Disconnected Cluster | ✓ | ✓ -|=========================================================== diff --git a/modules/cluster-logging-forwarding-about.adoc b/modules/cluster-logging-forwarding-about.adoc deleted file mode 100644 index efe67f586b8f..000000000000 --- a/modules/cluster-logging-forwarding-about.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-forwarding-about_{context}"] -= About log forwarding - -By default, the {logging-title} sends logs to the default internal Elasticsearch log store, defined in the `ClusterLogging` custom resource (CR). If you want to forward logs to other log aggregators, you can use the log forwarding features to send logs to specific endpoints within or outside your cluster. diff --git a/modules/cluster-logging-forwarding-json-logs-to-the-default-elasticsearch.adoc b/modules/cluster-logging-forwarding-json-logs-to-the-default-elasticsearch.adoc deleted file mode 100644 index 6469db82413d..000000000000 --- a/modules/cluster-logging-forwarding-json-logs-to-the-default-elasticsearch.adoc +++ /dev/null @@ -1,56 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-forwarding-json-logs-to-the-default-elasticsearch_{context}"] -= Forwarding JSON logs to the Elasticsearch log store - -For an Elasticsearch log store, if your JSON log entries _follow different schemas_, configure the `ClusterLogForwarder` custom resource (CR) to group each JSON schema into a single output definition. This way, Elasticsearch uses a separate index for each schema. - -[IMPORTANT] -==== -Because forwarding different schemas to the same index can cause type conflicts and cardinality problems, you must perform this configuration before you forward data to the Elasticsearch store. - -To avoid performance issues associated with having too many indices, consider keeping the number of possible schemas low by standardizing to common schemas. -==== - -.Procedure - -. Add the following snippet to your `ClusterLogForwarder` CR YAML file. -+ -[source,yaml] ----- -outputDefaults: - elasticsearch: - structuredTypeKey: - structuredTypeName: -pipelines: -- inputRefs: - - application - outputRefs: default - parse: json ----- - -. Optional: Use `structuredTypeKey` to specify one of the log record fields, as described in the preceding topic, xref:../logging/cluster-logging-enabling-json-logging.adoc#cluster-logging-configuration-of-json-log-data-for-default-elasticsearch_cluster-logging-enabling-json-logging[Configuring JSON log data for Elasticsearch]. Otherwise, remove this line. - -. Optional: Use `structuredTypeName` to specify a ``, as described in the preceding topic, xref:../logging/cluster-logging-enabling-json-logging.adoc#cluster-logging-configuration-of-json-log-data-for-default-elasticsearch_cluster-logging-enabling-json-logging[Configuring JSON log data for Elasticsearch]. Otherwise, remove this line. -+ -[IMPORTANT] -==== -To parse JSON logs, you must set either `structuredTypeKey` or `structuredTypeName`, or both `structuredTypeKey` and `structuredTypeName`. -==== -+ -. For `inputRefs`, specify which log types to forward by using that pipeline, such as `application,` `infrastructure`, or `audit`. - -. Add the `parse: json` element to pipelines. - -. 
Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -The Red Hat OpenShift Logging Operator redeploys the Fluentd pods. However, if they do not redeploy, delete the Fluentd pods to force them to redeploy. -+ -[source,terminal] ----- -$ oc delete pod --selector logging-infra=collector ----- diff --git a/modules/cluster-logging-forwarding-lokistack.adoc b/modules/cluster-logging-forwarding-lokistack.adoc deleted file mode 100644 index 1d9455537eac..000000000000 --- a/modules/cluster-logging-forwarding-lokistack.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module is included in the following assemblies: -//cluster-logging-loki.adoc -:_content-type: PROCEDURE -[id="cluster-logging-forwarding-lokistack_{context}"] -= Forwarding logs to LokiStack - -To configure log forwarding to the LokiStack gateway, you must create a ClusterLogging custom resource (CR). - -.Prerequisites - -* {logging-title-uc}: 5.5 and later -* `Loki Operator` Operator - -.Procedure - -. Create or edit a YAML file that defines the `ClusterLogging` custom resource (CR): - -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: - name: instance - namespace: openshift-logging -spec: - managementState: Managed - logStore: - type: lokistack - lokistack: - name: logging-loki - collection: - type: vector ----- diff --git a/modules/cluster-logging-forwarding-separate-indices.adoc b/modules/cluster-logging-forwarding-separate-indices.adoc deleted file mode 100644 index f4fae73a0eca..000000000000 --- a/modules/cluster-logging-forwarding-separate-indices.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module is included in the following assemblies: -//cluster-logging-external -:_content-type: PROCEDURE -[id="cluster-logging-forwarding-separate-indices_{context}"] -= Forwarding JSON logs from containers in the same pod to separate indices - -You can forward structured logs from different containers within the same pod to different indices. To use this feature, you must configure the pipeline with multi-container support and annotate the pods. Logs are written to indices with a prefix of `app-`. It is recommended that Elasticsearch be configured with aliases to accommodate this. - -[IMPORTANT] -==== -JSON formatting of logs varies by application. Because creating too many indices impacts performance, limit your use of this feature to creating indices for logs that have incompatible JSON formats. Use queries to separate logs from different namespaces, or applications with compatible JSON formats. -==== - -.Prerequisites - -* {logging-title-uc}: 5.5 - -.Procedure -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - outputDefaults: - elasticsearch: - enableStructuredContainerLogs: true <1> - pipelines: - - inputRefs: - - application - name: application-logs - outputRefs: - - default - parse: json ----- -<1> Enables multi-container outputs. - -. 
Create or edit a YAML file that defines the `Pod` CR object: -+ -[source,yaml] ----- - apiVersion: v1 - kind: Pod - metadata: - annotations: - containerType.logging.openshift.io/heavy: heavy <1> - containerType.logging.openshift.io/low: low - spec: - containers: - - name: heavy <2> - image: heavyimage - - name: low - image: lowimage ----- -<1> Format: `containerType.logging.openshift.io/: ` -<2> Annotation names must match container names - -[WARNING] -==== -This configuration might significantly increase the number of shards on the cluster. -==== - -.Additional Resources -* link:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/[Kubernetes Annotations] diff --git a/modules/cluster-logging-json-log-forwarding.adoc b/modules/cluster-logging-json-log-forwarding.adoc deleted file mode 100644 index a080fb425f25..000000000000 --- a/modules/cluster-logging-json-log-forwarding.adoc +++ /dev/null @@ -1,48 +0,0 @@ -[id="cluster-logging-json-log-forwarding_{context}"] -= Parsing JSON logs - -Logs including JSON logs are usually represented as a string inside the `message` field. That makes it hard for users to query specific fields inside a JSON document. OpenShift Logging's Log Forwarding API enables you to parse JSON logs into a structured object and forward them to either OpenShift Logging-managed Elasticsearch or any other third-party system supported by the Log Forwarding API. - -To illustrate how this works, suppose that you have the following structured JSON log entry. - -.Example structured JSON log entry -[source,yaml] ----- -{"level":"info","name":"fred","home":"bedrock"} ----- - -Normally, the `ClusterLogForwarder` custom resource (CR) forwards that log entry in the `message` field. The `message` field contains the JSON-quoted string equivalent of the JSON log entry, as shown in the following example. - -.Example `message` field -[source,yaml] ----- -{"message":"{\"level\":\"info\",\"name\":\"fred\",\"home\":\"bedrock\"", - "more fields..."} ----- - -To enable parsing JSON log, you add `parse: json` to a pipeline in the `ClusterLogForwarder` CR, as shown in the following example. - -.Example snippet showing `parse: json` -[source,yaml] ----- -pipelines: -- inputRefs: [ application ] - outputRefs: myFluentd - parse: json ----- - -When you enable parsing JSON logs by using `parse: json`, the CR copies the JSON-structured log entry in a `structured` field, as shown in the following example. This does not modify the original `message` field. - -.Example `structured` output containing the structured JSON log entry -[source,yaml] ----- -{"structured": { "level": "info", "name": "fred", "home": "bedrock" }, - "more fields..."} ----- - -[IMPORTANT] -==== -If the log entry does not contain valid structured JSON, the `structured` field will be absent. -==== - -To enable parsing JSON logs for specific logging platforms, see xref:../logging/cluster-logging-external.adoc#cluster-logging-external[Forwarding logs to third-party systems]. 
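
For orientation, the following is a minimal sketch of a complete `ClusterLogForwarder` CR that combines the `parse: json` pipeline setting shown above with a named output. The `fluentdForward` output type and the `tcp://fluentd.example.com:24224` URL are illustrative assumptions only; substitute the output type and endpoint for your own log aggregator.

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: instance
  namespace: openshift-logging
spec:
  outputs:
  - name: myFluentd        # matches the output referenced by the pipeline below
    type: fluentdForward   # illustrative output type; use the type for your aggregator
    url: 'tcp://fluentd.example.com:24224' # illustrative endpoint
  pipelines:
  - name: parsed-app-logs
    inputRefs: [ application ]
    outputRefs: [ myFluentd ]
    parse: json            # copies valid JSON from `message` into the `structured` field
----

With JSON parsing enabled, records that contain valid JSON arrive at the output with the additional `structured` field, while the original `message` field is unchanged.
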
diff --git a/modules/cluster-logging-json-logging-about.adoc b/modules/cluster-logging-json-logging-about.adoc deleted file mode 100644 index db6c394bdc8a..000000000000 --- a/modules/cluster-logging-json-logging-about.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-json-logging-about_{context}"] -= About JSON {product-title} Logging - -You can use JSON logging to configure the Log Forwarding API to parse JSON strings into a structured object. You can perform the following tasks: - -* Parse JSON logs -* Configure JSON log data for Elasticsearch -* Forward JSON logs to the Elasticsearch log store diff --git a/modules/cluster-logging-kibana-limits.adoc b/modules/cluster-logging-kibana-limits.adoc deleted file mode 100644 index aac3c3a66374..000000000000 --- a/modules/cluster-logging-kibana-limits.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-visualizer.adoc - -[id="cluster-logging-kibana-limits_{context}"] -= Configure the CPU and memory limits for the log visualizer - -You can adjust both the CPU and memory limits for the pod that hosts the log visualizer. - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc edit ClusterLogging instance ----- -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - -.... - -spec: - visualization: - type: "kibana" - kibana: - replicas: - resources: <1> - limits: - memory: 1Gi - requests: - cpu: 500m - memory: 1Gi - proxy: <2> - resources: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi ----- -<1> Specify the CPU and memory limits to allocate for each node. -<2> Specify the CPU and memory limits to allocate to the Kibana proxy. diff --git a/modules/cluster-logging-kibana-scaling.adoc b/modules/cluster-logging-kibana-scaling.adoc deleted file mode 100644 index 9f56f0f93f23..000000000000 --- a/modules/cluster-logging-kibana-scaling.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-visualizer.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-kibana-scaling_{context}"] -= Scaling redundancy for the log visualizer nodes - -You can scale the pod that hosts the log visualizer for redundancy. - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc edit ClusterLogging instance ----- -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - -.... - -spec: - visualization: - type: "kibana" - kibana: - replicas: 1 <1> ----- -<1> Specify the number of Kibana nodes. diff --git a/modules/cluster-logging-kibana-tolerations.adoc b/modules/cluster-logging-kibana-tolerations.adoc deleted file mode 100644 index f74d68a9061b..000000000000 --- a/modules/cluster-logging-kibana-tolerations.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-visualizer.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-kibana-tolerations_{context}"] -= Using tolerations to control the log visualizer pod placement - -You can control the node where the log visualizer pod runs and prevent -other workloads from using those nodes by using tolerations on the pods. 
- -You apply tolerations to the log visualizer pod through the `ClusterLogging` custom resource (CR) -and apply taints to a node through the node specification. A taint on a node is a `key:value pair` that -instructs the node to repel all pods that do not tolerate the taint. Using a specific `key:value` pair -that is not on other pods ensures only the Kibana pod can run on that node. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Use the following command to add a taint to a node where you want to schedule the log visualizer pod: -+ -[source,terminal] ----- -$ oc adm taint nodes =: ----- -+ -For example: -+ -[source,terminal] ----- -$ oc adm taint nodes node1 kibana=node:NoExecute ----- -+ -This example places a taint on `node1` that has key `kibana`, value `node`, and taint effect `NoExecute`. -You must use the `NoExecute` taint effect. `NoExecute` schedules only pods that match the taint and remove existing pods -that do not match. - -. Edit the `visualization` section of the `ClusterLogging` CR to configure a toleration for the Kibana pod: -+ -[source,yaml] ----- - visualization: - type: "kibana" - kibana: - tolerations: - - key: "kibana" <1> - operator: "Exists" <2> - effect: "NoExecute" <3> - tolerationSeconds: 6000 <4> ----- -<1> Specify the key that you added to the node. -<2> Specify the `Exists` operator to require the `key`/`value`/`effect` parameters to match. -<3> Specify the `NoExecute` effect. -<4> Optionally, specify the `tolerationSeconds` parameter to set how long a pod can remain bound to a node before being evicted. - - -This toleration matches the taint created by the `oc adm taint` command. A pod with this toleration would be able to schedule onto `node1`. diff --git a/modules/cluster-logging-log-store-status-comp.adoc b/modules/cluster-logging-log-store-status-comp.adoc deleted file mode 100644 index f9f96f74f824..000000000000 --- a/modules/cluster-logging-log-store-status-comp.adoc +++ /dev/null @@ -1,203 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -[id="cluster-logging-elasticsearch-status-comp_{context}"] -= Viewing the status of the log store components - -You can view the status for a number of the log store components. - -Elasticsearch indices:: -You can view the status of the Elasticsearch indices. - -. Get the name of an Elasticsearch pod: -+ -[source,terminal] ----- -$ oc get pods --selector component=elasticsearch -o name ----- -+ -.Example output -[source,terminal] ----- -pod/elasticsearch-cdm-1godmszn-1-6f8495-vp4lw -pod/elasticsearch-cdm-1godmszn-2-5769cf-9ms2n -pod/elasticsearch-cdm-1godmszn-3-f66f7d-zqkz7 ----- - -. Get the status of the indices: -+ -[source,terminal] ----- -$ oc exec elasticsearch-cdm-4vjor49p-2-6d4d7db474-q2w7z -- indices ----- -+ -.Example output -[source,terminal] ----- -Defaulting container name to elasticsearch. -Use 'oc describe pod/elasticsearch-cdm-4vjor49p-2-6d4d7db474-q2w7z -n openshift-logging' to see all of the containers in this pod. 
- -green open infra-000002 S4QANnf1QP6NgCegfnrnbQ 3 1 119926 0 157 78 -green open audit-000001 8_EQx77iQCSTzFOXtxRqFw 3 1 0 0 0 0 -green open .security iDjscH7aSUGhIdq0LheLBQ 1 1 5 0 0 0 -green open .kibana_-377444158_kubeadmin yBywZ9GfSrKebz5gWBZbjw 3 1 1 0 0 0 -green open infra-000001 z6Dpe__ORgiopEpW6Yl44A 3 1 871000 0 874 436 -green open app-000001 hIrazQCeSISewG3c2VIvsQ 3 1 2453 0 3 1 -green open .kibana_1 JCitcBMSQxKOvIq6iQW6wg 1 1 0 0 0 0 -green open .kibana_-1595131456_user1 gIYFIEGRRe-ka0W3okS-mQ 3 1 1 0 0 0 ----- - - -Log store pods:: -You can view the status of the pods that host the log store. - -. Get the name of a pod: -+ -[source,terminal] ----- -$ oc get pods --selector component=elasticsearch -o name ----- -+ -.Example output -[source,terminal] ----- -pod/elasticsearch-cdm-1godmszn-1-6f8495-vp4lw -pod/elasticsearch-cdm-1godmszn-2-5769cf-9ms2n -pod/elasticsearch-cdm-1godmszn-3-f66f7d-zqkz7 ----- - -. Get the status of a pod: -+ -[source,terminal] ----- -$ oc describe pod elasticsearch-cdm-1godmszn-1-6f8495-vp4lw ----- -+ -The output includes the following status information: -+ -.Example output -[source,terminal] ----- -.... -Status: Running - -.... - -Containers: - elasticsearch: - Container ID: cri-o://b7d44e0a9ea486e27f47763f5bb4c39dfd2 - State: Running - Started: Mon, 08 Jun 2020 10:17:56 -0400 - Ready: True - Restart Count: 0 - Readiness: exec [/usr/share/elasticsearch/probe/readiness.sh] delay=10s timeout=30s period=5s #success=1 #failure=3 - -.... - - proxy: - Container ID: cri-o://3f77032abaddbb1652c116278652908dc01860320b8a4e741d06894b2f8f9aa1 - State: Running - Started: Mon, 08 Jun 2020 10:18:38 -0400 - Ready: True - Restart Count: 0 - -.... - -Conditions: - Type Status - Initialized True - Ready True - ContainersReady True - PodScheduled True - -.... - -Events: ----- - -Log storage pod deployment configuration:: -You can view the status of the log store deployment configuration. - -. Get the name of a deployment configuration: -+ -[source,terminal] ----- -$ oc get deployment --selector component=elasticsearch -o name ----- -+ -.Example output -[source,terminal] ----- -deployment.extensions/elasticsearch-cdm-1gon-1 -deployment.extensions/elasticsearch-cdm-1gon-2 -deployment.extensions/elasticsearch-cdm-1gon-3 ----- - -. Get the deployment configuration status: -+ -[source,terminal] ----- -$ oc describe deployment elasticsearch-cdm-1gon-1 ----- -+ -The output includes the following status information: -+ -.Example output -[source,terminal] ----- -.... - Containers: - elasticsearch: - Image: registry.redhat.io/openshift-logging/elasticsearch6-rhel8 - Readiness: exec [/usr/share/elasticsearch/probe/readiness.sh] delay=10s timeout=30s period=5s #success=1 #failure=3 - -.... - -Conditions: - Type Status Reason - ---- ------ ------ - Progressing Unknown DeploymentPaused - Available True MinimumReplicasAvailable - -.... - -Events: ----- - -Log store replica set:: -You can view the status of the log store replica set. - -. Get the name of a replica set: -+ -[source,terminal] ----- -$ oc get replicaSet --selector component=elasticsearch -o name - -replicaset.extensions/elasticsearch-cdm-1gon-1-6f8495 -replicaset.extensions/elasticsearch-cdm-1gon-2-5769cf -replicaset.extensions/elasticsearch-cdm-1gon-3-f66f7d ----- - -. Get the status of the replica set: -+ -[source,terminal] ----- -$ oc describe replicaSet elasticsearch-cdm-1gon-1-6f8495 ----- -+ -The output includes the following status information: -+ -.Example output -[source,terminal] ----- -.... 
- Containers: - elasticsearch: - Image: registry.redhat.io/openshift-logging/elasticsearch6-rhel8@sha256:4265742c7cdd85359140e2d7d703e4311b6497eec7676957f455d6908e7b1c25 - Readiness: exec [/usr/share/elasticsearch/probe/readiness.sh] delay=10s timeout=30s period=5s #success=1 #failure=3 - -.... - -Events: ----- diff --git a/modules/cluster-logging-log-store-status-viewing.adoc b/modules/cluster-logging-log-store-status-viewing.adoc deleted file mode 100644 index bcc1ac8a83f6..000000000000 --- a/modules/cluster-logging-log-store-status-viewing.adoc +++ /dev/null @@ -1,247 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-log-store.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-log-store-comp-viewing_{context}"] -= Viewing the status of the log store - -You can view the status of your log store. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Change to the `openshift-logging` project. -+ -[source,terminal] ----- -$ oc project openshift-logging ----- - -. To view the status: - -.. Get the name of the log store instance: -+ -[source,terminal] ----- -$ oc get Elasticsearch ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -elasticsearch 5h9m ----- - -.. Get the log store status: -+ -[source,terminal] ----- -$ oc get Elasticsearch -o yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc get Elasticsearch elasticsearch -n openshift-logging -o yaml ----- -+ -The output includes information similar to the following: -+ -.Example output -[source,terminal] ----- -status: <1> - cluster: <2> - activePrimaryShards: 30 - activeShards: 60 - initializingShards: 0 - numDataNodes: 3 - numNodes: 3 - pendingTasks: 0 - relocatingShards: 0 - status: green - unassignedShards: 0 - clusterHealth: "" - conditions: [] <3> - nodes: <4> - - deploymentName: elasticsearch-cdm-zjf34ved-1 - upgradeStatus: {} - - deploymentName: elasticsearch-cdm-zjf34ved-2 - upgradeStatus: {} - - deploymentName: elasticsearch-cdm-zjf34ved-3 - upgradeStatus: {} - pods: <5> - client: - failed: [] - notReady: [] - ready: - - elasticsearch-cdm-zjf34ved-1-6d7fbf844f-sn422 - - elasticsearch-cdm-zjf34ved-2-dfbd988bc-qkzjz - - elasticsearch-cdm-zjf34ved-3-c8f566f7c-t7zkt - data: - failed: [] - notReady: [] - ready: - - elasticsearch-cdm-zjf34ved-1-6d7fbf844f-sn422 - - elasticsearch-cdm-zjf34ved-2-dfbd988bc-qkzjz - - elasticsearch-cdm-zjf34ved-3-c8f566f7c-t7zkt - master: - failed: [] - notReady: [] - ready: - - elasticsearch-cdm-zjf34ved-1-6d7fbf844f-sn422 - - elasticsearch-cdm-zjf34ved-2-dfbd988bc-qkzjz - - elasticsearch-cdm-zjf34ved-3-c8f566f7c-t7zkt - shardAllocationEnabled: all ----- -<1> In the output, the cluster status fields appear in the `status` stanza. -<2> The status of the log store: -+ -* The number of active primary shards. -* The number of active shards. -* The number of shards that are initializing. -* The number of log store data nodes. -* The total number of log store nodes. -* The number of pending tasks. -* The log store status: `green`, `red`, `yellow`. -* The number of unassigned shards. -<3> Any status conditions, if present. The log store status indicates the reasons from the scheduler if a pod could not be placed. Any events related to the following conditions are shown: -* Container Waiting for both the log store and proxy containers. -* Container Terminated for both the log store and proxy containers. -* Pod unschedulable. 
-Also, a condition is shown for a number of issues; see *Example condition messages*. -<4> The log store nodes in the cluster, with `upgradeStatus`. -<5> The log store client, data, and master pods in the cluster, listed under 'failed`, `notReady`, or `ready` state. - -[id="cluster-logging-elasticsearch-status-message_{context}"] -== Example condition messages - -The following are examples of some condition messages from the `Status` section of the Elasticsearch instance. - -// https://github.com/openshift/elasticsearch-operator/pull/92 - -The following status message indicates that a node has exceeded the configured low watermark, and no shard will be allocated to this node. - -[source,yaml] ----- -status: - nodes: - - conditions: - - lastTransitionTime: 2019-03-15T15:57:22Z - message: Disk storage usage for node is 27.5gb (36.74%). Shards will be not - be allocated on this node. - reason: Disk Watermark Low - status: "True" - type: NodeStorage - deploymentName: example-elasticsearch-cdm-0-1 - upgradeStatus: {} ----- - -The following status message indicates that a node has exceeded the configured high watermark, and shards will be relocated to other nodes. - -[source,yaml] ----- -status: - nodes: - - conditions: - - lastTransitionTime: 2019-03-15T16:04:45Z - message: Disk storage usage for node is 27.5gb (36.74%). Shards will be relocated - from this node. - reason: Disk Watermark High - status: "True" - type: NodeStorage - deploymentName: example-elasticsearch-cdm-0-1 - upgradeStatus: {} ----- - -The following status message indicates that the log store node selector in the CR does not match any nodes in the cluster: - -[source,yaml] ----- -status: - nodes: - - conditions: - - lastTransitionTime: 2019-04-10T02:26:24Z - message: '0/8 nodes are available: 8 node(s) didn''t match node selector.' - reason: Unschedulable - status: "True" - type: Unschedulable ----- - -The following status message indicates that the log store CR uses a non-existent persistent volume claim (PVC). - -[source,yaml] ----- -status: - nodes: - - conditions: - - last Transition Time: 2019-04-10T05:55:51Z - message: pod has unbound immediate PersistentVolumeClaims (repeated 5 times) - reason: Unschedulable - status: True - type: Unschedulable ----- - -The following status message indicates that your log store cluster does not have enough nodes to support the redundancy policy. - -[source,yaml] ----- -status: - clusterHealth: "" - conditions: - - lastTransitionTime: 2019-04-17T20:01:31Z - message: Wrong RedundancyPolicy selected. Choose different RedundancyPolicy or - add more nodes with data roles - reason: Invalid Settings - status: "True" - type: InvalidRedundancy ----- - -This status message indicates your cluster has too many control plane nodes: - -[source,yaml] ----- -status: - clusterHealth: green - conditions: - - lastTransitionTime: '2019-04-17T20:12:34Z' - message: >- - Invalid master nodes count. Please ensure there are no more than 3 total - nodes with master roles - reason: Invalid Settings - status: 'True' - type: InvalidMasters ----- - - -The following status message indicates that Elasticsearch storage does not support the change you tried to make. 
- -For example: -[source,yaml] ----- -status: - clusterHealth: green - conditions: - - lastTransitionTime: "2021-05-07T01:05:13Z" - message: Changing the storage structure for a custom resource is not supported - reason: StorageStructureChangeIgnored - status: 'True' - type: StorageStructureChangeIgnored ----- - -The `reason` and `type` fields specify the type of unsupported change: - -`StorageClassNameChangeIgnored`:: Unsupported change to the storage class name. -`StorageSizeChangeIgnored`:: Unsupported change to the storage size. -`StorageStructureChangeIgnored`:: Unsupported change between ephemeral and persistent storage structures. -+ -[IMPORTANT] -==== -If you try to configure the `ClusterLogging` custom resource (CR) to switch from ephemeral to persistent storage, the OpenShift Elasticsearch Operator creates a persistent volume claim (PVC) but does not create a persistent volume (PV). To clear the `StorageStructureChangeIgnored` status, you must revert the change to the `ClusterLogging` CR and delete the PVC. -==== diff --git a/modules/cluster-logging-logcli-reference.adoc b/modules/cluster-logging-logcli-reference.adoc deleted file mode 100644 index 272aa5886ac7..000000000000 --- a/modules/cluster-logging-logcli-reference.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module is included in the following assemblies: -//cluster-logging-loki.adoc -:_content-type: REFERENCE -[id="logging-logcli-about_{context}"] - -= Querying Loki - -You can use Loki's command-line interface `logcli` to query logs. - -.Example Application Log Query -[source,terminal] ----- -$ oc extract cm/lokistack-sample-ca-bundle --to=lokistack --confirm -$ cat lokistack/*.crt >lokistack_ca.crt -$ logcli -o raw --bearer-token="${bearer_token}" --ca-cert="lokistack_ca.crt" --addr xxxxxx ----- - -.Example Infrastructure Log Query -[source,terminal] ----- -$ logcli --bearer-token="$(oc whoami -t)" --addr https://lokistack-dev-openshift-logging.apps.devcluster.openshift.com/api/logs/v1/infrastructure labels ----- - -.Example Audit Log Query -[source,terminal] ----- -$ logcli --bearer-token="$(oc whoami -t)" --addr https://lokistack-dev-openshift-logging.apps.devcluster.openshift.com/api/logs/v1/audit labels ----- - -.Additional resources -* link:https://grafana.com/docs/loki/latest/tools/logcli/[LogCLI Documentation] diff --git a/modules/cluster-logging-logstore-limits.adoc b/modules/cluster-logging-logstore-limits.adoc deleted file mode 100644 index 5af44f66a99e..000000000000 --- a/modules/cluster-logging-logstore-limits.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-logstore-limits_{context}"] -= Configuring CPU and memory requests for the log store - -Each component specification allows for adjustments to both the CPU and memory requests. -You should not have to manually adjust these values, because the OpenShift Elasticsearch -Operator sets values sufficient for your environment. - -[NOTE] -==== -In large-scale clusters, the default memory limit for the Elasticsearch proxy container might not be sufficient, causing the proxy container to be OOMKilled. If you experience this issue, increase the memory requests and limits for the Elasticsearch proxy. -==== - -Each Elasticsearch node can operate with a lower memory setting, though this is *not* recommended for production deployments. -For production use, you should have no less than the default 16Gi allocated to each pod.
Preferably, you should allocate as much as possible, up to 64Gi per pod. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc edit ClusterLogging instance ----- -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" -.... -spec: - logStore: - type: "elasticsearch" - elasticsearch:<1> - resources: - limits: <2> - memory: "32Gi" - requests: <3> - cpu: "1" - memory: "16Gi" - proxy: <4> - resources: - limits: - memory: 100Mi - requests: - memory: 100Mi ----- -<1> Specify the CPU and memory requests for Elasticsearch as needed. If you leave these values blank, -the OpenShift Elasticsearch Operator sets default values that should be sufficient for most deployments. The default values are `16Gi` for the memory request and `1` for the CPU request. -<2> The maximum amount of resources a pod can use. -<3> The minimum resources required to schedule a pod. -<4> Specify the CPU and memory requests for the Elasticsearch proxy as needed. If you leave these values blank, the OpenShift Elasticsearch Operator sets default values that are sufficient for most deployments. The default values are `256Mi` for the memory request and `100m` for the CPU request. - -When adjusting the amount of Elasticsearch memory, the same value should be used for both `requests` and `limits`. - -For example: - -[source,yaml] ----- - resources: - limits: <1> - memory: "32Gi" - requests: <2> - cpu: "8" - memory: "32Gi" ----- -<1> The maximum amount of the resource. -<2> The minimum amount required. - -Kubernetes generally adheres to the node configuration and does not allow Elasticsearch to use the specified limits. -Setting the same value for the `requests` and `limits` ensures that Elasticsearch can use the memory you want, assuming the node has the memory available. diff --git a/modules/cluster-logging-loki-about.adoc b/modules/cluster-logging-loki-about.adoc deleted file mode 100644 index da91f15ee1a9..000000000000 --- a/modules/cluster-logging-loki-about.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module is included in the following assemblies: -//cluster-logging-loki.adoc -:_content-type: CONCEPT -[id="about-logging-loki_{context}"] -= About the LokiStack - -In the {logging} documentation, *LokiStack* refers to the {logging}-supported combination of Loki and a web proxy with {product-title} authentication integration. The LokiStack proxy uses {product-title} authentication to enforce multi-tenancy. *Loki* refers to the log store as either the individual component or an external store. - -Loki is a horizontally scalable, highly available, multi-tenant log aggregation system currently offered as an alternative to Elasticsearch as a log store for the {logging}. Elasticsearch indexes incoming log records completely during ingestion. Loki only indexes a few fixed labels during ingestion, and defers more complex parsing until after the logs have been stored. This means Loki can collect logs more quickly. As with Elasticsearch, you can query Loki link:https://grafana.com/docs/loki/latest/[using JSON paths or regular expressions]. - -[id="deployment-sizing_{context}"] -== Deployment Sizing -Sizing for Loki follows the format of `<N>x.<size>`, where the value `<N>` is the number of instances and `<size>` specifies performance capabilities. - -[NOTE] -==== -1x.extra-small is for demo purposes only, and is not supported.
-==== - -.Loki Sizing -[options="header"] -|======================================================================================== -| | 1x.extra-small | 1x.small | 1x.medium -| *Data transfer* | Demo use only. | 500GB/day | 2TB/day -| *Queries per second (QPS)* | Demo use only. | 25-50 QPS at 200ms | 25-75 QPS at 200ms -| *Replication factor* | None | 2 | 3 -| *Total CPU requests* | 5 vCPUs | 36 vCPUs | 54 vCPUs -| *Total Memory requests* | 7.5Gi | 63Gi | 139Gi -| *Total Disk requests* | 150Gi | 300Gi | 450Gi -|======================================================================================== - -[id="CRD-API-support_{context}"] -== Supported API Custom Resource Definitions -LokiStack development is ongoing; not all APIs are currently supported. - -[options="header"] -|===================================================================== -| CustomResourceDefinition (CRD) | ApiVersion | Support state -| LokiStack | lokistack.loki.grafana.com/v1 | Supported in 5.5 -| RulerConfig | rulerconfig.loki.grafana/v1beta1 | Technology Preview -| AlertingRule | alertingrule.loki.grafana/v1beta1 | Technology Preview -| RecordingRule | recordingrule.loki.grafana/v1beta1 | Technology Preview -|===================================================================== - -:FeatureName: Usage of `RulerConfig`, `AlertingRule`, and `RecordingRule` custom resource definitions (CRDs). -include::snippets/technology-preview.adoc[] diff --git a/modules/cluster-logging-loki-deploy.adoc b/modules/cluster-logging-loki-deploy.adoc deleted file mode 100644 index beda913b63b3..000000000000 --- a/modules/cluster-logging-loki-deploy.adoc +++ /dev/null @@ -1,150 +0,0 @@ -// Module is included in the following assemblies: -//cluster-logging-loki.adoc -:_content-type: PROCEDURE -[id="logging-loki-deploy_{context}"] -= Deploying the LokiStack - -ifndef::openshift-rosa,openshift-dedicated[] -You can use the {product-title} web console to deploy the LokiStack. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -You can deploy the LokiStack by using the {product-title} {cluster-manager-url}. -endif::[] - -.Prerequisites - -* {logging-title-uc} Operator 5.5 and later -* A supported log store (AWS S3, Google Cloud Storage, Azure, Swift, Minio, OpenShift Data Foundation) - -.Procedure - -. Install the Loki Operator: - -ifndef::openshift-rosa,openshift-dedicated[] -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -.. In the {hybrid-console}, click *Operators* -> *OperatorHub*. -endif::[] - -.. Choose *Loki Operator* from the list of available Operators, and click *Install*. - -.. Under *Installation Mode*, select *All namespaces on the cluster*. - -.. Under *Installed Namespace*, select *openshift-operators-redhat*. -+ -You must specify the `openshift-operators-redhat` namespace. The `openshift-operators` namespace might contain Community Operators, which are untrusted and might publish a metric with the same name as -ifndef::openshift-rosa[] -an {product-title} metric, which would cause conflicts. -endif::[] -ifdef::openshift-rosa[] -a {product-title} metric, which would cause conflicts. -endif::[] - -.. Select *Enable operator recommended cluster monitoring on this namespace*. -+ -This option sets the `openshift.io/cluster-monitoring: "true"` label in the Namespace object. You must select this option to ensure that cluster monitoring scrapes the `openshift-operators-redhat` namespace. - -.. Select an *Approval Strategy*.
-+ -* The *Automatic* strategy allows Operator Lifecycle Manager (OLM) to automatically update the Operator when a new version is available. -+ -* The *Manual* strategy requires a user with appropriate credentials to approve the Operator update. - -.. Click *Install*. - -.. Verify that you installed the Loki Operator. Visit the *Operators* → *Installed Operators* page and look for *Loki Operator*. - -.. Ensure that *Loki Operator* is listed with *Status* as *Succeeded* in all the projects. -+ -. Create a `Secret` YAML file that uses the `access_key_id` and `access_key_secret` fields to specify your AWS credentials, and the `bucketnames`, `endpoint`, and `region` fields to define the object storage location. For example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: logging-loki-s3 - namespace: openshift-logging -stringData: - access_key_id: AKIAIOSFODNN7EXAMPLE - access_key_secret: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY - bucketnames: s3-bucket-name - endpoint: https://s3.eu-central-1.amazonaws.com - region: eu-central-1 ----- -+ -. Create the `LokiStack` custom resource: -+ -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: - size: 1x.small - storage: - schemas: - - version: v12 - effectiveDate: "2022-06-01" - secret: - name: logging-loki-s3 - type: s3 - storageClassName: gp3-csi <1> - tenants: - mode: openshift-logging ----- -<1> Or `gp2-csi`. -+ -.. Apply the configuration: -+ -[source,terminal] ----- -$ oc apply -f logging-loki.yaml ----- -+ -. Create or edit a `ClusterLogging` CR: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: - name: instance - namespace: openshift-logging -spec: - managementState: Managed - logStore: - type: lokistack - lokistack: - name: logging-loki - collection: - type: vector ----- -+ -.. Apply the configuration: -+ -[source,terminal] ----- -$ oc apply -f cr-lokistack.yaml ----- -+ -. Enable the Red Hat OpenShift Logging console plugin: -ifndef::openshift-rosa,openshift-dedicated[] -.. In the {product-title} web console, click *Operators* -> *Installed Operators*. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -.. In the {hybrid-console}, click *Operators* -> *Installed Operators*. -endif::[] -.. Select the *Red Hat OpenShift Logging* Operator. -.. Under *Console plugin*, click *Disabled*. -.. Select *Enable* and then *Save*. This change restarts the `openshift-console` pods. -.. After the pods restart, you will receive a notification that a web console update is available, prompting you to refresh. -.. After refreshing the web console, click *Observe* in the left main menu. A new *Logs* option is available. - -[NOTE] -==== -This plugin is only available on {product-title} 4.10 and later.
-==== diff --git a/modules/cluster-logging-loki-tech-preview.adoc b/modules/cluster-logging-loki-tech-preview.adoc deleted file mode 100644 index 4d9a7dbf1864..000000000000 --- a/modules/cluster-logging-loki-tech-preview.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -//cluster-logging-release-notes.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-loki-tech-preview_{context}"] -:FeatureName: Loki Operator -include::snippets/technology-preview.adoc[] - -[id="cluster-logging-about-loki"] -= About Loki - -Loki is a horizontally scalable, highly available, multi-tenant log aggregation system currently offered as an alternative to Elasticsearch as a log store for the {logging}. - -[role="_additional-resources"] -.Additional resources -* link:https://grafana.com/docs/loki/latest/[Loki Documentation] - -== Deploying the LokiStack -You can use the {product-title} web console to install the Loki Operator. - -.Prerequisites - -* {product-title}: {product-version} -* {logging-title-uc}: 5.4 - -To install the Loki Operator using the {product-title} web console: - -. Install the Loki Operator: - -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. - -.. Choose *Loki Operator* from the list of available Operators, and click *Install*. - -.. Under *Installation Mode*, select *All namespaces on the cluster*. - -.. Under *Installed Namespace*, select *openshift-operators-redhat*. -+ -You must specify the `openshift-operators-redhat` namespace. The `openshift-operators` -namespace might contain Community Operators, which are untrusted and could publish -a metric with the same name as an {product-title} metric, which would cause -conflicts. - -.. Select *Enable operator recommended cluster monitoring on this namespace*. -+ -This option sets the `openshift.io/cluster-monitoring: "true"` label in the Namespace object. -You must select this option to ensure that cluster monitoring -scrapes the `openshift-operators-redhat` namespace. - -.. Select an *Approval Strategy*. -+ -* The *Automatic* strategy allows Operator Lifecycle Manager (OLM) to automatically update the Operator when a new version is available. -+ -* The *Manual* strategy requires a user with appropriate credentials to approve the Operator update. - -.. Click *Install*. - -.. Verify that you installed the Loki Operator. Visit the *Operators* → *Installed Operators* page and look for *Loki Operator*. - -.. Ensure that *Loki Operator* is listed with *Status* as *Succeeded* in all the projects. diff --git a/modules/cluster-logging-maintenance-support-about.adoc b/modules/cluster-logging-maintenance-support-about.adoc deleted file mode 100644 index f71a4c5c6520..000000000000 --- a/modules/cluster-logging-maintenance-support-about.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/config/cluster-logging-maintenance-support.adoc - -:_content-type: CONCEPT -[id="cluster-logging-maintenance-support-about_{context}"] -= About unsupported configurations - -The supported way of configuring the {logging-title} is by using the options described in this documentation. Do not use other configurations, as they are unsupported. Configuration paradigms might change across {product-title} releases, and such cases can only be handled gracefully if all configuration possibilities are controlled.
If you use configurations other than those described in this documentation, your changes will disappear because the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator reconcile any differences. The Operators reverse everything to the defined state by default and by design. - -[NOTE] -==== -If you _must_ perform configurations not described in the {product-title} documentation, you _must_ set your Red Hat OpenShift Logging Operator or OpenShift Elasticsearch Operator to *Unmanaged*. An unmanaged OpenShift Logging environment is _not supported_ and does not receive updates until you return OpenShift Logging to *Managed*. -==== diff --git a/modules/cluster-logging-maintenance-support-list.adoc b/modules/cluster-logging-maintenance-support-list.adoc deleted file mode 100644 index 7cfc808389f4..000000000000 --- a/modules/cluster-logging-maintenance-support-list.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/config/cluster-logging-maintenance-support.adoc - -[id="cluster-logging-maintenance-support-list_{context}"] -= Unsupported configurations - -You must set the Red Hat OpenShift Logging Operator to the unmanaged state to modify the following components: - -* The `Elasticsearch` CR - -* The Kibana deployment - -* The `fluent.conf` file - -* The Fluentd daemon set - -You must set the OpenShift Elasticsearch Operator to the unmanaged state to modify the following component: - -* The Elasticsearch deployment files - -Explicitly unsupported cases include: - -* *Configuring default log rotation*. You cannot modify the default log rotation configuration. - -* *Configuring the collected log location*. You cannot change the location of the log collector output file, which by default is `/var/log/fluentd/fluentd.log`. - -* *Throttling log collection*. You cannot throttle down the rate at which the logs are read in by the log collector. - -* *Configuring the logging collector using environment variables*. You cannot use environment variables to modify the log collector. - -* *Configuring how the log collector normalizes logs*. You cannot modify default log normalization. diff --git a/modules/cluster-logging-manual-rollout-rolling.adoc b/modules/cluster-logging-manual-rollout-rolling.adoc deleted file mode 100644 index 3befd3648696..000000000000 --- a/modules/cluster-logging-manual-rollout-rolling.adoc +++ /dev/null @@ -1,221 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/config/cluster-logging-log-store.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-manual-rollout-rolling_{context}"] -= Performing an Elasticsearch rolling cluster restart - -Perform a rolling restart when you change the `elasticsearch` config map or any of the `elasticsearch-*` deployment configurations. - -A rolling restart is also recommended if the nodes on which an Elasticsearch pod runs require a reboot. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -To perform a rolling cluster restart: - -. Change to the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc project openshift-logging ----- - -. Get the names of the Elasticsearch pods: -+ -[source,terminal] ----- -$ oc get pods -l component=elasticsearch- ----- - -. 
Scale down the collector pods so they stop sending new logs to Elasticsearch: -+ -[source,terminal] ----- -$ oc -n openshift-logging patch daemonset/collector -p '{"spec":{"template":{"spec":{"nodeSelector":{"logging-infra-collector": "false"}}}}}' ----- - -. Perform a shard synced flush using the {product-title} link:https://github.com/openshift/origin-aggregated-logging/tree/master/elasticsearch#es_util[*es_util*] tool to ensure there are no pending operations waiting to be written to disk prior to shutting down: -+ -[source,terminal] ----- -$ oc exec -c elasticsearch -- es_util --query="_flush/synced" -XPOST ----- -+ -For example: -+ -[source,terminal] ----- -$ oc exec -c elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6 -c elasticsearch -- es_util --query="_flush/synced" -XPOST ----- -+ -.Example output -[source,terminal] ----- -{"_shards":{"total":4,"successful":4,"failed":0},".security":{"total":2,"successful":2,"failed":0},".kibana_1":{"total":2,"successful":2,"failed":0}} ----- - -. Prevent shard balancing when purposely bringing down nodes using the {product-title} -link:https://github.com/openshift/origin-aggregated-logging/tree/master/elasticsearch#es_util[*es_util*] tool: -+ -[source,terminal] ----- -$ oc exec -c elasticsearch -- es_util --query="_cluster/settings" -XPUT -d '{ "persistent": { "cluster.routing.allocation.enable" : "primaries" } }' ----- -+ -For example: -+ -[source,terminal] ----- -$ oc exec elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6 -c elasticsearch -- es_util --query="_cluster/settings" -XPUT -d '{ "persistent": { "cluster.routing.allocation.enable" : "primaries" } }' ----- -+ -.Example output -[source,terminal] ----- -{"acknowledged":true,"persistent":{"cluster":{"routing":{"allocation":{"enable":"primaries"}}}},"transient": ----- - -. After the command is complete, for each deployment you have for an ES cluster: - -.. By default, the {product-title} Elasticsearch cluster blocks rollouts to their nodes. Use the following command to allow rollouts -and allow the pod to pick up the changes: -+ -[source,terminal] ----- -$ oc rollout resume deployment/ ----- -+ -For example: -+ -[source,terminal] ----- -$ oc rollout resume deployment/elasticsearch-cdm-0-1 ----- -+ -[source,terminal] ----- -deployment.extensions/elasticsearch-cdm-0-1 resumed ----- -+ -A new pod is deployed. After the pod has a ready container, you can -move on to the next deployment. -+ -[source,terminal] ----- -$ oc get pods -l component=elasticsearch- ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6k 2/2 Running 0 22h -elasticsearch-cdm-5ceex6ts-2-f799564cb-l9mj7 2/2 Running 0 22h -elasticsearch-cdm-5ceex6ts-3-585968dc68-k7kjr 2/2 Running 0 22h ----- - -.. After the deployments are complete, reset the pod to disallow rollouts: -+ -[source,terminal] ----- -$ oc rollout pause deployment/ ----- -+ -For example: -+ -[source,terminal] ----- -$ oc rollout pause deployment/elasticsearch-cdm-0-1 ----- -+ -[source,terminal] ----- -deployment.extensions/elasticsearch-cdm-0-1 paused ----- -+ -.. Check that the Elasticsearch cluster is in a `green` or `yellow` state: -+ -[source,terminal] ----- -$ oc exec -c elasticsearch -- es_util --query=_cluster/health?pretty=true ----- -+ -[NOTE] -==== -If you performed a rollout on the Elasticsearch pod you used in the previous commands, the pod no longer exists and you need a new pod name here. 
-==== -+ -For example: -+ -[source,terminal] ----- -$ oc exec elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6 -c elasticsearch -- es_util --query=_cluster/health?pretty=true ----- -+ -.Example output -[source,json] ----- -{ - "cluster_name" : "elasticsearch", - "status" : "yellow", <1> - "timed_out" : false, - "number_of_nodes" : 3, - "number_of_data_nodes" : 3, - "active_primary_shards" : 8, - "active_shards" : 16, - "relocating_shards" : 0, - "initializing_shards" : 0, - "unassigned_shards" : 1, - "delayed_unassigned_shards" : 0, - "number_of_pending_tasks" : 0, - "number_of_in_flight_fetch" : 0, - "task_max_waiting_in_queue_millis" : 0, - "active_shards_percent_as_number" : 100.0 -} ----- -<1> Make sure this parameter value is `green` or `yellow` before proceeding. - -. If you changed the Elasticsearch configuration map, repeat these steps for each Elasticsearch pod. - -. After all the deployments for the cluster have been rolled out, re-enable shard balancing: -+ -[source,terminal] ----- -$ oc exec -c elasticsearch -- es_util --query="_cluster/settings" -XPUT -d '{ "persistent": { "cluster.routing.allocation.enable" : "all" } }' ----- -+ -For example: -+ -[source,terminal] ----- -$ oc exec elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6 -c elasticsearch -- es_util --query="_cluster/settings" -XPUT -d '{ "persistent": { "cluster.routing.allocation.enable" : "all" } }' ----- -+ -.Example output -[source,terminal] ----- -{ - "acknowledged" : true, - "persistent" : { }, - "transient" : { - "cluster" : { - "routing" : { - "allocation" : { - "enable" : "all" - } - } - } - } -} ----- - -. Scale up the collector pods so they send new logs to Elasticsearch. -+ -[source,terminal] ----- -$ oc -n openshift-logging patch daemonset/collector -p '{"spec":{"template":{"spec":{"nodeSelector":{"logging-infra-collector": "true"}}}}}' ----- diff --git a/modules/cluster-logging-must-gather-about.adoc b/modules/cluster-logging-must-gather-about.adoc deleted file mode 100644 index e8abfa298832..000000000000 --- a/modules/cluster-logging-must-gather-about.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/troubleshooting/cluster-logging-must-gather.adoc - -:_content-type: CONCEPT -[id="about-must-gather_{context}"] -= About the must-gather tool - -The `oc adm must-gather` CLI command collects the information from your cluster that is most likely needed for debugging issues. - -For your {logging}, `must-gather` collects the following information: - -* Project-level resources, including pods, configuration maps, service accounts, roles, role bindings, and events at the project level -* Cluster-level resources, including nodes, roles, and role bindings at the cluster level -* OpenShift Logging resources in the `openshift-logging` and `openshift-operators-redhat` namespaces, including health status for the log collector, the log store, and the log visualizer - -When you run `oc adm must-gather`, a new pod is created on the cluster. The data is collected on that pod and saved in a new directory that starts with `must-gather.local`. This directory is created in the current working directory. 
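As a quick orientation, the following is a minimal sketch, not part of the original module, of how you might run the tool and confirm that an output directory was created. The `--dest-dir` flag is optional, and the `openshift-logging` search assumes the typical `namespaces/<name>` layout inside the gather output, which can vary by gather image:

[source,terminal]
----
$ oc adm must-gather --dest-dir=./logging-must-gather <1>
$ ls ./logging-must-gather <2>
$ find ./logging-must-gather -type d -name openshift-logging <3>
----
<1> Collects the default data set into `./logging-must-gather` instead of the current working directory.
<2> Confirms that the tool created an image-specific subdirectory for the collected data.
<3> Locates the `openshift-logging` namespace data, assuming the usual layout; the exact structure depends on the gather image. The next module shows how to point the command at the OpenShift Logging image and package the results for a support case.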
diff --git a/modules/cluster-logging-must-gather-collecting.adoc b/modules/cluster-logging-must-gather-collecting.adoc deleted file mode 100644 index 1747f51ffa6b..000000000000 --- a/modules/cluster-logging-must-gather-collecting.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/troubleshooting/cluster-logging-must-gather.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-must-gather-collecting_{context}"] -= Collecting OpenShift Logging data - -You can use the `oc adm must-gather` CLI command to collect information about your {logging}. - -.Procedure - -To collect {logging} information with `must-gather`: - -. Navigate to the directory where you want to store the `must-gather` information. - -. Run the `oc adm must-gather` command against the OpenShift Logging image: -+ -ifndef::openshift-origin[] -[source,terminal] ----- -$ oc adm must-gather --image=$(oc -n openshift-logging get deployment.apps/cluster-logging-operator -o jsonpath='{.spec.template.spec.containers[?(@.name == "cluster-logging-operator")].image}') ----- -endif::openshift-origin[] -ifdef::openshift-origin[] -[source,terminal] ----- -$ oc adm must-gather --image=quay.io/openshift/origin-cluster-logging-operator ----- -endif::openshift-origin[] -+ -The `must-gather` tool creates a new directory that starts with `must-gather.local` within the current directory. For example: -`must-gather.local.4157245944708210408`. - -. Create a compressed file from the `must-gather` directory that was just created. For example, on a computer that uses a Linux operating system, run the following command: -+ -[source,terminal] ----- -$ tar -cvaf must-gather.tar.gz must-gather.local.4157245944708210408 ----- - -. Attach the compressed file to your support case on the link:https://access.redhat.com/[Red Hat Customer Portal]. diff --git a/modules/cluster-logging-release-notes-5.2.z.adoc b/modules/cluster-logging-release-notes-5.2.z.adoc deleted file mode 100644 index 64c5227fa241..000000000000 --- a/modules/cluster-logging-release-notes-5.2.z.adoc +++ /dev/null @@ -1,324 +0,0 @@ -[id="cluster-logging-release-notes-5-2-10"] -== OpenShift Logging 5.2.10 -[role="_abstract"] -This release includes link:https://access.redhat.com/errata/[ OpenShift Logging Bug Fix Release 5.2.10]] - -[id="openshift-logging-5-2-10-bug-fixes"] -=== Bug fixes -* Before this update some log forwarder outputs could re-order logs with the same time-stamp. With this update, a sequence number has been added to the log record to order entries that have matching timestamps.(https://issues.redhat.com/browse/LOG-2335[LOG-2335]) - -* Before this update, clusters with a large number of namespaces caused Elasticsearch to stop serving requests because the list of namespaces reached the maximum header size limit. With this update, headers only include a list of namespace names, resolving the issue. (https://issues.redhat.com/browse/LOG-2475[LOG-2475]) - -* Before this update, `system:serviceaccount:openshift-monitoring:prometheus-k8s` had cluster level privileges as a `clusterrole` and `clusterrolebinding`. This update restricts the `serviceaccount` to the `openshift-logging` namespace with a role and rolebinding. (https://issues.redhat.com/browse/LOG-2480[LOG-2480]) - -* Before this update, the `cluster-logging-operator` utilized cluster scoped roles and bindings to establish permissions for the Prometheus service account to scrape metrics. 
These permissions were only created when deploying the Operator using the console interface, and were missing when the Operator was deployed from the command line. This fixes the issue by making this role and binding namespace scoped. (https://issues.redhat.com/browse/LOG-1972[LOG-1972]) - - -[id="openshift-logging-5-2-10-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2018-25032[CVE-2018-25032] -* link:https://access.redhat.com/security/cve/CVE-2021-4028[CVE-2021-4028] -* link:https://access.redhat.com/security/cve/CVE-2021-37136[CVE-2021-37136] -* link:https://access.redhat.com/security/cve/CVE-2021-37137[CVE-2021-37137] -* link:https://access.redhat.com/security/cve/CVE-2021-43797[CVE-2021-43797] -* link:https://access.redhat.com/security/cve/CVE-2022-0778[CVE-2022-0778] -* link:https://access.redhat.com/security/cve/CVE-2022-1154[CVE-2022-1154] -* link:https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* link:https://access.redhat.com/security/cve/CVE-2022-21426[CVE-2022-21426] -* link:https://access.redhat.com/security/cve/CVE-2022-21434[CVE-2022-21434] -* link:https://access.redhat.com/security/cve/CVE-2022-21443[CVE-2022-21443] -* link:https://access.redhat.com/security/cve/CVE-2022-21476[CVE-2022-21476] -* link:https://access.redhat.com/security/cve/CVE-2022-21496[CVE-2022-21496] -* link:https://access.redhat.com/security/cve/CVE-2022-21698[CVE-2022-21698] -* link:https://access.redhat.com/security/cve/CVE-2022-25636[CVE-2022-25636] -==== - -[id="cluster-logging-release-notes-5-2-9"] -== OpenShift Logging 5.2.9 -[role="_abstract"] -This release includes link:https://access.redhat.com/errata/RHBA-2022:1375[RHBA-2022:1375 OpenShift Logging Bug Fix Release 5.2.9]] - -[id="openshift-logging-5-2-9-bug-fixes"] -=== Bug fixes -* Before this update, defining a toleration with no key and the existing Operator caused the Operator to be unable to complete an upgrade. With this update, this toleration no longer blocks the upgrade from completing. (link:https://issues.redhat.com/browse/LOG-2304[LOG-2304]) - -[id="cluster-logging-release-notes-5-2-8"] -== OpenShift Logging 5.2.8 - -This release includes link:https://access.redhat.com/errata/RHSA-2022:0728[RHSA-2022:0728 OpenShift Logging Bug Fix Release 5.2.8] - -[id="openshift-logging-5-2-8-bug-fixes"] -=== Bug fixes -* Before this update, if you removed OpenShift Logging from {product-title}, the web console continued displaying a link to the *Logging* page. With this update, removing or uninstalling OpenShift Logging also removes that link.(link:https://issues.redhat.com/browse/LOG-2180[LOG-2180]) - -[id="openshift-logging-5-2-8-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2020-28491[CVE-2020-28491] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1930423[BZ-1930423] -* link:https://access.redhat.com/security/cve/CVE-2022-0552[CVE-2022-0552] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2052539[BG-2052539] -==== - -[id="cluster-logging-release-notes-5-2-7"] -== OpenShift Logging 5.2.7 - -This release includes link:https://access.redhat.com/errata/RHBA-2022:0478[RHBA-2022:0478 OpenShift Logging Bug Fix Release 5.2.7] - -[id="openshift-logging-5-2-7-bug-fixes"] -=== Bug fixes -* Before this update, Elasticsearch pods failed to start after updating with FIPS enabled. With this update, Elasticsearch pods start successfully. 
(link:https://issues.redhat.com/browse/LOG-2000[LOG-2000]) - -* Before this update, if a persistent volume claim (PVC) already existed, Elasticsearch generated an error, "Unable to create PersistentVolumeClaim due to forbidden: exceeded quota: infra-storage-quota." With this update, Elasticsearch checks for existing PVCs, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2118[LOG-2118]) - -[id="openshift-logging-5-2-7-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-3521[CVE-2021-3521] -* link:https://access.redhat.com/security/cve/CVE-2021-3872[CVE-2021-3872] -* link:https://access.redhat.com/security/cve/CVE-2021-3984[CVE-2021-3984] -* link:https://access.redhat.com/security/cve/CVE-2021-4019[CVE-2021-4019] -* link:https://access.redhat.com/security/cve/CVE-2021-4122[CVE-2021-4122] -* link:https://access.redhat.com/security/cve/CVE-2021-4155[CVE-2021-4155] -* link:https://access.redhat.com/security/cve/CVE-2021-4192[CVE-2021-4192] -* link:https://access.redhat.com/security/cve/CVE-2021-4193[CVE-2021-4193] -* link:https://access.redhat.com/security/cve/CVE-2022-0185[CVE-2022-0185] -==== - -[id="cluster-logging-release-notes-5-2-6"] -== OpenShift Logging 5.2.6 - -This release includes link:https://access.redhat.com/errata/RHSA-2022:0230[RHSA-2022:0230 OpenShift Logging Bug Fix Release 5.2.6] - -[id="openshift-logging-5-2-6-bug-fixes"] -=== Bug fixes -* Before this update, the release did not include a filter change which caused fluentd to crash. With this update, the missing filter has been corrected. (link:https://issues.redhat.com/browse/LOG-2104[LOG-2104]) - -* This update changes the log4j dependency to 2.17.1 to resolve link:https://access.redhat.com/security/cve/CVE-2021-44832[CVE-2021-44832].(link:https://issues.redhat.com/browse/LOG-2101[LOG-2101]) - -[id="openshift-logging-5-2-6-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-27292[CVE-2021-27292] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1940613[BZ-1940613] -* link:https://access.redhat.com/security/cve/CVE-2021-44832[CVE-2021-44832] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2035951[BZ-2035951] -==== - -[id="cluster-logging-release-notes-5-2-5"] -== OpenShift Logging 5.2.5 - -This release includes link:https://access.redhat.com/errata/RHSA-2022:0043[RHSA-2022:0043 OpenShift Logging Bug Fix Release 5.2.5] - -[id="openshift-logging-5-2-5-bug-fixes"] -=== Bug fixes -* Before this update, Elasticsearch rejected logs from the Event Router due to a parsing error. This update changes the data model to resolve the parsing error. However, as a result, previous indices might cause warnings or errors within Kibana. The `kubernetes.event.metadata.resourceVersion` field causes errors until existing indices are removed or reindexed. If this field is not used in Kibana, you can ignore the error messages. If you have a retention policy that deletes old indices, the policy eventually removes the old indices and stops the error messages. Otherwise, manually reindex to stop the error messages. 
link:https://issues.redhat.com/browse/LOG-2087[LOG-2087]) - - -[id="openshift-logging-5-2-5-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-3712[CVE-2021-3712] -* link:https://access.redhat.com/security/cve/CVE-2021-20321[CVE-2021-20321] -* link:https://access.redhat.com/security/cve/CVE-2021-42574[CVE-2021-42574] -* link:https://access.redhat.com/security/cve/CVE-2021-45105[CVE-2021-45105] -==== - -[id="cluster-logging-release-notes-5-2-4"] -== OpenShift Logging 5.2.4 - -This release includes link:https://access.redhat.com/errata/RHSA-2021:5127[RHSA-2021:5127 OpenShift Logging Bug Fix Release 5.2.4] - -[id="openshift-logging-5-2-4-bug-fixes"] -=== Bug fixes - -* Before this update records shipped via syslog would serialize a ruby hash encoding key/value pairs to contain a '=>' character, as well as replace tabs with "#11". This update serializes the message correctly as proper JSON. (link:https://issues.redhat.com/browse/LOG-1775[LOG-1775]) - -* Before this update, the Elasticsearch Prometheus exporter plugin compiled index-level metrics using a high-cost query that impacted the Elasticsearch node performance. This update implements a lower-cost query that improves performance. (link:https://issues.redhat.com/browse/LOG-1970[LOG-1970]) - -* Before this update, Elasticsearch sometimes rejected messages when Log Forwarding was configured with multiple outputs. This happened because configuring one of the outputs modified message content to be a single message. With this update, Log Forwarding duplicates the messages for each output so that output-specific processing does not affect the other outputs. (link:https://issues.redhat.com/browse/LOG-1824[LOG-1824]) - - -[id="openshift-logging-5-2-4-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://www.redhat.com/security/data/cve/CVE-2018-25009.html[CVE-2018-25009] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25010.html[CVE-2018-25010] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25012.html[CVE-2018-25012] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25013.html[CVE-2018-25013] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25014.html[CVE-2018-25014] -* link:https://www.redhat.com/security/data/cve/CVE-2019-5827.html[CVE-2019-5827] -* link:https://www.redhat.com/security/data/cve/CVE-2019-13750.html[CVE-2019-13750] -* link:https://www.redhat.com/security/data/cve/CVE-2019-13751.html[CVE-2019-13751] -* link:https://www.redhat.com/security/data/cve/CVE-2019-17594.html[CVE-2019-17594] -* link:https://www.redhat.com/security/data/cve/CVE-2019-17595.html[CVE-2019-17595] -* link:https://www.redhat.com/security/data/cve/CVE-2019-18218.html[CVE-2019-18218] -* link:https://www.redhat.com/security/data/cve/CVE-2019-19603.html[CVE-2019-19603] -* link:https://www.redhat.com/security/data/cve/CVE-2019-20838.html[CVE-2019-20838] -* link:https://www.redhat.com/security/data/cve/CVE-2020-12762.html[CVE-2020-12762] -* link:https://www.redhat.com/security/data/cve/CVE-2020-13435.html[CVE-2020-13435] -* link:https://www.redhat.com/security/data/cve/CVE-2020-14145.html[CVE-2020-14145] -* link:https://www.redhat.com/security/data/cve/CVE-2020-14155.html[CVE-2020-14155] -* link:https://www.redhat.com/security/data/cve/CVE-2020-16135.html[CVE-2020-16135] -* link:https://www.redhat.com/security/data/cve/CVE-2020-17541.html[CVE-2020-17541] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24370.html[CVE-2020-24370] -* 
link:https://www.redhat.com/security/data/cve/CVE-2020-35521.html[CVE-2020-35521] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35522.html[CVE-2020-35522] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35523.html[CVE-2020-35523] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35524.html[CVE-2020-35524] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36330.html[CVE-2020-36330] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36331.html[CVE-2020-36331] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36332.html[CVE-2020-36332] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3200.html[CVE-2021-3200] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3426.html[CVE-2021-3426] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3445.html[CVE-2021-3445] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3481.html[CVE-2021-3481] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3572.html[CVE-2021-3572] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3580.html[CVE-2021-3580] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3712.html[CVE-2021-3712] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3800.html[CVE-2021-3800] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20231.html[CVE-2021-20231] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20232.html[CVE-2021-20232] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20266.html[CVE-2021-20266] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20317.html[CVE-2021-20317] -* link:https://www.redhat.com/security/data/cve/CVE-2021-21409.html[CVE-2021-21409] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22876.html[CVE-2021-22876] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22898.html[CVE-2021-22898] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22925.html[CVE-2021-22925] -* link:https://www.redhat.com/security/data/cve/CVE-2021-27645.html[CVE-2021-27645] -* link:https://www.redhat.com/security/data/cve/CVE-2021-28153.html[CVE-2021-28153] -* link:https://www.redhat.com/security/data/cve/CVE-2021-31535.html[CVE-2021-31535] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33560.html[CVE-2021-33560] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33574.html[CVE-2021-33574] -* link:https://www.redhat.com/security/data/cve/CVE-2021-35942.html[CVE-2021-35942] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36084.html[CVE-2021-36084] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36085.html[CVE-2021-36085] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36086.html[CVE-2021-36086] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36087.html[CVE-2021-36087] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37136.html[CVE-2021-37136] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37137.html[CVE-2021-37137] -* link:https://www.redhat.com/security/data/cve/CVE-2021-42574.html[CVE-2021-42574] -* link:https://www.redhat.com/security/data/cve/CVE-2021-43267.html[CVE-2021-43267] -* link:https://www.redhat.com/security/data/cve/CVE-2021-43527.html[CVE-2021-43527] -* link:https://www.redhat.com/security/data/cve/CVE-2021-44228.html[CVE-2021-44228] -* link:https://www.redhat.com/security/data/cve/CVE-2021-45046.html[CVE-2021-45046] -==== - -[id="cluster-logging-release-notes-5-2-3"] -== OpenShift Logging 5.2.3 - -This release includes link:https://access.redhat.com/errata/RHSA-2021:4032[RHSA-2021:4032 
OpenShift Logging Bug Fix Release 5.2.3] - -[id="openshift-logging-5-2-3-bug-fixes"] -=== Bug fixes - -* Before this update, some alerts did not include a namespace label. This omission doesn't comply with the OpenShift Monitoring Team's guidelines for writing alerting rules in OpenShift. With this update, all the alerts in Elasticsearch Operator include a namespace label and follow all the guidelines for writing alerting rules in OpenShift.(link:https://issues.redhat.com/browse/LOG-1857[LOG-1857]) - -* Before this update, a regression introduced in a prior release intentionally disabled JSON message parsing. This update re-enables JSON parsing. It also sets the log entry "level" based on the "level" field in parsed JSON message or by using regex to extract a match from a message field. (link:https://issues.redhat.com/browse/LOG-1759[LOG-1759]) - -[id="openshift-logging-5-2-3-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-23369[CVE-2021-23369] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1948761[BZ-1948761] -* link:https://access.redhat.com/security/cve/CVE-2021-23383[CVE-2021-23383] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1956688[BZ-1956688] -* link:https://access.redhat.com/security/cve/CVE-2018-20673[CVE-2018-20673] -* link:https://access.redhat.com/security/cve/CVE-2019-5827[CVE-2019-5827] -* link:https://access.redhat.com/security/cve/CVE-2019-13750[CVE-2019-13750] -* link:https://access.redhat.com/security/cve/CVE-2019-13751[CVE-2019-13751] -* link:https://access.redhat.com/security/cve/CVE-2019-17594[CVE-2019-17594] -* link:https://access.redhat.com/security/cve/CVE-2019-17595[CVE-2019-17595] -* link:https://access.redhat.com/security/cve/CVE-2019-18218[CVE-2019-18218] -* link:https://access.redhat.com/security/cve/CVE-2019-19603[CVE-2019-19603] -* link:https://access.redhat.com/security/cve/CVE-2019-20838[CVE-2019-20838] -* link:https://access.redhat.com/security/cve/CVE-2020-12762[CVE-2020-12762] -* link:https://access.redhat.com/security/cve/CVE-2020-13435[CVE-2020-13435] -* link:https://access.redhat.com/security/cve/CVE-2020-14155[CVE-2020-14155] -* link:https://access.redhat.com/security/cve/CVE-2020-16135[CVE-2020-16135] -* link:https://access.redhat.com/security/cve/CVE-2020-24370[CVE-2020-24370] -* link:https://access.redhat.com/security/cve/CVE-2021-3200[CVE-2021-3200] -* link:https://access.redhat.com/security/cve/CVE-2021-3426[CVE-2021-3426] -* link:https://access.redhat.com/security/cve/CVE-2021-3445[CVE-2021-3445] -* link:https://access.redhat.com/security/cve/CVE-2021-3572[CVE-2021-3572] -* link:https://access.redhat.com/security/cve/CVE-2021-3580[CVE-2021-3580] -* link:https://access.redhat.com/security/cve/CVE-2021-3778[CVE-2021-3778] -* link:https://access.redhat.com/security/cve/CVE-2021-3796[CVE-2021-3796] -* link:https://access.redhat.com/security/cve/CVE-2021-3800[CVE-2021-3800] -* link:https://access.redhat.com/security/cve/CVE-2021-20231[CVE-2021-20231] -* link:https://access.redhat.com/security/cve/CVE-2021-20232[CVE-2021-20232] -* link:https://access.redhat.com/security/cve/CVE-2021-20266[CVE-2021-20266] -* link:https://access.redhat.com/security/cve/CVE-2021-22876[CVE-2021-22876] -* link:https://access.redhat.com/security/cve/CVE-2021-22898[CVE-2021-22898] -* link:https://access.redhat.com/security/cve/CVE-2021-22925[CVE-2021-22925] -* link:https://access.redhat.com/security/cve/CVE-2021-23840[CVE-2021-23840] -* 
link:https://access.redhat.com/security/cve/CVE-2021-23841[CVE-2021-23841] -* link:https://access.redhat.com/security/cve/CVE-2021-27645[CVE-2021-27645] -* link:https://access.redhat.com/security/cve/CVE-2021-28153[CVE-2021-28153] -* link:https://access.redhat.com/security/cve/CVE-2021-33560[CVE-2021-33560] -* link:https://access.redhat.com/security/cve/CVE-2021-33574[CVE-2021-33574] -* link:https://access.redhat.com/security/cve/CVE-2021-35942[CVE-2021-35942] -* link:https://access.redhat.com/security/cve/CVE-2021-36084[CVE-2021-36084] -* link:https://access.redhat.com/security/cve/CVE-2021-36085[CVE-2021-36085] -* link:https://access.redhat.com/security/cve/CVE-2021-36086[CVE-2021-36086] -* link:https://access.redhat.com/security/cve/CVE-2021-36087[CVE-2021-36087] -==== - -[id="cluster-logging-release-notes-5-2-2"] -== OpenShift Logging 5.2.2 - -This release includes link:https://access.redhat.com/errata/RHBA-2021:3747[RHBA-2021:3747 OpenShift Logging Bug Fix Release 5.2.2] - -[id="openshift-logging-5-2-2-bug-fixes"] -=== Bug fixes - -* Before this update, the `ClusterLogging` custom resource (CR) applied the value of the `totalLimitSize` field to the Fluentd `total_limit_size` field, even if the required buffer space was not available. With this update, the CR applies the lesser of the two `totalLimitSize` or 'default' values to the Fluentd `total_limit_size` field, resolving the issue.(link:https://issues.redhat.com/browse/LOG-1738[LOG-1738]) - -* Before this update, a regression introduced in a prior release configuration caused the collector to flush its buffered messages before shutdown, creating a delay the termination and restart of collector Pods. With this update, fluentd no longer flushes buffers at shutdown, resolving the issue. (link:https://issues.redhat.com/browse/LOG-1739[LOG-1739]) - -* Before this update, an issue in the bundle manifests prevented installation of the Elasticsearch operator through OLM on OpenShift 4.9. With this update, a correction to bundle manifests re-enables installs and upgrades in 4.9.(link:https://issues.redhat.com/browse/LOG-1780[LOG-1780]) - -[id="openshift-logging-5-2-2-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://www.redhat.com/security/data/cve/CVE-2020-25648.html[CVE-2020-25648] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22922.html[CVE-2021-22922] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22923.html[CVE-2021-22923] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22924.html[CVE-2021-22924] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36222.html[CVE-2021-36222] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37576.html[CVE-2021-37576] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37750.html[CVE-2021-37750] -* link:https://www.redhat.com/security/data/cve/CVE-2021-38201.html[CVE-2021-38201] -==== - -[id="cluster-logging-release-notes-5-2-1"] -== OpenShift Logging 5.2.1 - -This release includes link:https://access.redhat.com/errata/RHBA-2021:3550[RHBA-2021:3550 OpenShift Logging Bug Fix Release 5.2.1] - -[id="openshift-logging-5-2-1-bug-fixes"] -=== Bug fixes - -* Before this update, due to an issue in the release pipeline scripts, the value of the `olm.skipRange` field remained unchanged at `5.2.0` instead of reflecting the current release number. This update fixes the pipeline scripts to update the value of this field when the release numbers change. 
(link:https://issues.redhat.com/browse/LOG-1743[LOG-1743]) - -[id="openshift-logging-5-2-1-CVEs"] -=== CVEs - -(None) diff --git a/modules/cluster-logging-release-notes-5.3.z.adoc b/modules/cluster-logging-release-notes-5.3.z.adoc deleted file mode 100644 index 01db62341b9a..000000000000 --- a/modules/cluster-logging-release-notes-5.3.z.adoc +++ /dev/null @@ -1,247 +0,0 @@ -//Z-stream Release Notes by Version -[id="cluster-logging-release-notes-5-3-7"] -== OpenShift Logging 5.3.7 -This release includes link:https://access.redhat.com/errata/RHSA-2022:2217[RHSA-2022:2217 OpenShift Logging Bug Fix Release 5.3.7] - -[id="openshift-logging-5-3-7-bug-fixes"] -=== Bug fixes -* Before this update, Linux audit log time parsing relied on an ordinal position of key/value pair. This update changes the parsing to utilize a regex to find the time entry. (https://issues.redhat.com/browse/LOG-2322[LOG-2322]) - -* * Before this update some log forwarder outputs could re-order logs with the same time-stamp. With this update, a sequence number has been added to the log record to order entries that have matching timestamps. (https://issues.redhat.com/browse/LOG-2334[LOG-2334]) - -* Before this update, clusters with a large number of namespaces caused Elasticsearch to stop serving requests because the list of namespaces reached the maximum header size limit. With this update, headers only include a list of namespace names, resolving the issue. (https://issues.redhat.com/browse/LOG-2450[LOG-2450]) - -* Before this update, `system:serviceaccount:openshift-monitoring:prometheus-k8s` had cluster level privileges as a `clusterrole` and `clusterrolebinding`. This update restricts the `serviceaccount` to the `openshift-logging` namespace with a role and rolebinding. (https://issues.redhat.com/browse/LOG-2481[LOG-2481)]) - -=== CVEs -[id="openshift-logging-5-3-7-CVEs"] -.Click to expand CVEs -[%collapsible] -==== -* https://access.redhat.com/security/cve/CVE-2018-25032[CVE-2018-25032] -* https://access.redhat.com/security/cve/CVE-2021-4028[CVE-2021-4028] -* https://access.redhat.com/security/cve/CVE-2021-37136[CVE-2021-37136] -* https://access.redhat.com/security/cve/CVE-2021-37137[CVE-2021-37137] -* https://access.redhat.com/security/cve/CVE-2021-43797[CVE-2021-43797] -* https://access.redhat.com/security/cve/CVE-2022-0759[CVE-2022-0759] -* https://access.redhat.com/security/cve/CVE-2022-0778[CVE-2022-0778] -* https://access.redhat.com/security/cve/CVE-2022-1154[CVE-2022-1154] -* https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* https://access.redhat.com/security/cve/CVE-2022-21426[CVE-2022-21426] -* https://access.redhat.com/security/cve/CVE-2022-21434[CVE-2022-21434] -* https://access.redhat.com/security/cve/CVE-2022-21443[CVE-2022-21443] -* https://access.redhat.com/security/cve/CVE-2022-21476[CVE-2022-21476] -* https://access.redhat.com/security/cve/CVE-2022-21496[CVE-2022-21496] -* https://access.redhat.com/security/cve/CVE-2022-21698[CVE-2022-21698] -* https://access.redhat.com/security/cve/CVE-2022-25636[CVE-2022-25636] -==== - -[id="cluster-logging-release-notes-5-3-6"] -== OpenShift Logging 5.3.6 -This release includes link:https://access.redhat.com/errata/RHBA-2022:1377[RHBA-2022:1377 OpenShift Logging Bug Fix Release 5.3.6] - -[id="openshift-logging-5-3-6-bug-fixes"] -=== Bug fixes -* Before this update, defining a toleration with no key and the existing Operator caused the Operator to be unable to complete an upgrade. 
With this update, this toleration no longer blocks the upgrade from completing. (link:https://issues.redhat.com/browse/LOG-2126[LOG-2126]) - -* Before this change, it was possible for the collector to generate a warning where the chunk byte limit was exceeding an emitted event. With this change, you can tune the readline limit to resolve the issue as advised by the upstream documentation. (link:https://issues.redhat.com/browse/LOG-2380[LOG-2380]) - -[id="cluster-logging-release-notes-5-3-5"] -== OpenShift Logging 5.3.5 -[role="_abstract"] -This release includes link:https://access.redhat.com/errata/RHSA-2022:0721[RHSA-2022:0721 OpenShift Logging Bug Fix Release 5.3.5] - -[id="openshift-logging-5-3-5-bug-fixes"] -=== Bug fixes -* Before this update, if you removed OpenShift Logging from {product-title}, the web console continued displaying a link to the *Logging* page. With this update, removing or uninstalling OpenShift Logging also removes that link. (link:https://issues.redhat.com/browse/LOG-2182[LOG-2182]) - -=== CVEs -[id="openshift-logging-5-3-5-CVEs"] -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2020-28491[CVE-2020-28491] -* link:https://access.redhat.com/security/cve/CVE-2021-3521[CVE-2021-3521] -* link:https://access.redhat.com/security/cve/CVE-2021-3872[CVE-2021-3872] -* link:https://access.redhat.com/security/cve/CVE-2021-3984[CVE-2021-3984] -* link:https://access.redhat.com/security/cve/CVE-2021-4019[CVE-2021-4019] -* link:https://access.redhat.com/security/cve/CVE-2021-4122[CVE-2021-4122] -* link:https://access.redhat.com/security/cve/CVE-2021-4192[CVE-2021-4192] -* link:https://access.redhat.com/security/cve/CVE-2021-4193[CVE-2021-4193] -* link:https://access.redhat.com/security/cve/CVE-2022-0552[CVE-2022-0552] -==== - -[id="cluster-logging-release-notes-5-3-4"] -== OpenShift Logging 5.3.4 -[role="_abstract"] -This release includes link:https://access.redhat.com/errata/RHBA-2022:0411[RHBA-2022:0411 OpenShift Logging Bug Fix Release 5.3.4] - -[id="openshift-logging-5-3-4-bug-fixes"] -=== Bug fixes -* Before this update, changes to the metrics dashboards had not yet been deployed because the `cluster-logging-operator` did not correctly compare existing and desired config maps that contained the dashboard. This update fixes the logic by adding a unique hash value to the object labels. (link:https://issues.redhat.com/browse/LOG-2066[LOG-2066]) - -* Before this update, Elasticsearch pods failed to start after updating with FIPS enabled. With this update, Elasticsearch pods start successfully. (link:https://issues.redhat.com/browse/LOG-1974[LOG-1974]) - -* Before this update, elasticsearch generated the error "Unable to create PersistentVolumeClaim due to forbidden: exceeded quota: infra-storage-quota." if the PVC already existed. With this update, elasticsearch checks for existing PVCs, resolving the issue. 
(link:https://issues.redhat.com/browse/LOG-2127[LOG-2127]) - -=== CVEs -[id="openshift-logging-5-3-4-CVEs"] -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-3521[CVE-2021-3521] -* link:https://access.redhat.com/security/cve/CVE-2021-3872[CVE-2021-3872] -* link:https://access.redhat.com/security/cve/CVE-2021-3984[CVE-2021-3984] -* link:https://access.redhat.com/security/cve/CVE-2021-4019[CVE-2021-4019] -* link:https://access.redhat.com/security/cve/CVE-2021-4122[CVE-2021-4122] -* link:https://access.redhat.com/security/cve/CVE-2021-4155[CVE-2021-4155] -* link:https://access.redhat.com/security/cve/CVE-2021-4192[CVE-2021-4192] -* link:https://access.redhat.com/security/cve/CVE-2021-4193[CVE-2021-4193] -* link:https://access.redhat.com/security/cve/CVE-2022-0185[CVE-2022-0185] -* link:https://access.redhat.com/security/cve/CVE-2022-21248[CVE-2022-21248] -* link:https://access.redhat.com/security/cve/CVE-2022-21277[CVE-2022-21277] -* link:https://access.redhat.com/security/cve/CVE-2022-21282[CVE-2022-21282] -* link:https://access.redhat.com/security/cve/CVE-2022-21283[CVE-2022-21283] -* link:https://access.redhat.com/security/cve/CVE-2022-21291[CVE-2022-21291] -* link:https://access.redhat.com/security/cve/CVE-2022-21293[CVE-2022-21293] -* link:https://access.redhat.com/security/cve/CVE-2022-21294[CVE-2022-21294] -* link:https://access.redhat.com/security/cve/CVE-2022-21296[CVE-2022-21296] -* link:https://access.redhat.com/security/cve/CVE-2022-21299[CVE-2022-21299] -* link:https://access.redhat.com/security/cve/CVE-2022-21305[CVE-2022-21305] -* link:https://access.redhat.com/security/cve/CVE-2022-21340[CVE-2022-21340] -* link:https://access.redhat.com/security/cve/CVE-2022-21341[CVE-2022-21341] -* link:https://access.redhat.com/security/cve/CVE-2022-21360[CVE-2022-21360] -* link:https://access.redhat.com/security/cve/CVE-2022-21365[CVE-2022-21365] -* link:https://access.redhat.com/security/cve/CVE-2022-21366[CVE-2022-21366] -==== - -[id="cluster-logging-release-notes-5-3-3"] -== OpenShift Logging 5.3.3 -This release includes link:https://access.redhat.com/errata/RHSA-2022:0227[RHSA-2022:0227 OpenShift Logging Bug Fix Release 5.3.3] - -[id="openshift-logging-5-3-3-bug-fixes"] -=== Bug fixes -* Before this update, changes to the metrics dashboards had not yet been deployed because the cluster-logging-operator did not correctly compare existing and desired configmaps containing the dashboard. 
This update fixes the logic by adding a dashboard unique hash value to the object labels.(link:https://issues.redhat.com/browse/LOG-2066[LOG-2066]) - -* This update changes the log4j dependency to 2.17.1 to resolve link:https://access.redhat.com/security/cve/CVE-2021-44832[CVE-2021-44832].(link:https://issues.redhat.com/browse/LOG-2102[LOG-2102]) - -=== CVEs -[id="openshift-logging-5-3-3-CVEs"] -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-27292[CVE-2021-27292] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1940613[BZ-1940613] -* link:https://access.redhat.com/security/cve/CVE-2021-44832[CVE-2021-44832] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2035951[BZ-2035951] -==== - -[id="cluster-logging-release-notes-5-3-2"] -== OpenShift Logging 5.3.2 -This release includes link:https://access.redhat.com/errata/RHSA-2022:0044[RHSA-2022:0044 OpenShift Logging Bug Fix Release 5.3.2] - -[id="openshift-logging-5-3-2-bug-fixes"] -=== Bug fixes -* Before this update, Elasticsearch rejected logs from the Event Router due to a parsing error. This update changes the data model to resolve the parsing error. However, as a result, previous indices might cause warnings or errors within Kibana. The `kubernetes.event.metadata.resourceVersion` field causes errors until existing indices are removed or reindexed. If this field is not used in Kibana, you can ignore the error messages. If you have a retention policy that deletes old indices, the policy eventually removes the old indices and stops the error messages. Otherwise, manually reindex to stop the error messages. (link:https://issues.redhat.com/browse/LOG-2087[LOG-2087]) - -* Before this update, the OpenShift Logging Dashboard displayed the wrong pod namespace in the table that displays top producing and collected containers over the last 24 hours. With this update, the OpenShift Logging Dashboard displays the correct pod namespace. (link:https://issues.redhat.com/browse/LOG-2051[LOG-2051]) - -* Before this update, if `outputDefaults.elasticsearch.structuredTypeKey` in the `ClusterLogForwarder` custom resource (CR) instance did not have a structured key, the CR replaced the output secret with the default secret used to communicate to the default log store. With this update, the defined output secret is correctly used. (link:https://issues.redhat.com/browse/LOG-2046[LOG-2046]) - -[id="openshift-logging-5-3-2-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* https://access.redhat.com/security/cve/CVE-2020-36327[CVE-2020-36327] -** https://bugzilla.redhat.com/show_bug.cgi?id=1958999[BZ-1958999] -* https://access.redhat.com/security/cve/CVE-2021-45105[CVE-2021-45105] -** https://bugzilla.redhat.com/show_bug.cgi?id=2034067[BZ-2034067] -* https://access.redhat.com/security/cve/CVE-2021-3712[CVE-2021-3712] -* https://access.redhat.com/security/cve/CVE-2021-20321[CVE-2021-20321] -* https://access.redhat.com/security/cve/CVE-2021-42574[CVE-2021-42574] -==== - -[id="cluster-logging-release-notes-5-3-1"] -== OpenShift Logging 5.3.1 -This release includes link:https://access.redhat.com/errata/RHSA-2021:5129[RHSA-2021:5129 OpenShift Logging Bug Fix Release 5.3.1] - -[id="openshift-logging-5-3-1-bug-fixes"] -=== Bug fixes -* Before this update, the Fluentd container image included builder tools that were unnecessary at run time. This update removes those tools from the image. 
(link:https://issues.redhat.com/browse/LOG-1998[LOG-1998]) - -* Before this update, the Logging dashboard displayed an empty CPU graph because of a reference to an invalid metric. With this update, the Logging dashboard displays CPU graphs correctly. (link:https://issues.redhat.com/browse/LOG-1925[LOG-1925]) - -* Before this update, the Elasticsearch Prometheus exporter plugin compiled index-level metrics using a high-cost query that impacted the Elasticsearch node performance. This update implements a lower-cost query that improves performance. (link:https://issues.redhat.com/browse/LOG-1897[LOG-1897]) - - -[id="openshift-logging-5-3-1-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://www.redhat.com/security/data/cve/CVE-2021-21409.html[CVE-2021-21409] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1944888[BZ-1944888] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37136.html[CVE-2021-37136] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2004133[BZ-2004133] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37137.html[CVE-2021-37137] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2004135[BZ-2004135] -* link:https://www.redhat.com/security/data/cve/CVE-2021-44228.html[CVE-2021-44228] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2030932[BZ-2030932] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25009.html[CVE-2018-25009] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25010.html[CVE-2018-25010] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25012.html[CVE-2018-25012] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25013.html[CVE-2018-25013] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25014.html[CVE-2018-25014] -* link:https://www.redhat.com/security/data/cve/CVE-2019-5827.html[CVE-2019-5827] -* link:https://www.redhat.com/security/data/cve/CVE-2019-13750.html[CVE-2019-13750] -* link:https://www.redhat.com/security/data/cve/CVE-2019-13751.html[CVE-2019-13751] -* link:https://www.redhat.com/security/data/cve/CVE-2019-17594.html[CVE-2019-17594] -* link:https://www.redhat.com/security/data/cve/CVE-2019-17595.html[CVE-2019-17595] -* link:https://www.redhat.com/security/data/cve/CVE-2019-18218.html[CVE-2019-18218] -* link:https://www.redhat.com/security/data/cve/CVE-2019-19603.html[CVE-2019-19603] -* link:https://www.redhat.com/security/data/cve/CVE-2019-20838.html[CVE-2019-20838] -* link:https://www.redhat.com/security/data/cve/CVE-2020-12762.html[CVE-2020-12762] -* link:https://www.redhat.com/security/data/cve/CVE-2020-13435.html[CVE-2020-13435] -* link:https://www.redhat.com/security/data/cve/CVE-2020-14145.html[CVE-2020-14145] -* link:https://www.redhat.com/security/data/cve/CVE-2020-14155.html[CVE-2020-14155] -* link:https://www.redhat.com/security/data/cve/CVE-2020-16135.html[CVE-2020-16135] -* link:https://www.redhat.com/security/data/cve/CVE-2020-17541.html[CVE-2020-17541] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24370.html[CVE-2020-24370] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35521.html[CVE-2020-35521] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35522.html[CVE-2020-35522] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35523.html[CVE-2020-35523] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35524.html[CVE-2020-35524] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36330.html[CVE-2020-36330] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36331.html[CVE-2020-36331] -* 
link:https://www.redhat.com/security/data/cve/CVE-2020-36332.html[CVE-2020-36332] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3200.html[CVE-2021-3200] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3426.html[CVE-2021-3426] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3445.html[CVE-2021-3445] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3481.html[CVE-2021-3481] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3572.html[CVE-2021-3572] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3580.html[CVE-2021-3580] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3712.html[CVE-2021-3712] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3800.html[CVE-2021-3800] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20231.html[CVE-2021-20231] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20232.html[CVE-2021-20232] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20266.html[CVE-2021-20266] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20317.html[CVE-2021-20317] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22876.html[CVE-2021-22876] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22898.html[CVE-2021-22898] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22925.html[CVE-2021-22925] -* link:https://www.redhat.com/security/data/cve/CVE-2021-27645.html[CVE-2021-27645] -* link:https://www.redhat.com/security/data/cve/CVE-2021-28153.html[CVE-2021-28153] -* link:https://www.redhat.com/security/data/cve/CVE-2021-31535.html[CVE-2021-31535] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33560.html[CVE-2021-33560] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33574.html[CVE-2021-33574] -* link:https://www.redhat.com/security/data/cve/CVE-2021-35942.html[CVE-2021-35942] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36084.html[CVE-2021-36084] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36085.html[CVE-2021-36085] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36086.html[CVE-2021-36086] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36087.html[CVE-2021-36087] -* link:https://www.redhat.com/security/data/cve/CVE-2021-42574.html[CVE-2021-42574] -* link:https://www.redhat.com/security/data/cve/CVE-2021-43267.html[CVE-2021-43267] -* link:https://www.redhat.com/security/data/cve/CVE-2021-43527.html[CVE-2021-43527] -* link:https://www.redhat.com/security/data/cve/CVE-2021-45046.html[CVE-2021-45046] -==== diff --git a/modules/cluster-logging-release-notes-5.4.0.adoc b/modules/cluster-logging-release-notes-5.4.0.adoc deleted file mode 100644 index 794d45e98e1d..000000000000 --- a/modules/cluster-logging-release-notes-5.4.0.adoc +++ /dev/null @@ -1,53 +0,0 @@ - -// Module included in the following assemblies: -//cluster-logging-release-notes.adoc -[id="cluster-logging-release-notes-5-4-0"] -= Logging 5.4 -The following advisories are available for logging 5.4: -link:https://access.redhat.com/errata/RHSA-2022:1461[{logging-title-uc} Release 5.4] - -[id="openshift-logging-5-4-0-bug-fixes"] -== Bug fixes - -* Before this update, the `cluster-logging-operator` utilized cluster scoped roles and bindings to establish permissions for the Prometheus service account to scrape metrics. These permissions were only created when deploying the Operator using the console interface but were missing when deploying from the command line. This update fixes the issue by making the roles and bindings namespace-scoped. 
(link:https://issues.redhat.com/browse/LOG-2286[LOG-2286]) - -* Before this update, a prior change to fix dashboard reconciliation introduced a `ownerReferences` field to the resource across namespaces. As a result, both the config map and dashboard were not getting created in the namespace. With this update, the removal of the `ownerReferences` field resolves the issue and the OpenShift Logging dashboard is available in the console. (link:https://issues.redhat.com/browse/LOG-2163[LOG-2163]) - -* Before this update, changes to the metrics dashboards did not deploy because the `cluster-logging-operator` did not correctly compare existing and desired configmaps containing the dashboard. With this update, the addition of a unique hash value to object labels resolves the issue. (link:https://issues.redhat.com/browse/LOG-2071[LOG-2071]) - -* Before this update, the OpenShift Logging dashboard did not correctly display the pods and namespaces in the table, which displays the top producing containers collected over the last 24 hours. With this update, the pods and namespaces are displayed correctly. (link:https://issues.redhat.com/browse/LOG-2069[LOG-2069]) - -* Before this update, when the `ClusterLogForwarder` was set up with `Elasticsearch OutputDefault` and Elasticsearch outputs did not have structured keys, the generated configuration contained the incorrect values for authentication. This update corrects the secret and certificates used. (link:https://issues.redhat.com/browse/LOG-2056[LOG-2056]) - -* Before this update, the OpenShift Logging dashboard displayed an empty CPU graph because of a reference to an invalid metric. With this update, the correct data point has been selected, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2026[LOG-2026]) - -* Before this update, the Fluentd container image included builder tools that were unnecessary at run time. This update removes those tools from the image.(link:https://issues.redhat.com/browse/LOG-1927[LOG-1927]) - -* Before this update, a name change of the deployed collector in the 5.3 release caused the logging collector to generate the `FluentdNodeDown` alert. This update resolves the issue by fixing the job name for the Prometheus alert. (link:https://issues.redhat.com/browse/LOG-1918[LOG-1918]) - -* Before this update, the log collector was collecting its own logs due to a refactoring of the component name change. This could lead to a potential feedback loop of the collector processing its own log that might result in memory and log message size issues. This update resolves the issue by excluding the collector logs from the collection. (link:https://issues.redhat.com/browse/LOG-1774[LOG-1774]) - -* Before this update, Elasticsearch generated the error "Unable to create PersistentVolumeClaim due to forbidden: exceeded quota: infra-storage-quota." if the PVC already existed. With this update, Elasticsearch checks for existing PVCs, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2131[LOG-2131]) - -* Before this update, Elasticsearch was unable to return to the ready state when the `elasticsearch-signing `secret was removed. With this update, Elasticsearch is able to go back to the ready state after that secret is removed. (link:https://issues.redhat.com/browse/LOG-2171[LOG-2171]) - -* Before this update, the change of the path from which the collector reads container logs caused the collector to forward some records to the wrong indices. 
With this update, the collector now uses the correct configuration to resolve the issue. (link:https://issues.redhat.com/browse/LOG-2160[LOG-2160]) - -* Before this update, clusters with a large number of namespaces caused Elasticsearch to stop serving requests because the list of namespaces reached the maximum header size limit. With this update, headers only include a list of namespace names, resolving the issue. (link:https://issues.redhat.com/browse/LOG-1899[LOG-1899]) - -* Before this update, the *{product-title} Logging* dashboard showed the number of shards 'x' times larger than the actual value when Elasticsearch had 'x' nodes. This issue occurred because it was printing all primary shards for each ES pod and calculating a sum on it, although the output was always for the whole ES cluster. With this update, the number of shards is now correctly calculated. (link:https://issues.redhat.com/browse/LOG-2156[LOG-2156]) - -* Before this update, the secrets "kibana" and "kibana-proxy" were not recreated if they were deleted manually. With this update, the `elasticsearch-operator` will watch the resources and automatically recreate them if deleted. (link:https://issues.redhat.com/browse/LOG-2250[LOG-2250]) - -* Before this update, tuning the buffer chunk size could cause the collector to generate a warning about the chunk size exceeding the byte limit for the event stream. With this update, you can also tune the read line limit, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2379[LOG-2379]) - -* Before this update, the logging console link in OpenShift WebConsole was not removed with the ClusterLogging CR. With this update, deleting the CR or uninstalling the Cluster Logging Operator removes the link. (link:https://issues.redhat.com/browse/LOG-2373[LOG-2373]) - -* Before this update, a change to the container logs path caused this metric to always be zero with older releases configured with the original path. With this update, the plugin which exposes metrics about collected logs supports reading from either path to resolve the issue. (link:https://issues.redhat.com/browse/LOG-2462[LOG-2462]) - -== CVEs -[id="openshift-logging-5-4-0-CVEs"] -* link:https://access.redhat.com/security/cve/CVE-2022-0759[CVE-2022-0759] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2058404[BZ-2058404] -* link:https://access.redhat.com/security/cve/CVE-2022-21698[CVE-2022-21698] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2045880[BZ-2045880] diff --git a/modules/cluster-logging-release-notes-5.4.z.adoc b/modules/cluster-logging-release-notes-5.4.z.adoc deleted file mode 100644 index 5c16c65d03f4..000000000000 --- a/modules/cluster-logging-release-notes-5.4.z.adoc +++ /dev/null @@ -1,39 +0,0 @@ -//Z-stream Release Notes by Version -[id="cluster-logging-release-notes-5-4-1"] -== Logging 5.4.1 -This release includes https://access.redhat.com/errata/RHSA-2022:2216[RHSA-2022:2216-OpenShift Logging Bug Fix Release 5.4.1]. - -[id="openshift-logging-5-4-1-bug-fixes"] -=== Bug fixes -* Before this update, the log file metric exporter only reported logs created while the exporter was running, which resulted in inaccurate log growth data. This update resolves this issue by monitoring `/var/log/pods`. (https://issues.redhat.com/browse/LOG-2442[LOG-2442]) - -* Before this update, the collector would be blocked because it continually tried to use a stale connection when forwarding logs to fluentd forward receivers. 
With this release, the `keepalive_timeout` value has been set to 30 seconds (`30s`) so that the collector recycles the connection and re-attempts to send failed messages within a reasonable amount of time. (https://issues.redhat.com/browse/LOG-2534[LOG-2534]) - -* Before this update, an error in the gateway component enforcing tenancy for reading logs limited access to logs with a Kubernetes namespace causing "audit" and some "infrastructure" logs to be unreadable. With this update, the proxy correctly detects users with admin access and allows access to logs without a namespace. (https://issues.redhat.com/browse/LOG-2448[LOG-2448]) - -* Before this update, `system:serviceaccount:openshift-monitoring:prometheus-k8s` had cluster level privileges as a `clusterrole` and `clusterrolebinding`. This update restricts the `serviceaccount` to the `openshift-logging` namespace with a role and rolebinding. (https://issues.redhat.com/browse/LOG-2437[LOG-2437]) - -* Before this update, Linux audit log time parsing relied on an ordinal position of a key/value pair. This update changes the parsing to use a regular expression to find the time entry. (https://issues.redhat.com/browse/LOG-2321[LOG-2321]) - - -[id="openshift-logging-5-4-1-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* https://access.redhat.com/security/cve/CVE-2018-25032[CVE-2018-25032] -* https://access.redhat.com/security/cve/CVE-2021-4028[CVE-2021-4028] -* https://access.redhat.com/security/cve/CVE-2021-37136[CVE-2021-37136] -* https://access.redhat.com/security/cve/CVE-2021-37137[CVE-2021-37137] -* https://access.redhat.com/security/cve/CVE-2021-43797[CVE-2021-43797] -* https://access.redhat.com/security/cve/CVE-2022-0778[CVE-2022-0778] -* https://access.redhat.com/security/cve/CVE-2022-1154[CVE-2022-1154] -* https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* https://access.redhat.com/security/cve/CVE-2022-21426[CVE-2022-21426] -* https://access.redhat.com/security/cve/CVE-2022-21434[CVE-2022-21434] -* https://access.redhat.com/security/cve/CVE-2022-21443[CVE-2022-21443] -* https://access.redhat.com/security/cve/CVE-2022-21476[CVE-2022-21476] -* https://access.redhat.com/security/cve/CVE-2022-21496[CVE-2022-21496] -* https://access.redhat.com/security/cve/CVE-2022-21698[CVE-2022-21698] -* https://access.redhat.com/security/cve/CVE-2022-25636[CVE-2022-25636] -==== diff --git a/modules/cluster-logging-removing-unused-components-if-no-elasticsearch.adoc b/modules/cluster-logging-removing-unused-components-if-no-elasticsearch.adoc deleted file mode 100644 index aaeee905be20..000000000000 --- a/modules/cluster-logging-removing-unused-components-if-no-elasticsearch.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-collector.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-removing-unused-components-if-no-elasticsearch_{context}"] -= Removing unused components if you do not use the default Elasticsearch log store - -As an administrator, in the rare case that you forward logs to a third-party log store and do not use the default Elasticsearch log store, you can remove several unused components from your logging cluster. - -In other words, if you do not use the default Elasticsearch log store, you can remove the internal Elasticsearch `logStore` and Kibana `visualization` components from the `ClusterLogging` custom resource (CR). Removing these components is optional but saves resources. 
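For reference, a `ClusterLogForwarder` CR that sends logs only to an external store, and therefore never references the `default` output, might look similar to the following minimal sketch. The output name, URL, and secret name are illustrative placeholders, not values taken from this procedure.

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: instance
  namespace: openshift-logging
spec:
  outputs:
  - name: external-es                # placeholder name for a third-party log store
    type: elasticsearch
    url: https://elasticsearch.example.com:9200
    secret:
      name: external-es-credentials  # placeholder secret with the connection credentials
  pipelines:
  - name: forward-to-external
    inputRefs:
    - application
    - infrastructure
    outputRefs:
    - external-es                    # no "default" entry, so the internal log store receives nothing
----

Because no pipeline lists `default` in its `outputRefs`, the internal Elasticsearch `logStore` and Kibana `visualization` components sit idle and can be removed as described in the following procedure.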
- -.Prerequisites - -* Verify that your log forwarder does not send log data to the default internal Elasticsearch cluster. Inspect the `ClusterLogForwarder` CR YAML file that you used to configure log forwarding. Verify that it _does not_ have an `outputRefs` element that specifies `default`. For example: -+ -[source,yaml] ----- -outputRefs: -- default ----- - -[WARNING] -==== -Suppose the `ClusterLogForwarder` CR forwards log data to the internal Elasticsearch cluster, and you remove the `logStore` component from the `ClusterLogging` CR. In that case, the internal Elasticsearch cluster will not be present to store the log data. This absence can cause data loss. -==== - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc edit ClusterLogging instance ----- - -. If they are present, remove the `logStore` and `visualization` stanzas from the `ClusterLogging` CR. - -. Preserve the `collection` stanza of the `ClusterLogging` CR. The result should look similar to the following example: -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - namespace: "openshift-logging" -spec: - managementState: "Managed" - collection: - logs: - type: "fluentd" - fluentd: {} ----- - -. Verify that the collector pods are redeployed: -+ -[source,terminal] ----- -$ oc get pods -l component=collector -n openshift-logging ----- diff --git a/modules/cluster-logging-rn-5.2.11.adoc b/modules/cluster-logging-rn-5.2.11.adoc deleted file mode 100644 index a3b84e3b1145..000000000000 --- a/modules/cluster-logging-rn-5.2.11.adoc +++ /dev/null @@ -1,61 +0,0 @@ -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-2-11"] -= Logging 5.2.11 -This release includes link:https://access.redhat.com/errata/RHBA-2022:5012[RHBA-2022:5012-OpenShift Logging Bug Fix Release 5.2.11] - -[id="openshift-logging-5-2-11-bug-fixes"] -== Bug fixes -* Before this update, clusters configured to perform CloudWatch forwarding wrote rejected log files to temporary storage, causing cluster instability over time. With this update, chunk backup for CloudWatch has been disabled, resolving the issue. 
(link:https://issues.redhat.com/browse/LOG-2635[LOG-2635]) - -[id="openshift-logging-5-2-11-CVEs"] -== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2018-25032[CVE-2018-25032] -* link:https://access.redhat.com/security/cve/CVE-2020-0404[CVE-2020-0404] -* link:https://access.redhat.com/security/cve/CVE-2020-4788[CVE-2020-4788] -* link:https://access.redhat.com/security/cve/CVE-2020-13974[CVE-2020-13974] -* link:https://access.redhat.com/security/cve/CVE-2020-19131[CVE-2020-19131] -* link:https://access.redhat.com/security/cve/CVE-2020-27820[CVE-2020-27820] -* link:https://access.redhat.com/security/cve/CVE-2021-0941[CVE-2021-0941] -* link:https://access.redhat.com/security/cve/CVE-2021-3612[CVE-2021-3612] -* link:https://access.redhat.com/security/cve/CVE-2021-3634[CVE-2021-3634] -* link:https://access.redhat.com/security/cve/CVE-2021-3669[CVE-2021-3669] -* link:https://access.redhat.com/security/cve/CVE-2021-3737[CVE-2021-3737] -* link:https://access.redhat.com/security/cve/CVE-2021-3743[CVE-2021-3743] -* link:https://access.redhat.com/security/cve/CVE-2021-3744[CVE-2021-3744] -* link:https://access.redhat.com/security/cve/CVE-2021-3752[CVE-2021-3752] -* link:https://access.redhat.com/security/cve/CVE-2021-3759[CVE-2021-3759] -* link:https://access.redhat.com/security/cve/CVE-2021-3764[CVE-2021-3764] -* link:https://access.redhat.com/security/cve/CVE-2021-3772[CVE-2021-3772] -* link:https://access.redhat.com/security/cve/CVE-2021-3773[CVE-2021-3773] -* link:https://access.redhat.com/security/cve/CVE-2021-4002[CVE-2021-4002] -* link:https://access.redhat.com/security/cve/CVE-2021-4037[CVE-2021-4037] -* link:https://access.redhat.com/security/cve/CVE-2021-4083[CVE-2021-4083] -* link:https://access.redhat.com/security/cve/CVE-2021-4157[CVE-2021-4157] -* link:https://access.redhat.com/security/cve/CVE-2021-4189[CVE-2021-4189] -* link:https://access.redhat.com/security/cve/CVE-2021-4197[CVE-2021-4197] -* link:https://access.redhat.com/security/cve/CVE-2021-4203[CVE-2021-4203] -* link:https://access.redhat.com/security/cve/CVE-2021-20322[CVE-2021-20322] -* link:https://access.redhat.com/security/cve/CVE-2021-21781[CVE-2021-21781] -* link:https://access.redhat.com/security/cve/CVE-2021-23222[CVE-2021-23222] -* link:https://access.redhat.com/security/cve/CVE-2021-26401[CVE-2021-26401] -* link:https://access.redhat.com/security/cve/CVE-2021-29154[CVE-2021-29154] -* link:https://access.redhat.com/security/cve/CVE-2021-37159[CVE-2021-37159] -* link:https://access.redhat.com/security/cve/CVE-2021-41617[CVE-2021-41617] -* link:https://access.redhat.com/security/cve/CVE-2021-41864[CVE-2021-41864] -* link:https://access.redhat.com/security/cve/CVE-2021-42739[CVE-2021-42739] -* link:https://access.redhat.com/security/cve/CVE-2021-43056[CVE-2021-43056] -* link:https://access.redhat.com/security/cve/CVE-2021-43389[CVE-2021-43389] -* link:https://access.redhat.com/security/cve/CVE-2021-43976[CVE-2021-43976] -* link:https://access.redhat.com/security/cve/CVE-2021-44733[CVE-2021-44733] -* link:https://access.redhat.com/security/cve/CVE-2021-45485[CVE-2021-45485] -* link:https://access.redhat.com/security/cve/CVE-2021-45486[CVE-2021-45486] -* link:https://access.redhat.com/security/cve/CVE-2022-0001[CVE-2022-0001] -* link:https://access.redhat.com/security/cve/CVE-2022-0002[CVE-2022-0002] -* link:https://access.redhat.com/security/cve/CVE-2022-0286[CVE-2022-0286] -* link:https://access.redhat.com/security/cve/CVE-2022-0322[CVE-2022-0322] -* 
link:https://access.redhat.com/security/cve/CVE-2022-1011[CVE-2022-1011] -* link:https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -==== diff --git a/modules/cluster-logging-rn-5.2.12.adoc b/modules/cluster-logging-rn-5.2.12.adoc deleted file mode 100644 index 86c386347b62..000000000000 --- a/modules/cluster-logging-rn-5.2.12.adoc +++ /dev/null @@ -1,30 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-2-12"] -= Logging 5.2.12 -This release includes link:https://access.redhat.com/errata/RHBA-2022:5558[RHBA-2022:5558-OpenShift Logging Bug Fix Release 5.2.12]. - -[id="openshift-logging-5-2-12-bug-fixes"] -== Bug fixes -None. - -[id="openshift-logging-5-2-12-CVEs"] -== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2020-28915[CVE-2020-28915] -* link:https://access.redhat.com/security/cve/CVE-2021-40528[CVE-2021-40528] -* link:https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* link:https://access.redhat.com/security/cve/CVE-2022-1621[CVE-2022-1621] -* link:https://access.redhat.com/security/cve/CVE-2022-1629[CVE-2022-1629] -* link:https://access.redhat.com/security/cve/CVE-2022-22576[CVE-2022-22576] -* link:https://access.redhat.com/security/cve/CVE-2022-25313[CVE-2022-25313] -* link:https://access.redhat.com/security/cve/CVE-2022-25314[CVE-2022-25314] -* link:https://access.redhat.com/security/cve/CVE-2022-26691[CVE-2022-26691] -* link:https://access.redhat.com/security/cve/CVE-2022-27666[CVE-2022-27666] -* link:https://access.redhat.com/security/cve/CVE-2022-27774[CVE-2022-27774] -* link:https://access.redhat.com/security/cve/CVE-2022-27776[CVE-2022-27776] -* link:https://access.redhat.com/security/cve/CVE-2022-27782[CVE-2022-27782] -* link:https://access.redhat.com/security/cve/CVE-2022-29824[CVE-2022-29824] -==== diff --git a/modules/cluster-logging-rn-5.2.13.adoc b/modules/cluster-logging-rn-5.2.13.adoc deleted file mode 100644 index 468304e0e599..000000000000 --- a/modules/cluster-logging-rn-5.2.13.adoc +++ /dev/null @@ -1,31 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-2-13"] -= Logging 5.2.13 -This release includes link:https://access.redhat.com/errata/RHSA-2022:5909[RHSA-2022:5909-OpenShift Logging Bug Fix Release 5.2.13]. 
- -[id="openshift-logging-5-2-13-bug-fixes"] -== Bug fixes -* https://bugzilla.redhat.com/show_bug.cgi?id=2100495[BZ-2100495] - -[id="openshift-logging-5-2-13-cves"] -== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-38561[CVE-2021-38561] -* link:https://access.redhat.com/security/cve/CVE-2021-40528[CVE-2021-40528] -* link:https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* link:https://access.redhat.com/security/cve/CVE-2022-1621[CVE-2022-1621] -* link:https://access.redhat.com/security/cve/CVE-2022-1629[CVE-2022-1629] -* link:https://access.redhat.com/security/cve/CVE-2022-21540[CVE-2022-21540] -* link:https://access.redhat.com/security/cve/CVE-2022-21541[CVE-2022-21541] -* link:https://access.redhat.com/security/cve/CVE-2022-22576[CVE-2022-22576] -* link:https://access.redhat.com/security/cve/CVE-2022-25313[CVE-2022-25313] -* link:https://access.redhat.com/security/cve/CVE-2022-25314[CVE-2022-25314] -* link:https://access.redhat.com/security/cve/CVE-2022-27774[CVE-2022-27774] -* link:https://access.redhat.com/security/cve/CVE-2022-27776[CVE-2022-27776] -* link:https://access.redhat.com/security/cve/CVE-2022-27782[CVE-2022-27782] -* link:https://access.redhat.com/security/cve/CVE-2022-29824[CVE-2022-29824] -* link:https://access.redhat.com/security/cve/CVE-2022-34169[CVE-2022-34169] -==== diff --git a/modules/cluster-logging-rn-5.3.10.adoc b/modules/cluster-logging-rn-5.3.10.adoc deleted file mode 100644 index 03d1e82f5efd..000000000000 --- a/modules/cluster-logging-rn-5.3.10.adoc +++ /dev/null @@ -1,31 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-3-10"] -= Logging 5.3.10 -This release includes link:https://access.redhat.com/errata/RHSA-2022:5908[RHSA-2022:5908-OpenShift Logging Bug Fix Release 5.3.10]. 
- -[id="openshift-logging-5-3-10-bug-fixes"] -== Bug fixes -* https://bugzilla.redhat.com/show_bug.cgi?id=2100495[BZ-2100495] - -[id="openshift-logging-5-3-10-cves"] -== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-38561[CVE-2021-38561] -* link:https://access.redhat.com/security/cve/CVE-2021-40528[CVE-2021-40528] -* link:https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* link:https://access.redhat.com/security/cve/CVE-2022-1621[CVE-2022-1621] -* link:https://access.redhat.com/security/cve/CVE-2022-1629[CVE-2022-1629] -* link:https://access.redhat.com/security/cve/CVE-2022-21540[CVE-2022-21540] -* link:https://access.redhat.com/security/cve/CVE-2022-21541[CVE-2022-21541] -* link:https://access.redhat.com/security/cve/CVE-2022-22576[CVE-2022-22576] -* link:https://access.redhat.com/security/cve/CVE-2022-25313[CVE-2022-25313] -* link:https://access.redhat.com/security/cve/CVE-2022-25314[CVE-2022-25314] -* link:https://access.redhat.com/security/cve/CVE-2022-27774[CVE-2022-27774] -* link:https://access.redhat.com/security/cve/CVE-2022-27776[CVE-2022-27776] -* link:https://access.redhat.com/security/cve/CVE-2022-27782[CVE-2022-27782] -* link:https://access.redhat.com/security/cve/CVE-2022-29824[CVE-2022-29824] -* link:https://access.redhat.com/security/cve/CVE-2022-34169[CVE-2022-34169] -==== diff --git a/modules/cluster-logging-rn-5.3.11.adoc b/modules/cluster-logging-rn-5.3.11.adoc deleted file mode 100644 index 0102585c3311..000000000000 --- a/modules/cluster-logging-rn-5.3.11.adoc +++ /dev/null @@ -1,20 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-3-11_{context}"] -= Logging 5.3.11 -This release includes link:https://access.redhat.com/errata/RHSA-2022:6182[OpenShift Logging Bug Fix Release 5.3.11]. - -[id="openshift-logging-5-3-11-bug-fixes_{context}"] -== Bug fixes -* Before this update, the Operator did not ensure that the pod was ready, which caused the cluster to reach an inoperable state during a cluster restart. With this update, the Operator marks new pods as ready before continuing to a new pod during a restart, which resolves the issue. (link:https://issues.redhat.com/browse/LOG-2871[LOG-2871]) - -[id="openshift-logging-5-3-11-cves_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-1292[CVE-2022-1292] -* link:https://access.redhat.com/security/cve/CVE-2022-1586[CVE-2022-1586] -* link:https://access.redhat.com/security/cve/CVE-2022-1785[CVE-2022-1785] -* link:https://access.redhat.com/security/cve/CVE-2022-1897[CVE-2022-1897] -* link:https://access.redhat.com/security/cve/CVE-2022-1927[CVE-2022-1927] -* link:https://access.redhat.com/security/cve/CVE-2022-2068[CVE-2022-2068] -* link:https://access.redhat.com/security/cve/CVE-2022-2097[CVE-2022-2097] -* link:https://access.redhat.com/security/cve/CVE-2022-30631[CVE-2022-30631] diff --git a/modules/cluster-logging-rn-5.3.12.adoc b/modules/cluster-logging-rn-5.3.12.adoc deleted file mode 100644 index 1053b7870d85..000000000000 --- a/modules/cluster-logging-rn-5.3.12.adoc +++ /dev/null @@ -1,21 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-3-12_{context}"] -= Logging 5.3.12 -This release includes link:https://access.redhat.com/errata/RHSA-2022:6560[OpenShift Logging Bug Fix Release 5.3.12]. - -[id="openshift-logging-5-3-12-bug-fixes_{context}"] -== Bug fixes -None. 
- -[id="openshift-logging-5-3-12-cves_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2015-20107[CVE-2015-20107] -* link:https://access.redhat.com/security/cve/CVE-2022-0391[CVE-2022-0391] -* link:https://access.redhat.com/security/cve/CVE-2022-21123[CVE-2022-21123] -* link:https://access.redhat.com/security/cve/CVE-2022-21125[CVE-2022-21125] -* link:https://access.redhat.com/security/cve/CVE-2022-21166[CVE-2022-21166] -* link:https://access.redhat.com/security/cve/CVE-2022-29154[CVE-2022-29154] -* link:https://access.redhat.com/security/cve/CVE-2022-32206[CVE-2022-32206] -* link:https://access.redhat.com/security/cve/CVE-2022-32208[CVE-2022-32208] -* link:https://access.redhat.com/security/cve/CVE-2022-34903[CVE-2022-34903] diff --git a/modules/cluster-logging-rn-5.3.13.adoc b/modules/cluster-logging-rn-5.3.13.adoc deleted file mode 100644 index 64676f9cd3cf..000000000000 --- a/modules/cluster-logging-rn-5.3.13.adoc +++ /dev/null @@ -1,36 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-3-13_{context}"] -= Logging 5.3.13 -This release includes link:https://access.redhat.com/errata/RHSA-2022:6882[RHSA-2022:68828-OpenShift Logging Bug Fix Release 5.3.13]. - -[id="openshift-logging-5-3-13-bug-fixes"] -== Bug fixes -None. - -[id="openshift-logging-5-3-13-CVEs"] -== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2020-35525[CVE-2020-35525] -* link:https://access.redhat.com/security/cve/CVE-2020-35527[CVE-2020-35527] -* link:https://access.redhat.com/security/cve/CVE-2022-0494[CVE-2022-0494] -* link:https://access.redhat.com/security/cve/CVE-2022-1353[CVE-2022-1353] -* link:https://access.redhat.com/security/cve/CVE-2022-2509[CVE-2022-2509] -* link:https://access.redhat.com/security/cve/CVE-2022-2588[CVE-2022-2588] -* link:https://access.redhat.com/security/cve/CVE-2022-3515[CVE-2022-3515] -* link:https://access.redhat.com/security/cve/CVE-2022-21618[CVE-2022-21618] -* link:https://access.redhat.com/security/cve/CVE-2022-21619[CVE-2022-21619] -* link:https://access.redhat.com/security/cve/CVE-2022-21624[CVE-2022-21624] -* link:https://access.redhat.com/security/cve/CVE-2022-21626[CVE-2022-21626] -* link:https://access.redhat.com/security/cve/CVE-2022-21628[CVE-2022-21628] -* link:https://access.redhat.com/security/cve/CVE-2022-23816[CVE-2022-23816] -* link:https://access.redhat.com/security/cve/CVE-2022-23825[CVE-2022-23825] -* link:https://access.redhat.com/security/cve/CVE-2022-29900[CVE-2022-29900] -* link:https://access.redhat.com/security/cve/CVE-2022-29901[CVE-2022-29901] -* link:https://access.redhat.com/security/cve/CVE-2022-32149[CVE-2022-32149] -* link:https://access.redhat.com/security/cve/CVE-2022-37434[CVE-2022-37434] -* link:https://access.redhat.com/security/cve/CVE-2022-39399[CVE-2022-39399] -* link:https://access.redhat.com/security/cve/CVE-2022-40674[CVE-2022-40674] -==== diff --git a/modules/cluster-logging-rn-5.3.14.adoc b/modules/cluster-logging-rn-5.3.14.adoc deleted file mode 100644 index ac7efd450212..000000000000 --- a/modules/cluster-logging-rn-5.3.14.adoc +++ /dev/null @@ -1,84 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-3-14_{context}"] -= Logging 5.3.14 -This release includes link:https://access.redhat.com/errata/RHSA-2022:8889[OpenShift Logging Bug Fix Release 5.3.14]. 
- -[id="openshift-logging-5-3-14-bug-fixes"] -== Bug fixes -* Before this update, the log file size map generated by the `log-file-metrics-exporter` component did not remove entries for deleted files, resulting in increased file size, and process memory. With this update, the log file size map does not contain entries for deleted files. (link:https://issues.redhat.com/browse/LOG-3293[LOG-3293]) - -[id="openshift-logging-5-3-14-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2016-3709[CVE-2016-3709] -* link:https://access.redhat.com/security/cve/CVE-2020-35525[CVE-2020-35525] -* link:https://access.redhat.com/security/cve/CVE-2020-35527[CVE-2020-35527] -* link:https://access.redhat.com/security/cve/CVE-2020-36516[CVE-2020-36516] -* link:https://access.redhat.com/security/cve/CVE-2020-36558[CVE-2020-36558] -* link:https://access.redhat.com/security/cve/CVE-2021-3640[CVE-2021-3640] -* link:https://access.redhat.com/security/cve/CVE-2021-30002[CVE-2021-30002] -* link:https://access.redhat.com/security/cve/CVE-2022-0168[CVE-2022-0168] -* link:https://access.redhat.com/security/cve/CVE-2022-0561[CVE-2022-0561] -* link:https://access.redhat.com/security/cve/CVE-2022-0562[CVE-2022-0562] -* link:https://access.redhat.com/security/cve/CVE-2022-0617[CVE-2022-0617] -* link:https://access.redhat.com/security/cve/CVE-2022-0854[CVE-2022-0854] -* link:https://access.redhat.com/security/cve/CVE-2022-0865[CVE-2022-0865] -* link:https://access.redhat.com/security/cve/CVE-2022-0891[CVE-2022-0891] -* link:https://access.redhat.com/security/cve/CVE-2022-0908[CVE-2022-0908] -* link:https://access.redhat.com/security/cve/CVE-2022-0909[CVE-2022-0909] -* link:https://access.redhat.com/security/cve/CVE-2022-0924[CVE-2022-0924] -* link:https://access.redhat.com/security/cve/CVE-2022-1016[CVE-2022-1016] -* link:https://access.redhat.com/security/cve/CVE-2022-1048[CVE-2022-1048] -* link:https://access.redhat.com/security/cve/CVE-2022-1055[CVE-2022-1055] -* link:https://access.redhat.com/security/cve/CVE-2022-1184[CVE-2022-1184] -* link:https://access.redhat.com/security/cve/CVE-2022-1292[CVE-2022-1292] -* link:https://access.redhat.com/security/cve/CVE-2022-1304[CVE-2022-1304] -* link:https://access.redhat.com/security/cve/CVE-2022-1355[CVE-2022-1355] -* link:https://access.redhat.com/security/cve/CVE-2022-1586[CVE-2022-1586] -* link:https://access.redhat.com/security/cve/CVE-2022-1785[CVE-2022-1785] -* link:https://access.redhat.com/security/cve/CVE-2022-1852[CVE-2022-1852] -* link:https://access.redhat.com/security/cve/CVE-2022-1897[CVE-2022-1897] -* link:https://access.redhat.com/security/cve/CVE-2022-1927[CVE-2022-1927] -* link:https://access.redhat.com/security/cve/CVE-2022-2068[CVE-2022-2068] -* link:https://access.redhat.com/security/cve/CVE-2022-2078[CVE-2022-2078] -* link:https://access.redhat.com/security/cve/CVE-2022-2097[CVE-2022-2097] -* link:https://access.redhat.com/security/cve/CVE-2022-2509[CVE-2022-2509] -* link:https://access.redhat.com/security/cve/CVE-2022-2586[CVE-2022-2586] -* link:https://access.redhat.com/security/cve/CVE-2022-2639[CVE-2022-2639] -* link:https://access.redhat.com/security/cve/CVE-2022-2938[CVE-2022-2938] -* link:https://access.redhat.com/security/cve/CVE-2022-3515[CVE-2022-3515] -* link:https://access.redhat.com/security/cve/CVE-2022-20368[CVE-2022-20368] -* link:https://access.redhat.com/security/cve/CVE-2022-21499[CVE-2022-21499] -* link:https://access.redhat.com/security/cve/CVE-2022-21618[CVE-2022-21618] -* 
link:https://access.redhat.com/security/cve/CVE-2022-21619[CVE-2022-21619] -* link:https://access.redhat.com/security/cve/CVE-2022-21624[CVE-2022-21624] -* link:https://access.redhat.com/security/cve/CVE-2022-21626[CVE-2022-21626] -* link:https://access.redhat.com/security/cve/CVE-2022-21628[CVE-2022-21628] -* link:https://access.redhat.com/security/cve/CVE-2022-22624[CVE-2022-22624] -* link:https://access.redhat.com/security/cve/CVE-2022-22628[CVE-2022-22628] -* link:https://access.redhat.com/security/cve/CVE-2022-22629[CVE-2022-22629] -* link:https://access.redhat.com/security/cve/CVE-2022-22662[CVE-2022-22662] -* link:https://access.redhat.com/security/cve/CVE-2022-22844[CVE-2022-22844] -* link:https://access.redhat.com/security/cve/CVE-2022-23960[CVE-2022-23960] -* link:https://access.redhat.com/security/cve/CVE-2022-24448[CVE-2022-24448] -* link:https://access.redhat.com/security/cve/CVE-2022-25255[CVE-2022-25255] -* link:https://access.redhat.com/security/cve/CVE-2022-26373[CVE-2022-26373] -* link:https://access.redhat.com/security/cve/CVE-2022-26700[CVE-2022-26700] -* link:https://access.redhat.com/security/cve/CVE-2022-26709[CVE-2022-26709] -* link:https://access.redhat.com/security/cve/CVE-2022-26710[CVE-2022-26710] -* link:https://access.redhat.com/security/cve/CVE-2022-26716[CVE-2022-26716] -* link:https://access.redhat.com/security/cve/CVE-2022-26717[CVE-2022-26717] -* link:https://access.redhat.com/security/cve/CVE-2022-26719[CVE-2022-26719] -* link:https://access.redhat.com/security/cve/CVE-2022-27404[CVE-2022-27404] -* link:https://access.redhat.com/security/cve/CVE-2022-27405[CVE-2022-27405] -* link:https://access.redhat.com/security/cve/CVE-2022-27406[CVE-2022-27406] -* link:https://access.redhat.com/security/cve/CVE-2022-27950[CVE-2022-27950] -* link:https://access.redhat.com/security/cve/CVE-2022-28390[CVE-2022-28390] -* link:https://access.redhat.com/security/cve/CVE-2022-28893[CVE-2022-28893] -* link:https://access.redhat.com/security/cve/CVE-2022-29581[CVE-2022-29581] -* link:https://access.redhat.com/security/cve/CVE-2022-30293[CVE-2022-30293] -* link:https://access.redhat.com/security/cve/CVE-2022-34903[CVE-2022-34903] -* link:https://access.redhat.com/security/cve/CVE-2022-36946[CVE-2022-36946] -* link:https://access.redhat.com/security/cve/CVE-2022-37434[CVE-2022-37434] -* link:https://access.redhat.com/security/cve/CVE-2022-39399[CVE-2022-39399] -* link:https://access.redhat.com/security/cve/CVE-2022-42898[CVE-2022-42898] diff --git a/modules/cluster-logging-rn-5.3.8.adoc b/modules/cluster-logging-rn-5.3.8.adoc deleted file mode 100644 index d3f152160b2e..000000000000 --- a/modules/cluster-logging-rn-5.3.8.adoc +++ /dev/null @@ -1,61 +0,0 @@ -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-3-8"] -= Logging 5.3.8 -This release includes link:https://access.redhat.com/errata/RHBA-2022:5010[RHBA-2022:5010-OpenShift Logging Bug Fix Release 5.3.8] - -[id="openshift-logging-5-3-8-bug-fixes"] -== Bug fixes -(None.) 
- -[id="openshift-logging-5-3-8-CVEs"] -== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2018-25032[CVE-2018-25032] -* link:https://access.redhat.com/security/cve/CVE-2020-0404[CVE-2020-0404] -* link:https://access.redhat.com/security/cve/CVE-2020-4788[CVE-2020-4788] -* link:https://access.redhat.com/security/cve/CVE-2020-13974[CVE-2020-13974] -* link:https://access.redhat.com/security/cve/CVE-2020-19131[CVE-2020-19131] -* link:https://access.redhat.com/security/cve/CVE-2020-27820[CVE-2020-27820] -* link:https://access.redhat.com/security/cve/CVE-2021-0941[CVE-2021-0941] -* link:https://access.redhat.com/security/cve/CVE-2021-3612[CVE-2021-3612] -* link:https://access.redhat.com/security/cve/CVE-2021-3634[CVE-2021-3634] -* link:https://access.redhat.com/security/cve/CVE-2021-3669[CVE-2021-3669] -* link:https://access.redhat.com/security/cve/CVE-2021-3737[CVE-2021-3737] -* link:https://access.redhat.com/security/cve/CVE-2021-3743[CVE-2021-3743] -* link:https://access.redhat.com/security/cve/CVE-2021-3744[CVE-2021-3744] -* link:https://access.redhat.com/security/cve/CVE-2021-3752[CVE-2021-3752] -* link:https://access.redhat.com/security/cve/CVE-2021-3759[CVE-2021-3759] -* link:https://access.redhat.com/security/cve/CVE-2021-3764[CVE-2021-3764] -* link:https://access.redhat.com/security/cve/CVE-2021-3772[CVE-2021-3772] -* link:https://access.redhat.com/security/cve/CVE-2021-3773[CVE-2021-3773] -* link:https://access.redhat.com/security/cve/CVE-2021-4002[CVE-2021-4002] -* link:https://access.redhat.com/security/cve/CVE-2021-4037[CVE-2021-4037] -* link:https://access.redhat.com/security/cve/CVE-2021-4083[CVE-2021-4083] -* link:https://access.redhat.com/security/cve/CVE-2021-4157[CVE-2021-4157] -* link:https://access.redhat.com/security/cve/CVE-2021-4189[CVE-2021-4189] -* link:https://access.redhat.com/security/cve/CVE-2021-4197[CVE-2021-4197] -* link:https://access.redhat.com/security/cve/CVE-2021-4203[CVE-2021-4203] -* link:https://access.redhat.com/security/cve/CVE-2021-20322[CVE-2021-20322] -* link:https://access.redhat.com/security/cve/CVE-2021-21781[CVE-2021-21781] -* link:https://access.redhat.com/security/cve/CVE-2021-23222[CVE-2021-23222] -* link:https://access.redhat.com/security/cve/CVE-2021-26401[CVE-2021-26401] -* link:https://access.redhat.com/security/cve/CVE-2021-29154[CVE-2021-29154] -* link:https://access.redhat.com/security/cve/CVE-2021-37159[CVE-2021-37159] -* link:https://access.redhat.com/security/cve/CVE-2021-41617[CVE-2021-41617] -* link:https://access.redhat.com/security/cve/CVE-2021-41864[CVE-2021-41864] -* link:https://access.redhat.com/security/cve/CVE-2021-42739[CVE-2021-42739] -* link:https://access.redhat.com/security/cve/CVE-2021-43056[CVE-2021-43056] -* link:https://access.redhat.com/security/cve/CVE-2021-43389[CVE-2021-43389] -* link:https://access.redhat.com/security/cve/CVE-2021-43976[CVE-2021-43976] -* link:https://access.redhat.com/security/cve/CVE-2021-44733[CVE-2021-44733] -* link:https://access.redhat.com/security/cve/CVE-2021-45485[CVE-2021-45485] -* link:https://access.redhat.com/security/cve/CVE-2021-45486[CVE-2021-45486] -* link:https://access.redhat.com/security/cve/CVE-2022-0001[CVE-2022-0001] -* link:https://access.redhat.com/security/cve/CVE-2022-0002[CVE-2022-0002] -* link:https://access.redhat.com/security/cve/CVE-2022-0286[CVE-2022-0286] -* link:https://access.redhat.com/security/cve/CVE-2022-0322[CVE-2022-0322] -* link:https://access.redhat.com/security/cve/CVE-2022-1011[CVE-2022-1011] -* 
link:https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -==== diff --git a/modules/cluster-logging-rn-5.3.9.adoc b/modules/cluster-logging-rn-5.3.9.adoc deleted file mode 100644 index 1e89c948a680..000000000000 --- a/modules/cluster-logging-rn-5.3.9.adoc +++ /dev/null @@ -1,32 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-3-9"] -= Logging 5.3.9 -This release includes link:https://access.redhat.com/errata/RHBA-2022:5557[RHBA-2022:5557-OpenShift Logging Bug Fix Release 5.3.9]. - -[id="openshift-logging-5-3-9-bug-fixes"] -== Bug fixes - -* Before this update, the logging collector included a path as a label for the metrics it produced. This path changed frequently and contributed to significant storage changes for the Prometheus server. With this update, the label has been dropped to resolve the issue and reduce storage consumption. (link:https://issues.redhat.com/browse/LOG-2682[LOG-2682]) - - -[id="openshift-logging-5-3-9-CVEs"] -== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2020-28915[CVE-2020-28915] -* link:https://access.redhat.com/security/cve/CVE-2021-40528[CVE-2021-40528] -* link:https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* link:https://access.redhat.com/security/cve/CVE-2022-1621[CVE-2022-1621] -* link:https://access.redhat.com/security/cve/CVE-2022-1629[CVE-2022-1629] -* link:https://access.redhat.com/security/cve/CVE-2022-22576[CVE-2022-22576] -* link:https://access.redhat.com/security/cve/CVE-2022-25313[CVE-2022-25313] -* link:https://access.redhat.com/security/cve/CVE-2022-25314[CVE-2022-25314] -* link:https://access.redhat.com/security/cve/CVE-2022-26691[CVE-2022-26691] -* link:https://access.redhat.com/security/cve/CVE-2022-27666[CVE-2022-27666] -* link:https://access.redhat.com/security/cve/CVE-2022-27774[CVE-2022-27774] -* link:https://access.redhat.com/security/cve/CVE-2022-27776[CVE-2022-27776] -* link:https://access.redhat.com/security/cve/CVE-2022-27782[CVE-2022-27782] -* link:https://access.redhat.com/security/cve/CVE-2022-29824[CVE-2022-29824] -==== diff --git a/modules/cluster-logging-rn-5.4.10.adoc b/modules/cluster-logging-rn-5.4.10.adoc deleted file mode 100644 index 0bf8b088e108..000000000000 --- a/modules/cluster-logging-rn-5.4.10.adoc +++ /dev/null @@ -1,31 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-10_{context}"] -= Logging 5.4.10 -This release includes link:https://access.redhat.com/errata/RHBA-2023:0385[OpenShift Logging Bug Fix Release 5.4.10]. - -[id="openshift-logging-5-4-10-bug-fixes"] -== Bug fixes -None. 
- -[id="openshift-logging-5-4-10-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2021-46848[CVE-2021-46848] -* link:https://access.redhat.com/security/cve/CVE-2022-2056[CVE-2022-2056] -* link:https://access.redhat.com/security/cve/CVE-2022-2057[CVE-2022-2057] -* link:https://access.redhat.com/security/cve/CVE-2022-2058[CVE-2022-2058] -* link:https://access.redhat.com/security/cve/CVE-2022-2519[CVE-2022-2519] -* link:https://access.redhat.com/security/cve/CVE-2022-2520[CVE-2022-2520] -* link:https://access.redhat.com/security/cve/CVE-2022-2521[CVE-2022-2521] -* link:https://access.redhat.com/security/cve/CVE-2022-2867[CVE-2022-2867] -* link:https://access.redhat.com/security/cve/CVE-2022-2868[CVE-2022-2868] -* link:https://access.redhat.com/security/cve/CVE-2022-2869[CVE-2022-2869] -* link:https://access.redhat.com/security/cve/CVE-2022-2953[CVE-2022-2953] -* link:https://access.redhat.com/security/cve/CVE-2022-2964[CVE-2022-2964] -* link:https://access.redhat.com/security/cve/CVE-2022-4139[CVE-2022-4139] -* link:https://access.redhat.com/security/cve/CVE-2022-35737[CVE-2022-35737] -* link:https://access.redhat.com/security/cve/CVE-2022-42010[CVE-2022-42010] -* link:https://access.redhat.com/security/cve/CVE-2022-42011[CVE-2022-42011] -* link:https://access.redhat.com/security/cve/CVE-2022-42012[CVE-2022-42012] -* link:https://access.redhat.com/security/cve/CVE-2022-42898[CVE-2022-42898] -* link:https://access.redhat.com/security/cve/CVE-2022-43680[CVE-2022-43680] diff --git a/modules/cluster-logging-rn-5.4.11.adoc b/modules/cluster-logging-rn-5.4.11.adoc deleted file mode 100644 index f659fe685961..000000000000 --- a/modules/cluster-logging-rn-5.4.11.adoc +++ /dev/null @@ -1,21 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-11_{context}"] -= Logging 5.4.11 -This release includes link:https://access.redhat.com/errata/RHSA-2023:0632[OpenShift Logging Bug Fix Release 5.4.11]. - -[id="openshift-logging-5-4-11-bug-fixes_{context}"] -== Bug fixes -* link:https://bugzilla.redhat.com/show_bug.cgi?id=2099524[BZ 2099524] -* link:https://bugzilla.redhat.com/show_bug.cgi?id=2161274[BZ 2161274] - -[id="openshift-logging-5-4-11-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2021-46848[CVE-2021-46848] -* link:https://access.redhat.com/security/cve/CVE-2022-3821[CVE-2022-3821] -* link:https://access.redhat.com/security/cve/CVE-2022-35737[CVE-2022-35737] -* link:https://access.redhat.com/security/cve/CVE-2022-42010[CVE-2022-42010] -* link:https://access.redhat.com/security/cve/CVE-2022-42011[CVE-2022-42011] -* link:https://access.redhat.com/security/cve/CVE-2022-42012[CVE-2022-42012] -* link:https://access.redhat.com/security/cve/CVE-2022-42898[CVE-2022-42898] -* link:https://access.redhat.com/security/cve/CVE-2022-43680[CVE-2022-43680] diff --git a/modules/cluster-logging-rn-5.4.12.adoc b/modules/cluster-logging-rn-5.4.12.adoc deleted file mode 100644 index 137dfe68b341..000000000000 --- a/modules/cluster-logging-rn-5.4.12.adoc +++ /dev/null @@ -1,22 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-12_{context}"] -= Logging 5.4.12 -This release includes link:https://access.redhat.com/errata/RHSA-2023:0931[OpenShift Logging Bug Fix Release 5.4.12]. - -[id="openshift-logging-5-4-12-bug-fixes"] -== Bug fixes -None. 
- -[id="openshift-logging-5-4-12-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2020-10735[CVE-2020-10735] -* link:https://access.redhat.com/security/cve/CVE-2021-28861[CVE-2021-28861] -* link:https://access.redhat.com/security/cve/CVE-2022-2873[CVE-2022-2873] -* link:https://access.redhat.com/security/cve/CVE-2022-4415[CVE-2022-4415] -* link:https://access.redhat.com/security/cve/CVE-2022-40897[CVE-2022-40897] -* link:https://access.redhat.com/security/cve/CVE-2022-41222[CVE-2022-41222] -* link:https://access.redhat.com/security/cve/CVE-2022-41717[CVE-2022-41717] -* link:https://access.redhat.com/security/cve/CVE-2022-43945[CVE-2022-43945] -* link:https://access.redhat.com/security/cve/CVE-2022-45061[CVE-2022-45061] -* link:https://access.redhat.com/security/cve/CVE-2022-48303[CVE-2022-48303] diff --git a/modules/cluster-logging-rn-5.4.13.adoc b/modules/cluster-logging-rn-5.4.13.adoc deleted file mode 100644 index cf5b6ccbd808..000000000000 --- a/modules/cluster-logging-rn-5.4.13.adoc +++ /dev/null @@ -1,18 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-13_{context}"] -= Logging 5.4.13 -This release includes link:https://access.redhat.com/errata/RHBA-2023:1312[OpenShift Logging Bug Fix Release 5.4.13]. - -[id="openshift-logging-5-4-13-bug-fixes"] -== Bug fixes -* Before this update, a problem with the Fluentd collector caused it to not capture OAuth login events stored in `/var/log/auth-server/audit.log`. This led to incomplete collection of login events from the OAuth service. With this update, the Fluentd collector now resolves this issue by capturing all login events from the OAuth service, including those stored in `/var/log/auth-server/audit.log`, as expected. (link:https://issues.redhat.com/browse/LOG-3731[LOG-3731]) - -[id="openshift-logging-5-4-13-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-4304[CVE-2022-4304] -* link:https://access.redhat.com/security/cve/CVE-2022-4450[CVE-2022-4450] -* link:https://access.redhat.com/security/cve/CVE-2023-0215[CVE-2023-0215] -* link:https://access.redhat.com/security/cve/CVE-2023-0286[CVE-2023-0286] -* link:https://access.redhat.com/security/cve/CVE-2023-0767[CVE-2023-0767] -* link:https://access.redhat.com/security/cve/CVE-2023-23916[CVE-2023-23916] diff --git a/modules/cluster-logging-rn-5.4.14.adoc b/modules/cluster-logging-rn-5.4.14.adoc deleted file mode 100644 index 2bd231f72c23..000000000000 --- a/modules/cluster-logging-rn-5.4.14.adoc +++ /dev/null @@ -1,18 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-14{context}"] -= Logging 5.4.14 -This release includes link:https://access.redhat.com/errata/RHBA-2023:1843[OpenShift Logging Bug Fix Release 5.4.14]. - -[id="openshift-logging-5-4-14-bug-fixes"] -== Bug fixes -None. 
- -[id="openshift-logging-5-4-14-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-4304[CVE-2022-4304] -* link:https://access.redhat.com/security/cve/CVE-2022-4450[CVE-2022-4450] -* link:https://access.redhat.com/security/cve/CVE-2023-0215[CVE-2023-0215] -* link:https://access.redhat.com/security/cve/CVE-2023-0286[CVE-2023-0286] -* link:https://access.redhat.com/security/cve/CVE-2023-0361[CVE-2023-0361] -* link:https://access.redhat.com/security/cve/CVE-2023-23916[CVE-2023-23916] diff --git a/modules/cluster-logging-rn-5.4.2.adoc b/modules/cluster-logging-rn-5.4.2.adoc deleted file mode 100644 index ab88c1d7bf83..000000000000 --- a/modules/cluster-logging-rn-5.4.2.adoc +++ /dev/null @@ -1,65 +0,0 @@ -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-2"] -= Logging 5.4.2 -This release includes link:https://access.redhat.com/errata/RHBA-2022:4874[RHBA-2022:4874-OpenShift Logging Bug Fix Release 5.4.2] - -[id="openshift-logging-5-4-2-bug-fixes"] -== Bug fixes -* Before this update, editing the Collector configuration using `oc edit` was difficult because it had inconsistent use of white-space. This change introduces logic to normalize and format the configuration prior to any updates by the Operator so that it is easy to edit using `oc edit`. (link:https://issues.redhat.com/browse/LOG-2319[LOG-2319]) - -* Before this update, the `FluentdNodeDown` alert could not provide instance labels in the message section appropriately. This update resolves the issue by fixing the alert rule to provide instance labels in cases of partial instance failures. (link:https://issues.redhat.com/browse/LOG-2607[LOG-2607]) - -* Before this update, several log levels, such as`critical`, that were documented as supported by the product were not. This update fixes the discrepancy so the documented log levels are now supported by the product. 
(link:https://issues.redhat.com/browse/LOG-2033[LOG-2033]) - -[id="openshift-logging-5-4-2-CVEs"] -== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2018-25032[CVE-2018-25032] -* link:https://access.redhat.com/security/cve/CVE-2020-0404[CVE-2020-0404] -* link:https://access.redhat.com/security/cve/CVE-2020-4788[CVE-2020-4788] -* link:https://access.redhat.com/security/cve/CVE-2020-13974[CVE-2020-13974] -* link:https://access.redhat.com/security/cve/CVE-2020-19131[CVE-2020-19131] -* link:https://access.redhat.com/security/cve/CVE-2020-27820[CVE-2020-27820] -* link:https://access.redhat.com/security/cve/CVE-2021-0941[CVE-2021-0941] -* link:https://access.redhat.com/security/cve/CVE-2021-3612[CVE-2021-3612] -* link:https://access.redhat.com/security/cve/CVE-2021-3634[CVE-2021-3634] -* link:https://access.redhat.com/security/cve/CVE-2021-3669[CVE-2021-3669] -* link:https://access.redhat.com/security/cve/CVE-2021-3737[CVE-2021-3737] -* link:https://access.redhat.com/security/cve/CVE-2021-3743[CVE-2021-3743] -* link:https://access.redhat.com/security/cve/CVE-2021-3744[CVE-2021-3744] -* link:https://access.redhat.com/security/cve/CVE-2021-3752[CVE-2021-3752] -* link:https://access.redhat.com/security/cve/CVE-2021-3759[CVE-2021-3759] -* link:https://access.redhat.com/security/cve/CVE-2021-3764[CVE-2021-3764] -* link:https://access.redhat.com/security/cve/CVE-2021-3772[CVE-2021-3772] -* link:https://access.redhat.com/security/cve/CVE-2021-3773[CVE-2021-3773] -* link:https://access.redhat.com/security/cve/CVE-2021-4002[CVE-2021-4002] -* link:https://access.redhat.com/security/cve/CVE-2021-4037[CVE-2021-4037] -* link:https://access.redhat.com/security/cve/CVE-2021-4083[CVE-2021-4083] -* link:https://access.redhat.com/security/cve/CVE-2021-4157[CVE-2021-4157] -* link:https://access.redhat.com/security/cve/CVE-2021-4189[CVE-2021-4189] -* link:https://access.redhat.com/security/cve/CVE-2021-4197[CVE-2021-4197] -* link:https://access.redhat.com/security/cve/CVE-2021-4203[CVE-2021-4203] -* link:https://access.redhat.com/security/cve/CVE-2021-20322[CVE-2021-20322] -* link:https://access.redhat.com/security/cve/CVE-2021-21781[CVE-2021-21781] -* link:https://access.redhat.com/security/cve/CVE-2021-23222[CVE-2021-23222] -* link:https://access.redhat.com/security/cve/CVE-2021-26401[CVE-2021-26401] -* link:https://access.redhat.com/security/cve/CVE-2021-29154[CVE-2021-29154] -* link:https://access.redhat.com/security/cve/CVE-2021-37159[CVE-2021-37159] -* link:https://access.redhat.com/security/cve/CVE-2021-41617[CVE-2021-41617] -* link:https://access.redhat.com/security/cve/CVE-2021-41864[CVE-2021-41864] -* link:https://access.redhat.com/security/cve/CVE-2021-42739[CVE-2021-42739] -* link:https://access.redhat.com/security/cve/CVE-2021-43056[CVE-2021-43056] -* link:https://access.redhat.com/security/cve/CVE-2021-43389[CVE-2021-43389] -* link:https://access.redhat.com/security/cve/CVE-2021-43976[CVE-2021-43976] -* link:https://access.redhat.com/security/cve/CVE-2021-44733[CVE-2021-44733] -* link:https://access.redhat.com/security/cve/CVE-2021-45485[CVE-2021-45485] -* link:https://access.redhat.com/security/cve/CVE-2021-45486[CVE-2021-45486] -* link:https://access.redhat.com/security/cve/CVE-2022-0001[CVE-2022-0001] -* link:https://access.redhat.com/security/cve/CVE-2022-0002[CVE-2022-0002] -* link:https://access.redhat.com/security/cve/CVE-2022-0286[CVE-2022-0286] -* link:https://access.redhat.com/security/cve/CVE-2022-0322[CVE-2022-0322] -* 
link:https://access.redhat.com/security/cve/CVE-2022-1011[CVE-2022-1011] -* link:https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -==== diff --git a/modules/cluster-logging-rn-5.4.3.adoc b/modules/cluster-logging-rn-5.4.3.adoc deleted file mode 100644 index efebbe265853..000000000000 --- a/modules/cluster-logging-rn-5.4.3.adoc +++ /dev/null @@ -1,44 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-3"] -= Logging 5.4.3 -This release includes link:https://access.redhat.com/errata/RHSA-2022:5556[RHSA-2022:5556-OpenShift Logging Bug Fix Release 5.4.3]. - -[id="openshift-logging-elasticsearch-dep"] -== Elasticsearch Operator deprecation notice -In {logging} 5.4.3, the Elasticsearch Operator is deprecated and is planned to be removed in a future release. Red Hat will provide bug fixes and support for this feature during the current release lifecycle, but this feature will no longer receive enhancements and will be removed. As an alternative to using the Elasticsearch Operator to manage the default log storage, you can use the Loki Operator. - -[id="openshift-logging-5-4-3-bug-fixes"] -== Bug fixes -* Before this update, the OpenShift Logging Dashboard showed the number of active primary shards instead of all active shards. With this update, the dashboard displays all active shards. (link:https://issues.redhat.com/browse/LOG-2781[LOG-2781]) - -* Before this update, a library used by `elasticsearch-operator` contained a denial of service vulnerability. With this update, the library has been updated to a version that does not contain this vulnerability. (link:https://issues.redhat.com/browse/LOG-2816[LOG-2816]) - -* Before this update, when configuring Vector to forward logs to Loki, it was not possible to set a custom bearer token or use the default token if Loki had TLS enabled. With this update, Vector can forward logs to Loki using tokens with TLS enabled. (link:https://issues.redhat.com/browse/LOG-2786[LOG-2786]) - -* Before this update, the Elasticsearch Operator omitted the `referencePolicy` property of the `ImageStream` custom resource when selecting an `oauth-proxy` image. This omission caused the Kibana deployment to fail in specific environments. With this update, using `referencePolicy` resolves the issue, and the Operator can deploy Kibana successfully. (link:https://issues.redhat.com/browse/LOG-2791[LOG-2791]) - -* Before this update, alerting rules for the `ClusterLogForwarder` custom resource did not take multiple forward outputs into account. This update resolves the issue. (link:https://issues.redhat.com/browse/LOG-2640[LOG-2640]) - -* Before this update, clusters configured to forward logs to Amazon CloudWatch wrote rejected log files to temporary storage, causing cluster instability over time. With this update, chunk backup for CloudWatch has been disabled, resolving the issue.
(link:https://issues.redhat.com/browse/LOG-2768[LOG-2768]) - -[id="openshift-logging-5-4-3-CVEs"] -== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2020-28915[CVE-2020-28915] -* link:https://access.redhat.com/security/cve/CVE-2021-40528[CVE-2021-40528] -* link:https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* link:https://access.redhat.com/security/cve/CVE-2022-1621[CVE-2022-1621] -* link:https://access.redhat.com/security/cve/CVE-2022-1629[CVE-2022-1629] -* link:https://access.redhat.com/security/cve/CVE-2022-22576[CVE-2022-22576] -* link:https://access.redhat.com/security/cve/CVE-2022-25313[CVE-2022-25313] -* link:https://access.redhat.com/security/cve/CVE-2022-25314[CVE-2022-25314] -* link:https://access.redhat.com/security/cve/CVE-2022-26691[CVE-2022-26691] -* link:https://access.redhat.com/security/cve/CVE-2022-27666[CVE-2022-27666] -* link:https://access.redhat.com/security/cve/CVE-2022-27774[CVE-2022-27774] -* link:https://access.redhat.com/security/cve/CVE-2022-27776[CVE-2022-27776] -* link:https://access.redhat.com/security/cve/CVE-2022-27782[CVE-2022-27782] -* link:https://access.redhat.com/security/cve/CVE-2022-29824[CVE-2022-29824] -==== diff --git a/modules/cluster-logging-rn-5.4.4.adoc b/modules/cluster-logging-rn-5.4.4.adoc deleted file mode 100644 index 227a47277670..000000000000 --- a/modules/cluster-logging-rn-5.4.4.adoc +++ /dev/null @@ -1,22 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-4"] -= Logging 5.4.4 -This release includes link:https://access.redhat.com/errata/RHBA-2022:5907[RHBA-2022:5907-OpenShift Logging Bug Fix Release 5.4.4]. - -[id="openshift-logging-5-4-4-bug-fixes"] -== Bug fixes - -* Before this update, non-Latin characters displayed incorrectly in Elasticsearch. With this update, Elasticsearch displays all valid UTF-8 symbols correctly. (link:https://issues.redhat.com/browse/LOG-2794[LOG-2794]) - -* Before this update, non-Latin characters displayed incorrectly in Fluentd. With this update, Fluentd displays all valid UTF-8 symbols correctly. (link:https://issues.redhat.com/browse/LOG-2657[LOG-2657]) - -* Before this update, the metrics server for the collector attempted to bind to an address using a value exposed by an environment variable. This change modifies the configuration so that the metrics server binds to any available interface. (link:https://issues.redhat.com/browse/LOG-2821[LOG-2821]) - -* Before this update, the `cluster-logging` Operator relied on the cluster to create a secret. This cluster behavior changed in {product-title} 4.11, which caused logging deployments to fail. With this update, the `cluster-logging` Operator resolves the issue by creating the secret if needed.
(link:https://issues.redhat.com/browse/LOG-2840[LOG-2840]) - -[id="openshift-logging-5-4-4-cves"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-21540[CVE-2022-21540] -* link:https://access.redhat.com/security/cve/CVE-2022-21541[CVE-2022-21541] -* link:https://access.redhat.com/security/cve/CVE-2022-34169[CVE-2022-34169] diff --git a/modules/cluster-logging-rn-5.4.5.adoc b/modules/cluster-logging-rn-5.4.5.adoc deleted file mode 100644 index 678a4775ce8b..000000000000 --- a/modules/cluster-logging-rn-5.4.5.adoc +++ /dev/null @@ -1,26 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-5_{context}"] -= Logging 5.4.5 -This release includes link:https://access.redhat.com/errata/RHSA-2022:6183[RHSA-2022:6183-OpenShift Logging Bug Fix Release 5.4.5]. - -[id="openshift-logging-5-4-5-bug-fixes_{context}"] -== Bug fixes -* Before this update, the Operator did not ensure that the pod was ready, which caused the cluster to reach an inoperable state during a cluster restart. With this update, the Operator marks new pods as ready before continuing to a new pod during a restart, which resolves the issue. (link:https://issues.redhat.com/browse/LOG-2881[LOG-2881]) - -* Before this update, the addition of multi-line error detection caused internal routing to change and forward records to the wrong destination. With this update, the internal routing is correct. (link:https://issues.redhat.com/browse/LOG-2946[LOG-2946]) - -* Before this update, the Operator could not decode index setting JSON responses with a quoted Boolean value and would result in an error. With this update, the Operator can properly decode this JSON response. (link:https://issues.redhat.com/browse/LOG-3009[LOG-3009]) - -* Before this update, Elasticsearch index templates defined the fields for labels with the wrong types. This change updates those templates to match the expected types forwarded by the log collector. (link:https://issues.redhat.com/browse/LOG-2972[LOG-2972]) - -[id="openshift-logging-5-4-5-cves_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-1292[CVE-2022-1292] -* link:https://access.redhat.com/security/cve/CVE-2022-1586[CVE-2022-1586] -* link:https://access.redhat.com/security/cve/CVE-2022-1785[CVE-2022-1785] -* link:https://access.redhat.com/security/cve/CVE-2022-1897[CVE-2022-1897] -* link:https://access.redhat.com/security/cve/CVE-2022-1927[CVE-2022-1927] -* link:https://access.redhat.com/security/cve/CVE-2022-2068[CVE-2022-2068] -* link:https://access.redhat.com/security/cve/CVE-2022-2097[CVE-2022-2097] -* link:https://access.redhat.com/security/cve/CVE-2022-30631[CVE-2022-30631] diff --git a/modules/cluster-logging-rn-5.4.6.adoc b/modules/cluster-logging-rn-5.4.6.adoc deleted file mode 100644 index 88b38bf0a7aa..000000000000 --- a/modules/cluster-logging-rn-5.4.6.adoc +++ /dev/null @@ -1,25 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-6_{context}"] -= Logging 5.4.6 -This release includes link:https://access.redhat.com/errata/RHBA-2022:6558[OpenShift Logging Bug Fix Release 5.4.6]. - -[id="openshift-logging-5-4-6-bug-fixes_{context}"] -== Bug fixes -* Before this update, Fluentd would sometimes not recognize that the Kubernetes platform rotated the log file and would no longer read log messages. This update corrects that by setting the configuration parameter suggested by the upstream development team. 
(link:https://issues.redhat.com/browse/LOG-2792[LOG-2792]) - -* Before this update, each rollover job created empty indices when the `ClusterLogForwarder` custom resource had JSON parsing defined. With this update, new indices are not empty. (link:https://issues.redhat.com/browse/LOG-2823[LOG-2823]) - -* Before this update, if you deleted the Kibana Custom Resource, the {product-title} web console continued displaying a link to Kibana. With this update, removing the Kibana Custom Resource also removes that link. (link:https://issues.redhat.com/browse/LOG-3054[LOG-3054]) - -[id="openshift-logging-5-4-6-cves_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2015-20107[CVE-2015-20107] -* link:https://access.redhat.com/security/cve/CVE-2022-0391[CVE-2022-0391] -* link:https://access.redhat.com/security/cve/CVE-2022-21123[CVE-2022-21123] -* link:https://access.redhat.com/security/cve/CVE-2022-21125[CVE-2022-21125] -* link:https://access.redhat.com/security/cve/CVE-2022-21166[CVE-2022-21166] -* link:https://access.redhat.com/security/cve/CVE-2022-29154[CVE-2022-29154] -* link:https://access.redhat.com/security/cve/CVE-2022-32206[CVE-2022-32206] -* link:https://access.redhat.com/security/cve/CVE-2022-32208[CVE-2022-32208] -* link:https://access.redhat.com/security/cve/CVE-2022-34903[CVE-2022-34903] diff --git a/modules/cluster-logging-rn-5.4.7.adoc b/modules/cluster-logging-rn-5.4.7.adoc deleted file mode 100644 index 9d65c5b935d5..000000000000 --- a/modules/cluster-logging-rn-5.4.7.adoc +++ /dev/null @@ -1,13 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-7_{context}"] -= Logging 5.4.7 -This release includes link:https://access.redhat.com/errata/RHBA-2022:6857[OpenShift Logging Bug Fix Release 5.4.7]. - -[id="openshift-logging-5-4-7-bug-fixes_{context}"] -== Bug fixes -* (link:https://issues.redhat.com/browse/LOG-2464[LOG-2464]) - -[id="openshift-logging-5-4-7-cves_{context}"] -== CVEs -(None.) diff --git a/modules/cluster-logging-rn-5.4.8.adoc b/modules/cluster-logging-rn-5.4.8.adoc deleted file mode 100644 index d5dcadfe1fb4..000000000000 --- a/modules/cluster-logging-rn-5.4.8.adoc +++ /dev/null @@ -1,35 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-8_{context}"] -= Logging 5.4.8 -This release includes link:https://access.redhat.com/errata/RHSA-2022:7435[RHSA-2022:7435-OpenShift Logging Bug Fix Release 5.4.8]. - -[id="openshift-logging-5-4-8-bug-fixes"] -== Bug fixes -None. 
- -[id="openshift-logging-5-4-8-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2016-3709[CVE-2016-3709] -* link:https://access.redhat.com/security/cve/CVE-2020-35525[CVE-2020-35525] -* link:https://access.redhat.com/security/cve/CVE-2020-35527[CVE-2020-35527] -* link:https://access.redhat.com/security/cve/CVE-2020-36518[CVE-2020-36518] -* link:https://access.redhat.com/security/cve/CVE-2022-1304[CVE-2022-1304] -* link:https://access.redhat.com/security/cve/CVE-2022-2509[CVE-2022-2509] -* link:https://access.redhat.com/security/cve/CVE-2022-3515[CVE-2022-3515] -* link:https://access.redhat.com/security/cve/CVE-2022-22624[CVE-2022-22624] -* link:https://access.redhat.com/security/cve/CVE-2022-22628[CVE-2022-22628] -* link:https://access.redhat.com/security/cve/CVE-2022-22629[CVE-2022-22629] -* link:https://access.redhat.com/security/cve/CVE-2022-22662[CVE-2022-22662] -* link:https://access.redhat.com/security/cve/CVE-2022-26700[CVE-2022-26700] -* link:https://access.redhat.com/security/cve/CVE-2022-26709[CVE-2022-26709] -* link:https://access.redhat.com/security/cve/CVE-2022-26710[CVE-2022-26710] -* link:https://access.redhat.com/security/cve/CVE-2022-26716[CVE-2022-26716] -* link:https://access.redhat.com/security/cve/CVE-2022-26717[CVE-2022-26717] -* link:https://access.redhat.com/security/cve/CVE-2022-26719[CVE-2022-26719] -* link:https://access.redhat.com/security/cve/CVE-2022-30293[CVE-2022-30293] -* link:https://access.redhat.com/security/cve/CVE-2022-32149[CVE-2022-32149] -* link:https://access.redhat.com/security/cve/CVE-2022-37434[CVE-2022-37434] -* link:https://access.redhat.com/security/cve/CVE-2022-40674[CVE-2022-40674] -* link:https://access.redhat.com/security/cve/CVE-2022-42003[CVE-2022-42003] -* link:https://access.redhat.com/security/cve/CVE-2022-42004[CVE-2022-42004] diff --git a/modules/cluster-logging-rn-5.4.9.adoc b/modules/cluster-logging-rn-5.4.9.adoc deleted file mode 100644 index 89c6b1e57b5a..000000000000 --- a/modules/cluster-logging-rn-5.4.9.adoc +++ /dev/null @@ -1,85 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-9_{context}"] -= Logging 5.4.9 -This release includes link:https://access.redhat.com/errata/RHBA-2022:8780[OpenShift Logging Bug Fix Release 5.4.9]. - -[id="openshift-logging-5-4-9-bug-fixes"] -== Bug fixes -* Before this update, the Fluentd collector would warn of unused configuration parameters. This update removes those configuration parameters and their warning messages. (link:https://issues.redhat.com/browse/LOG-3074[LOG-3074]) - -* Before this update, Kibana had a fixed `24h` OAuth cookie expiration time, which resulted in 401 errors in Kibana whenever the `accessTokenInactivityTimeout` field was set to a value lower than `24h`. With this update, Kibana's OAuth cookie expiration time synchronizes to the `accessTokenInactivityTimeout`, with a default value of `24h`. 
(link:https://issues.redhat.com/browse/LOG-3306[LOG-3306]) - -[id="openshift-logging-5-4-9-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2016-3709[CVE-2016-3709] -* link:https://access.redhat.com/security/cve/CVE-2020-35525[CVE-2020-35525] -* link:https://access.redhat.com/security/cve/CVE-2020-35527[CVE-2020-35527] -* link:https://access.redhat.com/security/cve/CVE-2020-36516[CVE-2020-36516] -* link:https://access.redhat.com/security/cve/CVE-2020-36558[CVE-2020-36558] -* link:https://access.redhat.com/security/cve/CVE-2021-3640[CVE-2021-3640] -* link:https://access.redhat.com/security/cve/CVE-2021-30002[CVE-2021-30002] -* link:https://access.redhat.com/security/cve/CVE-2022-0168[CVE-2022-0168] -* link:https://access.redhat.com/security/cve/CVE-2022-0561[CVE-2022-0561] -* link:https://access.redhat.com/security/cve/CVE-2022-0562[CVE-2022-0562] -* link:https://access.redhat.com/security/cve/CVE-2022-0617[CVE-2022-0617] -* link:https://access.redhat.com/security/cve/CVE-2022-0854[CVE-2022-0854] -* link:https://access.redhat.com/security/cve/CVE-2022-0865[CVE-2022-0865] -* link:https://access.redhat.com/security/cve/CVE-2022-0891[CVE-2022-0891] -* link:https://access.redhat.com/security/cve/CVE-2022-0908[CVE-2022-0908] -* link:https://access.redhat.com/security/cve/CVE-2022-0909[CVE-2022-0909] -* link:https://access.redhat.com/security/cve/CVE-2022-0924[CVE-2022-0924] -* link:https://access.redhat.com/security/cve/CVE-2022-1016[CVE-2022-1016] -* link:https://access.redhat.com/security/cve/CVE-2022-1048[CVE-2022-1048] -* link:https://access.redhat.com/security/cve/CVE-2022-1055[CVE-2022-1055] -* link:https://access.redhat.com/security/cve/CVE-2022-1184[CVE-2022-1184] -* link:https://access.redhat.com/security/cve/CVE-2022-1292[CVE-2022-1292] -* link:https://access.redhat.com/security/cve/CVE-2022-1304[CVE-2022-1304] -* link:https://access.redhat.com/security/cve/CVE-2022-1355[CVE-2022-1355] -* link:https://access.redhat.com/security/cve/CVE-2022-1586[CVE-2022-1586] -* link:https://access.redhat.com/security/cve/CVE-2022-1785[CVE-2022-1785] -* link:https://access.redhat.com/security/cve/CVE-2022-1852[CVE-2022-1852] -* link:https://access.redhat.com/security/cve/CVE-2022-1897[CVE-2022-1897] -* link:https://access.redhat.com/security/cve/CVE-2022-1927[CVE-2022-1927] -* link:https://access.redhat.com/security/cve/CVE-2022-2068[CVE-2022-2068] -* link:https://access.redhat.com/security/cve/CVE-2022-2078[CVE-2022-2078] -* link:https://access.redhat.com/security/cve/CVE-2022-2097[CVE-2022-2097] -* link:https://access.redhat.com/security/cve/CVE-2022-2509[CVE-2022-2509] -* link:https://access.redhat.com/security/cve/CVE-2022-2586[CVE-2022-2586] -* link:https://access.redhat.com/security/cve/CVE-2022-2639[CVE-2022-2639] -* link:https://access.redhat.com/security/cve/CVE-2022-2938[CVE-2022-2938] -* link:https://access.redhat.com/security/cve/CVE-2022-3515[CVE-2022-3515] -* link:https://access.redhat.com/security/cve/CVE-2022-20368[CVE-2022-20368] -* link:https://access.redhat.com/security/cve/CVE-2022-21499[CVE-2022-21499] -* link:https://access.redhat.com/security/cve/CVE-2022-21618[CVE-2022-21618] -* link:https://access.redhat.com/security/cve/CVE-2022-21619[CVE-2022-21619] -* link:https://access.redhat.com/security/cve/CVE-2022-21624[CVE-2022-21624] -* link:https://access.redhat.com/security/cve/CVE-2022-21626[CVE-2022-21626] -* link:https://access.redhat.com/security/cve/CVE-2022-21628[CVE-2022-21628] -* link:https://access.redhat.com/security/cve/CVE-2022-22624[CVE-2022-22624] 
-* link:https://access.redhat.com/security/cve/CVE-2022-22628[CVE-2022-22628] -* link:https://access.redhat.com/security/cve/CVE-2022-22629[CVE-2022-22629] -* link:https://access.redhat.com/security/cve/CVE-2022-22662[CVE-2022-22662] -* link:https://access.redhat.com/security/cve/CVE-2022-22844[CVE-2022-22844] -* link:https://access.redhat.com/security/cve/CVE-2022-23960[CVE-2022-23960] -* link:https://access.redhat.com/security/cve/CVE-2022-24448[CVE-2022-24448] -* link:https://access.redhat.com/security/cve/CVE-2022-25255[CVE-2022-25255] -* link:https://access.redhat.com/security/cve/CVE-2022-26373[CVE-2022-26373] -* link:https://access.redhat.com/security/cve/CVE-2022-26700[CVE-2022-26700] -* link:https://access.redhat.com/security/cve/CVE-2022-26709[CVE-2022-26709] -* link:https://access.redhat.com/security/cve/CVE-2022-26710[CVE-2022-26710] -* link:https://access.redhat.com/security/cve/CVE-2022-26716[CVE-2022-26716] -* link:https://access.redhat.com/security/cve/CVE-2022-26717[CVE-2022-26717] -* link:https://access.redhat.com/security/cve/CVE-2022-26719[CVE-2022-26719] -* link:https://access.redhat.com/security/cve/CVE-2022-27404[CVE-2022-27404] -* link:https://access.redhat.com/security/cve/CVE-2022-27405[CVE-2022-27405] -* link:https://access.redhat.com/security/cve/CVE-2022-27406[CVE-2022-27406] -* link:https://access.redhat.com/security/cve/CVE-2022-27950[CVE-2022-27950] -* link:https://access.redhat.com/security/cve/CVE-2022-28390[CVE-2022-28390] -* link:https://access.redhat.com/security/cve/CVE-2022-28893[CVE-2022-28893] -* link:https://access.redhat.com/security/cve/CVE-2022-29581[CVE-2022-29581] -* link:https://access.redhat.com/security/cve/CVE-2022-30293[CVE-2022-30293] -* link:https://access.redhat.com/security/cve/CVE-2022-34903[CVE-2022-34903] -* link:https://access.redhat.com/security/cve/CVE-2022-36946[CVE-2022-36946] -* link:https://access.redhat.com/security/cve/CVE-2022-37434[CVE-2022-37434] -* link:https://access.redhat.com/security/cve/CVE-2022-39399[CVE-2022-39399] diff --git a/modules/cluster-logging-rn-5.5.10.adoc b/modules/cluster-logging-rn-5.5.10.adoc deleted file mode 100644 index 0ec69d907639..000000000000 --- a/modules/cluster-logging-rn-5.5.10.adoc +++ /dev/null @@ -1,18 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:content-type: REFERENCE -[id="cluster-logging-release-notes-5-5-10{context}"] -= Logging 5.5.10 -This release includes link:https://access.redhat.com/errata/RHBA-2023:1827[OpenShift Logging Bug Fix Release 5.5.10]. - -[id="cluster-logging-5-5-10-bug-fixes"] -== Bug fixes -* Before this update, the logging view plugin of the OpenShift Web Console showed only an error text when the LokiStack was not reachable. After this update the plugin shows a proper error message with details on how to fix the unreachable LokiStack. 
(link:https://issues.redhat.com/browse/LOG-2874[LOG-2874]) - -[id="cluster-logging-5-5-10-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-4304[CVE-2022-4304] -* link:https://access.redhat.com/security/cve/CVE-2022-4450[CVE-2022-4450] -* link:https://access.redhat.com/security/cve/CVE-2023-0215[CVE-2023-0215] -* link:https://access.redhat.com/security/cve/CVE-2023-0286[CVE-2023-0286] -* link:https://access.redhat.com/security/cve/CVE-2023-0361[CVE-2023-0361] -* link:https://access.redhat.com/security/cve/CVE-2023-23916[CVE-2023-23916] diff --git a/modules/cluster-logging-rn-5.5.2.adoc b/modules/cluster-logging-rn-5.5.2.adoc deleted file mode 100644 index 7cab2f497647..000000000000 --- a/modules/cluster-logging-rn-5.5.2.adoc +++ /dev/null @@ -1,45 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-5-2_{context}"] -= Logging 5.5.2 -This release includes link:https://access.redhat.com/errata/RHBA-2022:6559[OpenShift Logging Bug Fix Release 5.5.2]. - -[id="openshift-logging-5-5-2-bug-fixes_{context}"] -== Bug fixes -* Before this update, alerting rules for the Fluentd collector did not adhere to the {product-title} monitoring style guidelines. This update modifies those alerts to include the namespace label, resolving the issue. (link:https://issues.redhat.com/browse/LOG-1823[LOG-1823]) - -* Before this update, the index management rollover script failed to generate a new index name whenever there was more than one hyphen character in the name of the index. With this update, index names generate correctly. (link:https://issues.redhat.com/browse/LOG-2644[LOG-2644]) - -* Before this update, the Kibana route was setting a `caCertificate` value without a certificate present. With this update, no `caCertificate` value is set. (link:https://issues.redhat.com/browse/LOG-2661[LOG-2661]) - -* Before this update, a change in the collector dependencies caused it to issue a warning message for unused parameters. With this update, removing unused configuration parameters resolves the issue. (link:https://issues.redhat.com/browse/LOG-2859[LOG-2859]) - -* Before this update, pods created for deployments that Loki Operator created were mistakenly scheduled on nodes with non-Linux operating systems, if such nodes were available in the cluster the Operator was running in. With this update, the Operator attaches an additional node-selector to the pod definitions which only allows scheduling the pods on Linux-based nodes. (link:https://issues.redhat.com/browse/LOG-2895[LOG-2895]) - -* Before this update, the OpenShift Console Logs view did not filter logs by severity due to a LogQL parser issue in the LokiStack gateway. With this update, a parser fix resolves the issue and the OpenShift Console Logs view can filter by severity. (link:https://issues.redhat.com/browse/LOG-2908[LOG-2908]) - -* Before this update, a refactoring of the Fluentd collector plugins removed the timestamp field for events. This update restores the timestamp field, sourced from the event's received time. (link:https://issues.redhat.com/browse/LOG-2923[LOG-2923]) - -* Before this update, absence of a `level` field in audit logs caused an error in vector logs. With this update, the addition of a `level` field in the audit log record resolves the issue. 
(link:https://issues.redhat.com/browse/LOG-2961[LOG-2961]) - -* Before this update, if you deleted the Kibana Custom Resource, the {product-title} web console continued displaying a link to Kibana. With this update, removing the Kibana Custom Resource also removes that link. (link:https://issues.redhat.com/browse/LOG-3053[LOG-3053]) - -* Before this update, each rollover job created empty indices when the `ClusterLogForwarder` custom resource had JSON parsing defined. With this update, new indices are not empty. (link:https://issues.redhat.com/browse/LOG-3063[LOG-3063]) - -* Before this update, when the user deleted the LokiStack after an update to Loki Operator 5.5 resources originally created by Loki Operator 5.4 remained. With this update, the resources' owner-references point to the 5.5 LokiStack. (link:https://issues.redhat.com/browse/LOG-2945[LOG-2945]) - -* Before this update, a user was not able to view the application logs of namespaces they have access to. With this update, the Loki Operator automatically creates a cluster role and cluster role binding allowing users to read application logs. (link:https://issues.redhat.com/browse/LOG-2918[LOG-2918]) - -* Before this update, users with cluster-admin privileges were not able to properly view infrastructure and audit logs using the logging console. With this update, the authorization check has been extended to also recognize users in cluster-admin and dedicated-admin groups as admins. (link:https://issues.redhat.com/browse/LOG-2970[LOG-2970]) - -[id="openshift-logging-5-5-2-cves_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2015-20107[CVE-2015-20107] -* link:https://access.redhat.com/security/cve/CVE-2022-0391[CVE-2022-0391] -* link:https://access.redhat.com/security/cve/CVE-2022-21123[CVE-2022-21123] -* link:https://access.redhat.com/security/cve/CVE-2022-21125[CVE-2022-21125] -* link:https://access.redhat.com/security/cve/CVE-2022-21166[CVE-2022-21166] -* link:https://access.redhat.com/security/cve/CVE-2022-29154[CVE-2022-29154] -* link:https://access.redhat.com/security/cve/CVE-2022-32206[CVE-2022-32206] -* link:https://access.redhat.com/security/cve/CVE-2022-32208[CVE-2022-32208] -* link:https://access.redhat.com/security/cve/CVE-2022-34903[CVE-2022-34903] diff --git a/modules/cluster-logging-rn-5.5.3.adoc b/modules/cluster-logging-rn-5.5.3.adoc deleted file mode 100644 index 59a9a158b01d..000000000000 --- a/modules/cluster-logging-rn-5.5.3.adoc +++ /dev/null @@ -1,36 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-5-3_{context}"] -= Logging 5.5.3 -This release includes link:https://access.redhat.com/errata/RHBA-2022:6858[OpenShift Logging Bug Fix Release 5.5.3]. - -[id="openshift-logging-5-5-3-bug-fixes_{context}"] -== Bug fixes -* Before this update, log entries that had structured messages included the original message field, which made the entry larger. This update removes the message field for structured logs to reduce the increased size. (link:https://issues.redhat.com/browse/LOG-2759[LOG-2759]) - -* Before this update, the collector configuration excluded logs from `collector`, `default-log-store`, and `visualization` pods, but was unable to exclude logs archived in a `.gz` file. With this update, archived logs stored as `.gz` files of `collector`, `default-log-store`, and `visualization` pods are also excluded. 
(link:https://issues.redhat.com/browse/LOG-2844[LOG-2844]) - -* Before this update, when requests to an unavailable pod were sent through the gateway, no alert would warn of the disruption. With this update, individual alerts will generate if the gateway has issues completing a write or read request. (link:https://issues.redhat.com/browse/LOG-2884[LOG-2884]) - -* Before this update, pod metadata could be altered by fluent plugins because the values passed through the pipeline by reference. This update ensures each log message receives a copy of the pod metadata so each message processes independently. (link:https://issues.redhat.com/browse/LOG-3046[LOG-3046]) - -* Before this update, selecting *unknown* severity in the OpenShift Console Logs view excluded logs with a `level=unknown` value. With this update, logs without level and with `level=unknown` values are visible when filtering by *unknown* severity. (link:https://issues.redhat.com/browse/LOG-3062[LOG-3062]) - -* Before this update, log records sent to Elasticsearch had an extra field named `write-index` that contained the name of the index to which the logs needed to be sent. This field is not a part of the data model. After this update, this field is no longer sent. (link:https://issues.redhat.com/browse/LOG-3075[LOG-3075]) - -* With the introduction of the new built-in link:https://cloud.redhat.com/blog/pod-security-admission-in-openshift-4.11[Pod Security Admission Controller], Pods not configured in accordance with the enforced security standards defined globally or on the namespace level cannot run. With this update, the Operator and collectors allow privileged execution and run without security audit warnings or errors. (link:https://issues.redhat.com/browse/LOG-3077[LOG-3077]) - -* Before this update, the Operator removed any custom outputs defined in the `ClusterLogForwarder` custom resource when using LokiStack as the default log storage. With this update, the Operator merges custom outputs with the default outputs when processing the `ClusterLogForwarder` custom resource. (link:https://issues.redhat.com/browse/LOG-3095[LOG-3095]) - -[id="openshift-logging-5-5-3-cves_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2015-20107[CVE-2015-20107] -* link:https://access.redhat.com/security/cve/CVE-2022-0391[CVE-2022-0391] -* link:https://access.redhat.com/security/cve/CVE-2022-2526[CVE-2022-2526] -* link:https://access.redhat.com/security/cve/CVE-2022-21123[CVE-2022-21123] -* link:https://access.redhat.com/security/cve/CVE-2022-21125[CVE-2022-21125] -* link:https://access.redhat.com/security/cve/CVE-2022-21166[CVE-2022-21166] -* link:https://access.redhat.com/security/cve/CVE-2022-29154[CVE-2022-29154] -* link:https://access.redhat.com/security/cve/CVE-2022-32206[CVE-2022-32206] -* link:https://access.redhat.com/security/cve/CVE-2022-32208[CVE-2022-32208] -* link:https://access.redhat.com/security/cve/CVE-2022-34903[CVE-2022-34903] diff --git a/modules/cluster-logging-rn-5.5.4.adoc b/modules/cluster-logging-rn-5.5.4.adoc deleted file mode 100644 index eee627dc3237..000000000000 --- a/modules/cluster-logging-rn-5.5.4.adoc +++ /dev/null @@ -1,41 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-5-4_{context}"] -= Logging 5.5.4 -This release includes link:https://access.redhat.com/errata/RHSA-2022:7434[RHSA-2022:7434-OpenShift Logging Bug Fix Release 5.5.4]. 
- -[id="openshift-logging-5-5-4-bug-fixes"] -== Bug fixes -* Before this update, an error in the query parser of the logging view plugin caused parts of the logs query to disappear if the query contained curly brackets `{}`. This made the queries invalid, leading to errors being returned for valid queries. With this update, the parser correctly handles these queries. (link:https://issues.redhat.com/browse/LOG-3042[LOG-3042]) - -* Before this update, the Operator could enter a loop of removing and recreating the collector daemonset while the Elasticsearch or Kibana deployments changed their status. With this update, a fix in the status handling of the Operator resolves the issue. (link:https://issues.redhat.com/browse/LOG-3049[LOG-3049]) - -* Before this update, no alerts were implemented to support the collector implementation of Vector. This change adds Vector alerts and deploys separate alerts, depending upon the chosen collector implementation. (link:https://issues.redhat.com/browse/LOG-3127[LOG-3127]) - -* Before this update, the secret creation component of the Elasticsearch Operator modified internal secrets constantly. With this update, the existing secret is properly handled. (link:https://issues.redhat.com/browse/LOG-3138[LOG-3138]) - -* Before this update, a prior refactoring of the logging `must-gather` scripts removed the expected location for the artifacts. This update reverts that change to write artifacts to the `/must-gather` folder. (link:https://issues.redhat.com/browse/LOG-3213[LOG-3213]) - -* Before this update, on certain clusters, the Prometheus exporter would bind on IPv4 instead of IPv6. After this update, Fluentd detects the IP version and binds to `0.0.0.0` for IPv4 or `[::]` for IPv6. (link:https://issues.redhat.com/browse/LOG-3162[LOG-3162]) - -[id="openshift-logging-5-5-4-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2020-35525[CVE-2020-35525] -* link:https://access.redhat.com/security/cve/CVE-2020-35527[CVE-2020-35527] -* link:https://access.redhat.com/security/cve/CVE-2022-0494[CVE-2022-0494] -* link:https://access.redhat.com/security/cve/CVE-2022-1353[CVE-2022-1353] -* link:https://access.redhat.com/security/cve/CVE-2022-2509[CVE-2022-2509] -* link:https://access.redhat.com/security/cve/CVE-2022-2588[CVE-2022-2588] -* link:https://access.redhat.com/security/cve/CVE-2022-3515[CVE-2022-3515] -* link:https://access.redhat.com/security/cve/CVE-2022-21618[CVE-2022-21618] -* link:https://access.redhat.com/security/cve/CVE-2022-21619[CVE-2022-21619] -* link:https://access.redhat.com/security/cve/CVE-2022-21624[CVE-2022-21624] -* link:https://access.redhat.com/security/cve/CVE-2022-21626[CVE-2022-21626] -* link:https://access.redhat.com/security/cve/CVE-2022-21628[CVE-2022-21628] -* link:https://access.redhat.com/security/cve/CVE-2022-23816[CVE-2022-23816] -* link:https://access.redhat.com/security/cve/CVE-2022-23825[CVE-2022-23825] -* link:https://access.redhat.com/security/cve/CVE-2022-29900[CVE-2022-29900] -* link:https://access.redhat.com/security/cve/CVE-2022-29901[CVE-2022-29901] -* link:https://access.redhat.com/security/cve/CVE-2022-32149[CVE-2022-32149] -* link:https://access.redhat.com/security/cve/CVE-2022-37434[CVE-2022-37434] -* link:https://access.redhat.com/security/cve/CVE-2022-40674[CVE-2022-40674] diff --git a/modules/cluster-logging-rn-5.5.5.adoc b/modules/cluster-logging-rn-5.5.5.adoc deleted file mode 100644 index e7314fb41822..000000000000 --- a/modules/cluster-logging-rn-5.5.5.adoc +++ /dev/null @@ -1,93 +0,0 @@ -//module 
included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-5-5_{context}"] -= Logging 5.5.5 -This release includes link:https://access.redhat.com/errata/RHSA-2022:8781[OpenShift Logging Bug Fix Release 5.5.5]. - -[id="openshift-logging-5-5-5-bug-fixes"] -== Bug fixes -* Before this update, Kibana had a fixed `24h` OAuth cookie expiration time, which resulted in 401 errors in Kibana whenever the `accessTokenInactivityTimeout` field was set to a value lower than `24h`. With this update, Kibana's OAuth cookie expiration time synchronizes to the `accessTokenInactivityTimeout`, with a default value of `24h`. (link:https://issues.redhat.com/browse/LOG-3305[LOG-3305]) - -* Before this update, Vector parsed the message field when JSON parsing was enabled without also defining `structuredTypeKey` or `structuredTypeName` values. With this update, a value is required for either `structuredTypeKey` or `structuredTypeName` when writing structured logs to Elasticsearch. (link:https://issues.redhat.com/browse/LOG-3284[LOG-3284]) - -* Before this update, the `FluentdQueueLengthIncreasing` alert could fail to fire when there was a cardinality issue with the set of labels returned from this alert expression. This update reduces the labels to include only those required for the alert. (link:https://issues.redhat.com/browse/LOG-3226[LOG-3226]) - -* Before this update, Loki did not support reaching external storage in a disconnected cluster. With this update, proxy environment variables and proxy trusted CA bundles are included in the container image to support these connections. (link:https://issues.redhat.com/browse/LOG-2860[LOG-2860]) - -* Before this update, {product-title} web console users could not choose the `ConfigMap` object that includes the CA certificate for Loki, causing pods to operate without the CA. With this update, web console users can select the config map, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3310[LOG-3310]) - -* Before this update, the CA key was used as the volume name for mounting the CA into Loki, causing error states when the CA key included non-conforming characters (such as dots). With this update, the volume name is standardized to an internal string, which resolves the issue.
(link:https://issues.redhat.com/browse/LOG-3332[LOG-3332]) - -[id="openshift-logging-5-5-5-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2016-3709[CVE-2016-3709] -* link:https://access.redhat.com/security/cve/CVE-2020-35525[CVE-2020-35525] -* link:https://access.redhat.com/security/cve/CVE-2020-35527[CVE-2020-35527] -* link:https://access.redhat.com/security/cve/CVE-2020-36516[CVE-2020-36516] -* link:https://access.redhat.com/security/cve/CVE-2020-36558[CVE-2020-36558] -* link:https://access.redhat.com/security/cve/CVE-2021-3640[CVE-2021-3640] -* link:https://access.redhat.com/security/cve/CVE-2021-30002[CVE-2021-30002] -* link:https://access.redhat.com/security/cve/CVE-2022-0168[CVE-2022-0168] -* link:https://access.redhat.com/security/cve/CVE-2022-0561[CVE-2022-0561] -* link:https://access.redhat.com/security/cve/CVE-2022-0562[CVE-2022-0562] -* link:https://access.redhat.com/security/cve/CVE-2022-0617[CVE-2022-0617] -* link:https://access.redhat.com/security/cve/CVE-2022-0854[CVE-2022-0854] -* link:https://access.redhat.com/security/cve/CVE-2022-0865[CVE-2022-0865] -* link:https://access.redhat.com/security/cve/CVE-2022-0891[CVE-2022-0891] -* link:https://access.redhat.com/security/cve/CVE-2022-0908[CVE-2022-0908] -* link:https://access.redhat.com/security/cve/CVE-2022-0909[CVE-2022-0909] -* link:https://access.redhat.com/security/cve/CVE-2022-0924[CVE-2022-0924] -* link:https://access.redhat.com/security/cve/CVE-2022-1016[CVE-2022-1016] -* link:https://access.redhat.com/security/cve/CVE-2022-1048[CVE-2022-1048] -* link:https://access.redhat.com/security/cve/CVE-2022-1055[CVE-2022-1055] -* link:https://access.redhat.com/security/cve/CVE-2022-1184[CVE-2022-1184] -* link:https://access.redhat.com/security/cve/CVE-2022-1292[CVE-2022-1292] -* link:https://access.redhat.com/security/cve/CVE-2022-1304[CVE-2022-1304] -* link:https://access.redhat.com/security/cve/CVE-2022-1355[CVE-2022-1355] -* link:https://access.redhat.com/security/cve/CVE-2022-1586[CVE-2022-1586] -* link:https://access.redhat.com/security/cve/CVE-2022-1785[CVE-2022-1785] -* link:https://access.redhat.com/security/cve/CVE-2022-1852[CVE-2022-1852] -* link:https://access.redhat.com/security/cve/CVE-2022-1897[CVE-2022-1897] -* link:https://access.redhat.com/security/cve/CVE-2022-1927[CVE-2022-1927] -* link:https://access.redhat.com/security/cve/CVE-2022-2068[CVE-2022-2068] -* link:https://access.redhat.com/security/cve/CVE-2022-2078[CVE-2022-2078] -* link:https://access.redhat.com/security/cve/CVE-2022-2097[CVE-2022-2097] -* link:https://access.redhat.com/security/cve/CVE-2022-2509[CVE-2022-2509] -* link:https://access.redhat.com/security/cve/CVE-2022-2586[CVE-2022-2586] -* link:https://access.redhat.com/security/cve/CVE-2022-2639[CVE-2022-2639] -* link:https://access.redhat.com/security/cve/CVE-2022-2938[CVE-2022-2938] -* link:https://access.redhat.com/security/cve/CVE-2022-3515[CVE-2022-3515] -* link:https://access.redhat.com/security/cve/CVE-2022-20368[CVE-2022-20368] -* link:https://access.redhat.com/security/cve/CVE-2022-21499[CVE-2022-21499] -* link:https://access.redhat.com/security/cve/CVE-2022-21618[CVE-2022-21618] -* link:https://access.redhat.com/security/cve/CVE-2022-21619[CVE-2022-21619] -* link:https://access.redhat.com/security/cve/CVE-2022-21624[CVE-2022-21624] -* link:https://access.redhat.com/security/cve/CVE-2022-21626[CVE-2022-21626] -* link:https://access.redhat.com/security/cve/CVE-2022-21628[CVE-2022-21628] -* link:https://access.redhat.com/security/cve/CVE-2022-22624[CVE-2022-22624] 
-* link:https://access.redhat.com/security/cve/CVE-2022-22628[CVE-2022-22628] -* link:https://access.redhat.com/security/cve/CVE-2022-22629[CVE-2022-22629] -* link:https://access.redhat.com/security/cve/CVE-2022-22662[CVE-2022-22662] -* link:https://access.redhat.com/security/cve/CVE-2022-22844[CVE-2022-22844] -* link:https://access.redhat.com/security/cve/CVE-2022-23960[CVE-2022-23960] -* link:https://access.redhat.com/security/cve/CVE-2022-24448[CVE-2022-24448] -* link:https://access.redhat.com/security/cve/CVE-2022-25255[CVE-2022-25255] -* link:https://access.redhat.com/security/cve/CVE-2022-26373[CVE-2022-26373] -* link:https://access.redhat.com/security/cve/CVE-2022-26700[CVE-2022-26700] -* link:https://access.redhat.com/security/cve/CVE-2022-26709[CVE-2022-26709] -* link:https://access.redhat.com/security/cve/CVE-2022-26710[CVE-2022-26710] -* link:https://access.redhat.com/security/cve/CVE-2022-26716[CVE-2022-26716] -* link:https://access.redhat.com/security/cve/CVE-2022-26717[CVE-2022-26717] -* link:https://access.redhat.com/security/cve/CVE-2022-26719[CVE-2022-26719] -* link:https://access.redhat.com/security/cve/CVE-2022-27404[CVE-2022-27404] -* link:https://access.redhat.com/security/cve/CVE-2022-27405[CVE-2022-27405] -* link:https://access.redhat.com/security/cve/CVE-2022-27406[CVE-2022-27406] -* link:https://access.redhat.com/security/cve/CVE-2022-27950[CVE-2022-27950] -* link:https://access.redhat.com/security/cve/CVE-2022-28390[CVE-2022-28390] -* link:https://access.redhat.com/security/cve/CVE-2022-28893[CVE-2022-28893] -* link:https://access.redhat.com/security/cve/CVE-2022-29581[CVE-2022-29581] -* link:https://access.redhat.com/security/cve/CVE-2022-30293[CVE-2022-30293] -* link:https://access.redhat.com/security/cve/CVE-2022-34903[CVE-2022-34903] -* link:https://access.redhat.com/security/cve/CVE-2022-36946[CVE-2022-36946] -* link:https://access.redhat.com/security/cve/CVE-2022-37434[CVE-2022-37434] -* link:https://access.redhat.com/security/cve/CVE-2022-39399[CVE-2022-39399] diff --git a/modules/cluster-logging-rn-5.5.6.adoc b/modules/cluster-logging-rn-5.5.6.adoc deleted file mode 100644 index 9e904588c6a9..000000000000 --- a/modules/cluster-logging-rn-5.5.6.adoc +++ /dev/null @@ -1,49 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-5-6_{context}"] -= Logging 5.5.6 -This release includes link:https://access.redhat.com/errata/RHBA-2023:0386[OpenShift Logging Bug Fix Release 5.5.6]. - -[id="openshift-logging-5-5-6-bug-fixes"] -== Bug fixes -* Before this update, the Pod Security admission controller added the label `podSecurityLabelSync = true` to the `openshift-logging` namespace. This resulted in our specified security labels being overwritten, and as a result Collector pods would not start. With this update, the label `podSecurityLabelSync = false` preserves security labels. Collector pods deploy as expected. (link:https://issues.redhat.com/browse/LOG-3340[LOG-3340]) - -* Before this update, the Operator installed the console view plugin, even when it was not enabled on the cluster. This caused the Operator to crash. With this update, if an account for a cluster does not have the console view enabled, the Operator functions normally and does not install the console view. 
(link:https://issues.redhat.com/browse/LOG-3407[LOG-3407]) - -* Before this update, a prior fix to support a regression where the status of the Elasticsearch deployment was not being updated caused the Operator to crash unless the `Red Hat Elasticsearch Operator` was deployed. With this update, that fix has been reverted so the Operator is now stable but re-introduces the previous issue related to the reported status. (link:https://issues.redhat.com/browse/LOG-3428[LOG-3428]) - -* Before this update, the Loki Operator only deployed one replica of the LokiStack gateway regardless of the chosen stack size. With this update, the number of replicas is correctly configured according to the selected size. (link:https://issues.redhat.com/browse/LOG-3478[LOG-3478]) - -* Before this update, records written to Elasticsearch would fail if multiple label keys had the same prefix and some keys included dots. With this update, underscores replace dots in label keys, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3341[LOG-3341]) - -* Before this update, the logging view plugin contained an incompatible feature for certain versions of {product-title}. With this update, the correct release stream of the plugin resolves the issue. (link:https://issues.redhat.com/browse/LOG-3467[LOG-3467]) - -* Before this update, the reconciliation of the `ClusterLogForwarder` custom resource would incorrectly report a degraded status of one or more pipelines causing the collector pods to restart every 8-10 seconds. With this update, reconciliation of the `ClusterLogForwarder` custom resource processes correctly, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3469[LOG-3469]) - -* Before this change the spec for the `outputDefaults` field of the ClusterLogForwarder custom resource would apply the settings to every declared Elasticsearch output type. This change corrects the behavior to match the enhancement specification where the setting specifically applies to the default managed Elasticsearch store. (link:https://issues.redhat.com/browse/LOG-3342[LOG-3342]) - -* Before this update, the OpenShift CLI (oc) `must-gather` script did not complete because the OpenShift CLI (oc) needs a folder with write permission to build its cache. With this update, the OpenShift CLI (oc) has write permissions to a folder, and the `must-gather` script completes successfully. (link:https://issues.redhat.com/browse/LOG-3472[LOG-3472]) - -* Before this update, the Loki Operator webhook server caused TLS errors. With this update, the Loki Operator webhook PKI is managed by the Operator Lifecycle Manager's dynamic webhook management resolving the issue. 
(link:https://issues.redhat.com/browse/LOG-3511[LOG-3511]) - -[id="openshift-logging-5-5-6-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2021-46848[CVE-2021-46848] -* link:https://access.redhat.com/security/cve/CVE-2022-2056[CVE-2022-2056] -* link:https://access.redhat.com/security/cve/CVE-2022-2057[CVE-2022-2057] -* link:https://access.redhat.com/security/cve/CVE-2022-2058[CVE-2022-2058] -* link:https://access.redhat.com/security/cve/CVE-2022-2519[CVE-2022-2519] -* link:https://access.redhat.com/security/cve/CVE-2022-2520[CVE-2022-2520] -* link:https://access.redhat.com/security/cve/CVE-2022-2521[CVE-2022-2521] -* link:https://access.redhat.com/security/cve/CVE-2022-2867[CVE-2022-2867] -* link:https://access.redhat.com/security/cve/CVE-2022-2868[CVE-2022-2868] -* link:https://access.redhat.com/security/cve/CVE-2022-2869[CVE-2022-2869] -* link:https://access.redhat.com/security/cve/CVE-2022-2953[CVE-2022-2953] -* link:https://access.redhat.com/security/cve/CVE-2022-2964[CVE-2022-2964] -* link:https://access.redhat.com/security/cve/CVE-2022-4139[CVE-2022-4139] -* link:https://access.redhat.com/security/cve/CVE-2022-35737[CVE-2022-35737] -* link:https://access.redhat.com/security/cve/CVE-2022-42010[CVE-2022-42010] -* link:https://access.redhat.com/security/cve/CVE-2022-42011[CVE-2022-42011] -* link:https://access.redhat.com/security/cve/CVE-2022-42012[CVE-2022-42012] -* link:https://access.redhat.com/security/cve/CVE-2022-42898[CVE-2022-42898] -* link:https://access.redhat.com/security/cve/CVE-2022-43680[CVE-2022-43680] diff --git a/modules/cluster-logging-rn-5.5.7.adoc b/modules/cluster-logging-rn-5.5.7.adoc deleted file mode 100644 index c8e7b84eba7c..000000000000 --- a/modules/cluster-logging-rn-5.5.7.adoc +++ /dev/null @@ -1,22 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-5-7_{context}"] -= Logging 5.5.7 -This release includes link:https://access.redhat.com/errata/RHSA-2023:0633[OpenShift Logging Bug Fix Release 5.5.7]. - -[id="openshift-logging-5-5-7-bug-fixes"] -== Bug fixes -* Before this update, the LokiStack Gateway Labels Enforcer generated parsing errors for valid LogQL queries when using combined label filters with boolean expressions. With this update, the LokiStack LogQL implementation supports label filters with boolean expression and resolves the issue. (link:https://issues.redhat.com/browse/LOG-3534[LOG-3534]) - -* Before this update, the `ClusterLogForwarder` custom resource (CR) did not pass TLS credentials for syslog output to Fluentd, resulting in errors during forwarding. With this update, credentials pass correctly to Fluentd, resolving the issue. 
(link:https://issues.redhat.com/browse/LOG-3533[LOG-3533]) - -[id="openshift-logging-5-5-7-CVEs"] -== CVEs -link:https://access.redhat.com/security/cve/CVE-2021-46848[CVE-2021-46848] -link:https://access.redhat.com/security/cve/CVE-2022-3821[CVE-2022-3821] -link:https://access.redhat.com/security/cve/CVE-2022-35737[CVE-2022-35737] -link:https://access.redhat.com/security/cve/CVE-2022-42010[CVE-2022-42010] -link:https://access.redhat.com/security/cve/CVE-2022-42011[CVE-2022-42011] -link:https://access.redhat.com/security/cve/CVE-2022-42012[CVE-2022-42012] -link:https://access.redhat.com/security/cve/CVE-2022-42898[CVE-2022-42898] -link:https://access.redhat.com/security/cve/CVE-2022-43680[CVE-2022-43680] diff --git a/modules/cluster-logging-rn-5.5.8.adoc b/modules/cluster-logging-rn-5.5.8.adoc deleted file mode 100644 index b9062d4d0750..000000000000 --- a/modules/cluster-logging-rn-5.5.8.adoc +++ /dev/null @@ -1,23 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-5-8_{context}"] -= Logging 5.5.8 -This release includes link:https://access.redhat.com/errata/RHSA-2023:0930[OpenShift Logging Bug Fix Release 5.5.8]. - -[id="openshift-logging-5-5-8-bug-fixes"] -== Bug fixes -* Before this update, the `priority` field was missing from `systemd` logs due to an error in how the collector set `level` fields. With this update, these fields are set correctly, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3630[LOG-3630]) - -[id="openshift-logging-5-5-8-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2020-10735[CVE-2020-10735] -* link:https://access.redhat.com/security/cve/CVE-2021-28861[CVE-2021-28861] -* link:https://access.redhat.com/security/cve/CVE-2022-2873[CVE-2022-2873] -* link:https://access.redhat.com/security/cve/CVE-2022-4415[CVE-2022-4415] -* link:https://access.redhat.com/security/cve/CVE-2022-24999[CVE-2022-24999] -* link:https://access.redhat.com/security/cve/CVE-2022-40897[CVE-2022-40897] -* link:https://access.redhat.com/security/cve/CVE-2022-41222[CVE-2022-41222] -* link:https://access.redhat.com/security/cve/CVE-2022-41717[CVE-2022-41717] -* link:https://access.redhat.com/security/cve/CVE-2022-43945[CVE-2022-43945] -* link:https://access.redhat.com/security/cve/CVE-2022-45061[CVE-2022-45061] -* link:https://access.redhat.com/security/cve/CVE-2022-48303[CVE-2022-48303] diff --git a/modules/cluster-logging-rn-5.5.9.adoc b/modules/cluster-logging-rn-5.5.9.adoc deleted file mode 100644 index 61c30f80324a..000000000000 --- a/modules/cluster-logging-rn-5.5.9.adoc +++ /dev/null @@ -1,21 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-5-9_{context}"] -= Logging 5.5.9 -This release includes link:https://access.redhat.com/errata/RHSA-2023:1310[OpenShift Logging Bug Fix Release 5.5.9]. - -[id="openshift-logging-5-5-9-bug-fixes"] -== Bug fixes -* Before this update, a problem with the Fluentd collector caused it to not capture OAuth login events stored in `/var/log/auth-server/audit.log`. This led to incomplete collection of login events from the OAuth service. 
With this update, the Fluentd collector now resolves this issue by capturing all login events from the OAuth service, including those stored in `/var/log/auth-server/audit.log`, as expected.(link:https://issues.redhat.com/browse/LOG-3730[LOG-3730]) - -* Before this update, when structured parsing was enabled and messages were forwarded to multiple destinations, they were not deep copied. This resulted in some of the received logs including the structured message, while others did not. With this update, the configuration generation has been modified to deep copy messages before JSON parsing. As a result, all received logs now have structured messages included, even when they are forwarded to multiple destinations.(link:https://issues.redhat.com/browse/LOG-3767[LOG-3767]) - -[id="openshift-logging-5-5-9-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-4304[CVE-2022-4304] -* link:https://access.redhat.com/security/cve/CVE-2022-4450[CVE-2022-4450] -* link:https://access.redhat.com/security/cve/CVE-2022-41717[CVE-2022-41717] -* link:https://access.redhat.com/security/cve/CVE-2023-0215[CVE-2023-0215] -* link:https://access.redhat.com/security/cve/CVE-2023-0286[CVE-2023-0286] -* link:https://access.redhat.com/security/cve/CVE-2023-0767[CVE-2023-0767] -* link:https://access.redhat.com/security/cve/CVE-2023-23916[CVE-2023-23916] diff --git a/modules/cluster-logging-rn-5.5.adoc b/modules/cluster-logging-rn-5.5.adoc deleted file mode 100644 index 47a81edfe948..000000000000 --- a/modules/cluster-logging-rn-5.5.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -//cluster-logging-release-notes -[id="cluster-logging-release-notes-5-5-0"] -= Logging 5.5 -The following advisories are available for Logging 5.5:link:https://access.redhat.com/errata/RHSA-2022:6051[Release 5.5] - -[id="openshift-logging-5-5-0-enhancements"] -== Enhancements -* With this update, you can forward structured logs from different containers within the same pod to different indices. To use this feature, you must configure the pipeline with multi-container support and annotate the pods. (link:https://issues.redhat.com/browse/LOG-1296[LOG-1296]) - -[IMPORTANT] -==== -JSON formatting of logs varies by application. Because creating too many indices impacts performance, limit your use of this feature to creating indices for logs that have incompatible JSON formats. Use queries to separate logs from different namespaces, or applications with compatible JSON formats. -==== - -* With this update, you can filter logs with Elasticsearch outputs by using the Kubernetes common labels, `app.kubernetes.io/component`, `app.kubernetes.io/managed-by`, `app.kubernetes.io/part-of`, and `app.kubernetes.io/version`. Non-Elasticsearch output types can use all labels included in `kubernetes.labels`. (link:https://issues.redhat.com/browse/LOG-2388[LOG-2388]) - -* With this update, clusters with AWS Security Token Service (STS) enabled may use STS authentication to forward logs to Amazon CloudWatch. (link:https://issues.redhat.com/browse/LOG-1976[LOG-1976]) - -* With this update, the 'Loki Operator' Operator and Vector collector move from Technical Preview to General Availability. Full feature parity with prior releases are pending, and some APIs remain Technical Previews. See the *Logging with the LokiStack* section for details. 
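The structured-log enhancements above ultimately land in the `ClusterLogForwarder` custom resource. As a minimal sketch of what that looks like when writing structured logs to the default Elasticsearch store, the following example enables JSON parsing and derives the index name from a pod label; the `logFormat` label name is illustrative, so check the exact field names against the release you are running.

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: instance
  namespace: openshift-logging
spec:
  outputDefaults:
    elasticsearch:
      structuredTypeKey: kubernetes.labels.logFormat # index name comes from this label (illustrative)
      structuredTypeName: nologformat                # fallback index when the label is missing
  pipelines:
  - name: structured-app-logs
    inputRefs:
    - application
    outputRefs:
    - default
    parse: json # keep the parsed JSON as a structured message
----

Keeping the number of distinct `logFormat` values small follows the guidance in the note above: each value maps to its own index, and creating too many indices impacts Elasticsearch performance.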
- -[id="openshift-logging-5-5-0-bug-fixes"] -== Bug fixes -* Before this update, clusters configured to forward logs to Amazon CloudWatch wrote rejected log files to temporary storage, causing cluster instability over time. With this update, chunk backup for all storage options has been disabled, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2746[LOG-2746]) - -* Before this update, the Operator was using versions of some APIs that are deprecated and planned for removal in future versions of {product-title}. This update moves dependencies to the supported API versions. (link:https://issues.redhat.com/browse/LOG-2656[LOG-2656]) - -Before this update, the Operator was using versions of some APIs that are deprecated and planned for removal in future versions of {product-title}. This update moves dependencies to the supported API versions. (link:https://issues.redhat.com/browse/LOG-2656[LOG-2656]) - -* Before this update, multiple `ClusterLogForwarder` pipelines configured for multiline error detection caused the collector to go into a `crashloopbackoff` error state. This update fixes the issue where multiple configuration sections had the same unique ID. (link:https://issues.redhat.com/browse/LOG-2241[LOG-2241]) - -* Before this update, the collector could not save non UTF-8 symbols to the Elasticsearch storage logs. With this update the collector encodes non UTF-8 symbols, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2203[LOG-2203]) - -* Before this update, non-latin characters displayed incorrectly in Kibana. With this update, Kibana displays all valid UTF-8 symbols correctly. (link:https://issues.redhat.com/browse/LOG-2784[LOG-2784]) - -== CVEs -[id="openshift-logging-5-5-0-CVEs"] -* link:https://access.redhat.com/security/cve/CVE-2021-38561[CVE-2021-38561] -* link:https://access.redhat.com/security/cve/CVE-2022-1012[CVE-2022-1012] -* link:https://access.redhat.com/security/cve/CVE-2022-1292[CVE-2022-1292] -* link:https://access.redhat.com/security/cve/CVE-2022-1586[CVE-2022-1586] -* link:https://access.redhat.com/security/cve/CVE-2022-1785[CVE-2022-1785] -* link:https://access.redhat.com/security/cve/CVE-2022-1897[CVE-2022-1897] -* link:https://access.redhat.com/security/cve/CVE-2022-1927[CVE-2022-1927] -* link:https://access.redhat.com/security/cve/CVE-2022-2068[CVE-2022-2068] -* link:https://access.redhat.com/security/cve/CVE-2022-2097[CVE-2022-2097] -* link:https://access.redhat.com/security/cve/CVE-2022-21698[CVE-2022-21698] -* link:https://access.redhat.com/security/cve/CVE-2022-30631[CVE-2022-30631] -* link:https://access.redhat.com/security/cve/CVE-2022-32250[CVE-2022-32250] diff --git a/modules/cluster-logging-rn-5.6.1.adoc b/modules/cluster-logging-rn-5.6.1.adoc deleted file mode 100644 index 40a28ff3b4f3..000000000000 --- a/modules/cluster-logging-rn-5.6.1.adoc +++ /dev/null @@ -1,35 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-6-1_{context}"] -= Logging 5.6.1 -This release includes link:https://access.redhat.com/errata/RHSA-2023:0634[OpenShift Logging Bug Fix Release 5.6.1]. - -[id="openshift-logging-5-6-1-bug-fixes"] -== Bug fixes -* Before this update, the compactor would report TLS certificate errors from communications with the querier when retention was active. With this update, the compactor and querier no longer communicate erroneously over HTTP. 
(link:https://issues.redhat.com/browse/LOG-3494[LOG-3494]) - -* Before this update, the Loki Operator would not retry setting the status of the `LokiStack` CR, which caused stale status information. With this update, the Operator retries status information updates on conflict. (link:https://issues.redhat.com/browse/LOG-3496[LOG-3496]) - -* Before this update, the Loki Operator Webhook server caused TLS errors when the `kube-apiserver-operator` Operator checked the webhook validity. With this update, the Loki Operator Webhook PKI is managed by the Operator Lifecycle Manager (OLM), resolving the issue. (link:https://issues.redhat.com/browse/LOG-3510[LOG-3510]) - -* Before this update, the LokiStack Gateway Labels Enforcer generated parsing errors for valid LogQL queries when using combined label filters with boolean expressions. With this update, the LokiStack LogQL implementation supports label filters with boolean expression and resolves the issue. (link:https://issues.redhat.com/browse/LOG-3441[LOG-3441]), (link:https://issues.redhat.com/browse/LOG-3397[LOG-3397]) - -* Before this update, records written to Elasticsearch would fail if multiple label keys had the same prefix and some keys included dots. With this update, underscores replace dots in label keys, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3463[LOG-3463]) - -* Before this update, the `Red Hat OpenShift Logging` Operator was not available for {product-title} 4.10 clusters because of an incompatibility between {product-title} console and the logging-view-plugin. With this update, the plugin is properly integrated with the {product-title} 4.10 admin console. (link:https://issues.redhat.com/browse/LOG-3447[LOG-3447]) - -* Before this update the reconciliation of the `ClusterLogForwarder` custom resource would incorrectly report a degraded status of pipelines that reference the default logstore. With this update, the pipeline validates properly.(link:https://issues.redhat.com/browse/LOG-3477[LOG-3477]) - - -[id="openshift-logging-5-6-1-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2021-46848[CVE-2021-46848] -* link:https://access.redhat.com/security/cve/CVE-2022-3821[CVE-2022-3821] -* link:https://access.redhat.com/security/cve/CVE-2022-35737[CVE-2022-35737] -* link:https://access.redhat.com/security/cve/CVE-2022-42010[CVE-2022-42010] -* link:https://access.redhat.com/security/cve/CVE-2022-42011[CVE-2022-42011] -* link:https://access.redhat.com/security/cve/CVE-2022-42012[CVE-2022-42012] -* link:https://access.redhat.com/security/cve/CVE-2022-42898[CVE-2022-42898] -* link:https://access.redhat.com/security/cve/CVE-2022-43680[CVE-2022-43680] -* link:https://access.redhat.com/security/cve/CVE-2021-35065[CVE-2021-35065] -* link:https://access.redhat.com/security/cve/CVE-2022-46175[CVE-2022-46175] diff --git a/modules/cluster-logging-rn-5.6.2.adoc b/modules/cluster-logging-rn-5.6.2.adoc deleted file mode 100644 index 541faf0a0b82..000000000000 --- a/modules/cluster-logging-rn-5.6.2.adoc +++ /dev/null @@ -1,29 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-6-2_{context}"] -= Logging 5.6.2 -This release includes link:https://access.redhat.com/errata/RHBA-2023:0793[OpenShift Logging Bug Fix Release 5.6.2]. - -[id="openshift-logging-5-6-2-bug-fixes"] -== Bug fixes -* Before this update, the collector did not set `level` fields correctly based on priority for systemd logs. 
With this update, `level` fields are set correctly. (link:https://issues.redhat.com/browse/LOG-3429[LOG-3429]) - -* Before this update, the Operator incorrectly generated incompatibility warnings on {product-title} 4.12 or later. With this update, the Operator max {product-title} version value has been corrected, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3584[LOG-3584]) - -* Before this update, creating a `ClusterLogForwarder` custom resource (CR) with an output value of `default` did not generate any errors. With this update, an error warning that this value is invalid generates appropriately. (link:https://issues.redhat.com/browse/LOG-3437[LOG-3437]) - -* Before this update, when the `ClusterLogForwarder` custom resource (CR) had multiple pipelines configured with one output set as `default`, the collector pods restarted. With this update, the logic for output validation has been corrected, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3559[LOG-3559]) - -* Before this update, collector pods restarted after being created. With this update, the deployed collector does not restart on its own. (link:https://issues.redhat.com/browse/LOG-3608[LOG-3608]) - -* Before this update, patch releases removed previous versions of the Operators from the catalog. This made installing the old versions impossible. This update changes bundle configurations so that previous releases of the same minor version stay in the catalog. (link:https://issues.redhat.com/browse/LOG-3635[LOG-3635]) - -[id="openshift-logging-5-6-2-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-23521[CVE-2022-23521] -* link:https://access.redhat.com/security/cve/CVE-2022-40303[CVE-2022-40303] -* link:https://access.redhat.com/security/cve/CVE-2022-40304[CVE-2022-40304] -* link:https://access.redhat.com/security/cve/CVE-2022-41903[CVE-2022-41903] -* link:https://access.redhat.com/security/cve/CVE-2022-47629[CVE-2022-47629] -* link:https://access.redhat.com/security/cve/CVE-2023-21835[CVE-2023-21835] -* link:https://access.redhat.com/security/cve/CVE-2023-21843[CVE-2023-21843] diff --git a/modules/cluster-logging-rn-5.6.3.adoc b/modules/cluster-logging-rn-5.6.3.adoc deleted file mode 100644 index 226c1059e7b7..000000000000 --- a/modules/cluster-logging-rn-5.6.3.adoc +++ /dev/null @@ -1,23 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-6-3_{context}"] -= Logging 5.6.3 -This release includes link:https://access.redhat.com/errata/RHSA-2023:0932[OpenShift Logging Bug Fix Release 5.6.3]. - -[id="openshift-logging-5-6-3-bug-fixes"] -== Bug fixes -* Before this update, the operator stored gateway tenant secret information in a config map. With this update, the operator stores this information in a secret. (link:https://issues.redhat.com/browse/LOG-3717[LOG-3717]) - -* Before this update, the Fluentd collector did not capture OAuth login events stored in `/var/log/auth-server/audit.log`. With this update, Fluentd captures these OAuth login events, resolving the issue. 
(link:https://issues.redhat.com/browse/LOG-3729[LOG-3729]) - -[id="openshift-logging-5-6-3-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2020-10735[CVE-2020-10735] -* link:https://access.redhat.com/security/cve/CVE-2021-28861[CVE-2021-28861] -* link:https://access.redhat.com/security/cve/CVE-2022-2873[CVE-2022-2873] -* link:https://access.redhat.com/security/cve/CVE-2022-4415[CVE-2022-4415] -* link:https://access.redhat.com/security/cve/CVE-2022-40897[CVE-2022-40897] -* link:https://access.redhat.com/security/cve/CVE-2022-41222[CVE-2022-41222] -* link:https://access.redhat.com/security/cve/CVE-2022-43945[CVE-2022-43945] -* link:https://access.redhat.com/security/cve/CVE-2022-45061[CVE-2022-45061] -* link:https://access.redhat.com/security/cve/CVE-2022-48303[CVE-2022-48303] diff --git a/modules/cluster-logging-rn-5.6.4.adoc b/modules/cluster-logging-rn-5.6.4.adoc deleted file mode 100644 index 9e2cf339fa63..000000000000 --- a/modules/cluster-logging-rn-5.6.4.adoc +++ /dev/null @@ -1,34 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-6-4_{context}"] -= Logging 5.6.4 -This release includes link:https://access.redhat.com/errata/RHBA-2023:1311[OpenShift Logging Bug Fix Release 5.6.4]. - -[id="openshift-logging-5-6-4-bug-fixes"] -== Bug fixes -* Before this update, when LokiStack was deployed as the log store, the logs generated by Loki pods were collected and sent to LokiStack. With this update, the logs generated by Loki are excluded from collection and will not be stored. (link:https://issues.redhat.com/browse/LOG-3280[LOG-3280]) - -* Before this update, when the query editor on the Logs page of the OpenShift Web Console was empty, the drop-down menus did not populate. With this update, if an empty query is attempted, an error message is displayed and the drop-down menus now populate as expected. (link:https://issues.redhat.com/browse/LOG-3454[LOG-3454]) - -* Before this update, when the `tls.insecureSkipVerify` option was set to `true`, the Cluster Logging Operator would generate incorrect configuration. As a result, the operator would fail to send data to Elasticsearch when attempting to skip certificate validation. With this update, the Cluster Logging Operator generates the correct TLS configuration even when `tls.insecureSkipVerify` is enabled. As a result, data can be sent successfully to Elasticsearch even when attempting to skip certificate validation. (link:https://issues.redhat.com/browse/LOG-3475[LOG-3475]) - -* Before this update, when structured parsing was enabled and messages were forwarded to multiple destinations, they were not deep copied. This resulted in some of the received logs including the structured message, while others did not. With this update, the configuration generation has been modified to deep copy messages before JSON parsing. As a result, all received messages now have structured messages included, even when they are forwarded to multiple destinations. (link:https://issues.redhat.com/browse/LOG-3640[LOG-3640]) - -* Before this update, if the `collection` field contained `{}` it could result in the Operator crashing. With this update, the Operator will ignore this value, allowing the operator to continue running smoothly without interruption. (link:https://issues.redhat.com/browse/LOG-3733[LOG-3733]) - -* Before this update, the `nodeSelector` attribute for the Gateway component of LokiStack did not have any effect. 
With this update, the `nodeSelector` attribute functions as expected. (link:https://issues.redhat.com/browse/LOG-3783[LOG-3783]) - -* Before this update, the static LokiStack memberlist configuration relied solely on private IP networks. As a result, when the {product-title} cluster pod network was configured with a public IP range, the LokiStack pods would crashloop. With this update, the LokiStack administrator now has the option to use the pod network for the memberlist configuration. This resolves the issue and prevents the LokiStack pods from entering a crashloop state when the {product-title} cluster pod network is configured with a public IP range. (link:https://issues.redhat.com/browse/LOG-3814[LOG-3814]) - -* Before this update, if the `tls.insecureSkipVerify` field was set to `true`, the Cluster Logging Operator would generate an incorrect configuration. As a result, the Operator would fail to send data to Elasticsearch when attempting to skip certificate validation. With this update, the Operator generates the correct TLS configuration even when `tls.insecureSkipVerify` is enabled. As a result, data can be sent successfully to Elasticsearch even when attempting to skip certificate validation. (link:https://issues.redhat.com/browse/LOG-3838[LOG-3838]) - -* Before this update, if the Cluster Logging Operator (CLO) was installed without the Elasticsearch Operator, the CLO pod would continuously display an error message related to the deletion of Elasticsearch. With this update, the CLO now performs additional checks before displaying any error messages. As a result, error messages related to Elasticsearch deletion are no longer displayed in the absence of the Elasticsearch Operator.(link:https://issues.redhat.com/browse/LOG-3763[LOG-3763]) - -[id="openshift-logging-5-6-4-CVEs"] -== CVEs -* https://access.redhat.com/security/cve/CVE-2022-4304[CVE-2022-4304] -* https://access.redhat.com/security/cve/CVE-2022-4450[CVE-2022-4450] -* https://access.redhat.com/security/cve/CVE-2023-0215[CVE-2023-0215] -* https://access.redhat.com/security/cve/CVE-2023-0286[CVE-2023-0286] -* https://access.redhat.com/security/cve/CVE-2023-0767[CVE-2023-0767] -* https://access.redhat.com/security/cve/CVE-2023-23916[CVE-2023-23916] diff --git a/modules/cluster-logging-rn-5.6.5.adoc b/modules/cluster-logging-rn-5.6.5.adoc deleted file mode 100644 index e8c52a5ed062..000000000000 --- a/modules/cluster-logging-rn-5.6.5.adoc +++ /dev/null @@ -1,27 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:content-type: REFERENCE -[id="cluster-logging-release-notes-5-6-5{context}"] -= Logging 5.6.5 -This release includes link:https://access.redhat.com/errata/RHSA-2023:1953[OpenShift Logging Bug Fix Release 5.6.5]. - -[id="openshift-logging-5-6-5-bug-fixes"] -== Bug fixes -* Before this update, the template definitions prevented Elasticsearch from indexing some labels and namespace_labels, causing issues with data ingestion. With this update, the fix replaces dots and slashes in labels to ensure proper ingestion, effectively resolving the issue. (link:https://issues.redhat.com/browse/LOG-3419[LOG-3419]) - -* Before this update, if the Logs page of the OpenShift Web Console failed to connect to the LokiStack, a generic error message was displayed, providing no additional context or troubleshooting suggestions. With this update, the error message has been enhanced to include more specific details and recommendations for troubleshooting. 
(link:https://issues.redhat.com/browse/LOG-3750[LOG-3750]) - -* Before this update, time range formats were not validated, leading to errors selecting a custom date range. With this update, time formats are now validated, enabling users to select a valid range. If an invalid time range format is selected, an error message is displayed to the user. (link:https://issues.redhat.com/browse/LOG-3583[LOG-3583]) - -* Before this update, when searching logs in Loki, even if the length of an expression did not exceed 5120 characters, the query would fail in many cases. With this update, query authorization label matchers have been optimized, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3480[LOG-3480]) - -* Before this update, the Loki Operator failed to produce a memberlist configuration that was sufficient for locating all the components when using a memberlist for private IPs. With this update, the fix ensures that the generated configuration includes the advertised port, allowing for successful lookup of all components. (link:https://issues.redhat.com/browse/LOG-4008[LOG-4008]) - -[id="openshift-logging-5-6-5-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-4269[CVE-2022-4269] -* link:https://access.redhat.com/security/cve/CVE-2022-4378[CVE-2022-4378] -* link:https://access.redhat.com/security/cve/CVE-2023-0266[CVE-2023-0266] -* link:https://access.redhat.com/security/cve/CVE-2023-0361[CVE-2023-0361] -* link:https://access.redhat.com/security/cve/CVE-2023-0386[CVE-2023-0386] -* link:https://access.redhat.com/security/cve/CVE-2023-27539[CVE-2023-27539] -* link:https://access.redhat.com/security/cve/CVE-2023-28120[CVE-2023-28120] diff --git a/modules/cluster-logging-rn-5.6.adoc b/modules/cluster-logging-rn-5.6.adoc deleted file mode 100644 index 423d87804a18..000000000000 --- a/modules/cluster-logging-rn-5.6.adoc +++ /dev/null @@ -1,85 +0,0 @@ -//included in cluster-logging-release-notes.adoc -:_content-type: ASSEMBLY -[id="cluster-logging-release-notes-5-6_{context}"] -= Logging 5.6 -This release includes link:https://access.redhat.com/errata/RHSA-2023:0264[OpenShift Logging Release 5.6]. - -[id="openshift-logging-5-6-dep-notice_{context}"] -== Deprecation notice -In Logging 5.6, Fluentd is deprecated and is planned to be removed in a future release. Red Hat will provide bug fixes and support for this feature during the current release lifecycle, but this feature will no longer receive enhancements and will be removed. As an alternative to fluentd, you can use Vector instead. - -[id="openshift-logging-5-6-enhancements_{context}"] -== Enhancements -* With this update, Logging is compliant with {product-title} cluster-wide cryptographic policies. - (link:https://issues.redhat.com/browse/LOG-895[LOG-895]) - -* With this update, you can declare per-tenant, per-stream, and global policies retention policies through the LokiStack custom resource, ordered by priority. (link:https://issues.redhat.com/browse/LOG-2695[LOG-2695]) - -* With this update, Splunk is an available output option for log forwarding. (link:https://issues.redhat.com/browse/LOG-2913[LOG-2913]) - -* With this update, Vector replaces Fluentd as the default Collector. (link:https://issues.redhat.com/browse/LOG-2222[LOG-2222]) - -* With this update, the *Developer* role can access the per-project workload logs they are assigned to within the Log Console Plugin on clusters running {product-title} 4.11 and higher. 
(link:https://issues.redhat.com/browse/LOG-3388[LOG-3388]) - -* With this update, logs from any source contain a field `openshift.cluster_id`, the unique identifier of the cluster in which the Operator is deployed. You can view the `clusterID` value with the command below. (link:https://issues.redhat.com/browse/LOG-2715[LOG-2715]) - -include::snippets/logging-get-clusterid-snip.adoc[lines=9..12] - -[id="openshift-logging-5-6-known-issues_{context}"] -== Known Issues -* Before this update, Elasticsearch would reject logs if multiple label keys had the same prefix and some keys included the `.` character. This fixes the limitation of Elasticsearch by replacing `.` in the label keys with `_`. As a workaround for this issue, remove the labels that cause errors, or add a namespace to the label. (link:https://issues.redhat.com/browse/LOG-3463[LOG-3463]) - -[id="openshift-logging-5-6-bug-fixes_{context}"] -== Bug fixes -* Before this update, if you deleted the Kibana Custom Resource, the {product-title} web console continued displaying a link to Kibana. With this update, removing the Kibana Custom Resource also removes that link. (link:https://issues.redhat.com/browse/LOG-2993[LOG-2993]) - -* Before this update, a user was not able to view the application logs of namespaces they have access to. With this update, the Loki Operator automatically creates a cluster role and cluster role binding allowing users to read application logs. (link:https://issues.redhat.com/browse/LOG-3072[LOG-3072]) - -* Before this update, the Operator removed any custom outputs defined in the `ClusterLogForwarder` custom resource when using LokiStack as the default log storage. With this update, the Operator merges custom outputs with the default outputs when processing the `ClusterLogForwarder` custom resource. (link:https://issues.redhat.com/browse/LOG-3090[LOG-3090]) - -* Before this update, the CA key was used as the volume name for mounting the CA into Loki, causing error states when the CA Key included non-conforming characters, such as dots. With this update, the volume name is standardized to an internal string which resolves the issue. (link:https://issues.redhat.com/browse/LOG-3331[LOG-3331]) - -* Before this update, a default value set within the LokiStack Custom Resource Definition, caused an inability to create a LokiStack instance without a `ReplicationFactor` of `1`. With this update, the operator sets the actual value for the size used. (link:https://issues.redhat.com/browse/LOG-3296[LOG-3296]) - -* Before this update, Vector parsed the message field when JSON parsing was enabled without also defining `structuredTypeKey` or `structuredTypeName` values. With this update, a value is required for either `structuredTypeKey` or `structuredTypeName` when writing structured logs to Elasticsearch. (link:https://issues.redhat.com/browse/LOG-3195[LOG-3195]) - -* Before this update, the secret creation component of the Elasticsearch Operator modified internal secrets constantly. With this update, the existing secret is properly handled. (link:https://issues.redhat.com/browse/LOG-3161[LOG-3161]) - -* Before this update, the Operator could enter a loop of removing and recreating the collector daemonset while the Elasticsearch or Kibana deployments changed their status. With this update, a fix in the status handling of the Operator resolves the issue. 
(link:https://issues.redhat.com/browse/LOG-3157[LOG-3157]) - -* Before this update, Kibana had a fixed `24h` OAuth cookie expiration time, which resulted in 401 errors in Kibana whenever the `accessTokenInactivityTimeout` field was set to a value lower than `24h`. With this update, Kibana's OAuth cookie expiration time synchronizes to the `accessTokenInactivityTimeout`, with a default value of `24h`. (link:https://issues.redhat.com/browse/LOG-3129[LOG-3129]) - -* Before this update, the Operators general pattern for reconciling resources was to try and create before attempting to get or update which would lead to constant HTTP 409 responses after creation. With this update, Operators first attempt to retrieve an object and only create or update it if it is either missing or not as specified. (link:https://issues.redhat.com/browse/LOG-2919[LOG-2919]) - -* Before this update, the `.level` and`.structure.level` fields in Fluentd could contain different values. With this update, the values are the same for each field. (link:https://issues.redhat.com/browse/LOG-2819[LOG-2819]) - -* Before this update, the Operator did not wait for the population of the trusted CA bundle and deployed the collector a second time once the bundle updated. With this update, the Operator waits briefly to see if the bundle has been populated before it continues the collector deployment. (link:https://issues.redhat.com/browse/LOG-2789[LOG-2789]) - -* Before this update, logging telemetry info appeared twice when reviewing metrics. With this update, logging telemetry info displays as expected. (link:https://issues.redhat.com/browse/LOG-2315[LOG-2315]) - -* Before this update, Fluentd pod logs contained a warning message after enabling the JSON parsing addition. With this update, that warning message does not appear. (link:https://issues.redhat.com/browse/LOG-1806[LOG-1806]) - -* Before this update, the `must-gather` script did not complete because `oc` needs a folder with write permission to build its cache. With this update, `oc` has write permissions to a folder, and the `must-gather` script completes successfully. (link:https://issues.redhat.com/browse/LOG-3446[LOG-3446]) - -* Before this update the log collector SCC could be superseded by other SCCs on the cluster, rendering the collector unusable. This update sets the priority of the log collector SCC so that it takes precedence over the others. (link:https://issues.redhat.com/browse/LOG-3235[LOG-3235]) - -* Before this update, Vector was missing the field `sequence`, which was added to fluentd as a way to deal with a lack of actual nanoseconds precision. With this update, the field `openshift.sequence` has been added to the event logs. 
(link:https://issues.redhat.com/browse/LOG-3106[LOG-3106]) - -[id="openshift-logging-5-6-cves_{context}"] -== CVEs -* https://access.redhat.com/security/cve/CVE-2020-36518[CVE-2020-36518] -* https://access.redhat.com/security/cve/CVE-2021-46848[CVE-2021-46848] -* https://access.redhat.com/security/cve/CVE-2022-2879[CVE-2022-2879] -* https://access.redhat.com/security/cve/CVE-2022-2880[CVE-2022-2880] -* https://access.redhat.com/security/cve/CVE-2022-27664[CVE-2022-27664] -* https://access.redhat.com/security/cve/CVE-2022-32190[CVE-2022-32190] -* https://access.redhat.com/security/cve/CVE-2022-35737[CVE-2022-35737] -* https://access.redhat.com/security/cve/CVE-2022-37601[CVE-2022-37601] -* https://access.redhat.com/security/cve/CVE-2022-41715[CVE-2022-41715] -* https://access.redhat.com/security/cve/CVE-2022-42003[CVE-2022-42003] -* https://access.redhat.com/security/cve/CVE-2022-42004[CVE-2022-42004] -* https://access.redhat.com/security/cve/CVE-2022-42010[CVE-2022-42010] -* https://access.redhat.com/security/cve/CVE-2022-42011[CVE-2022-42011] -* https://access.redhat.com/security/cve/CVE-2022-42012[CVE-2022-42012] -* https://access.redhat.com/security/cve/CVE-2022-42898[CVE-2022-42898] -* https://access.redhat.com/security/cve/CVE-2022-43680[CVE-2022-43680] diff --git a/modules/cluster-logging-rn-5.7.0.adoc b/modules/cluster-logging-rn-5.7.0.adoc deleted file mode 100644 index b8dfbfda3dc4..000000000000 --- a/modules/cluster-logging-rn-5.7.0.adoc +++ /dev/null @@ -1,24 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:content-type: REFERENCE -[id="cluster-logging-release-notes-5-7-0{context}"] -= Logging 5.7.0 -This release includes link:https://access.redhat.com/errata/RHBA-2023:2133[OpenShift Logging Bug Fix Release 5.7.0]. - -[id="openshift-logging-5-7-enhancements"] -== Enhancements -With this update, you can enable logging to detect multi-line exceptions and reassemble them into a single log entry. - -To enable logging to detect multi-line exceptions and reassemble them into a single log entry, ensure that the `ClusterLogForwarder` Custom Resource (CR) contains a `detectMultilineErrors` field, with a value of `true`. - -[id="openshift-logging-5-7-known-issues"] -== Known Issues -None. - -[id="openshift-logging-5-7-0-bug-fixes"] -== Bug fixes -* Before this update, the `nodeSelector` attribute for the Gateway component of the LokiStack did not impact node scheduling. With this update, the `nodeSelector` attribute works as expected. (link:https://issues.redhat.com/browse/LOG-3713[LOG-3713]) - -[id="openshift-logging-5-7-0-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2023-1999[CVE-2023-1999] -* link:https://access.redhat.com/security/cve/CVE-2023-28617[CVE-2023-28617] diff --git a/modules/cluster-logging-systemd-scaling.adoc b/modules/cluster-logging-systemd-scaling.adoc deleted file mode 100644 index 11f2b0cfaa03..000000000000 --- a/modules/cluster-logging-systemd-scaling.adoc +++ /dev/null @@ -1,127 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/config/cluster-logging-systemd - -:_content-type: PROCEDURE -[id="cluster-logging-systemd-scaling_{context}"] -= Configuring systemd-journald for OpenShift Logging - -As you scale up your project, the default logging environment might need some -adjustments. - -For example, if you are missing logs, you might have to increase the rate limits for journald. 
-You can adjust the number of messages to retain for a specified period of time to ensure that -OpenShift Logging does not use excessive resources without dropping logs. - -You can also determine if you want the logs compressed, how long to retain logs, how or if the logs are stored, -and other settings. - -.Procedure - -. Create a Butane config file, `40-worker-custom-journald.bu`, that includes an `/etc/systemd/journald.conf` file with the required settings. -+ -[NOTE] -==== -See "Creating machine configs with Butane" for information about Butane. -==== -+ -[source,yaml,subs="attributes+"] ----- -variant: openshift -version: {product-version}.0 -metadata: - name: 40-worker-custom-journald - labels: - machineconfiguration.openshift.io/role: "worker" -storage: - files: - - path: /etc/systemd/journald.conf - mode: 0644 <1> - overwrite: true - contents: - inline: | - Compress=yes <2> - ForwardToConsole=no <3> - ForwardToSyslog=no - MaxRetentionSec=1month <4> - RateLimitBurst=10000 <5> - RateLimitIntervalSec=30s - Storage=persistent <6> - SyncIntervalSec=1s <7> - SystemMaxUse=8G <8> - SystemKeepFree=20% <9> - SystemMaxFileSize=10M <10> ----- -+ -<1> Set the permissions for the `journal.conf` file. It is recommended to set `0644` permissions. -<2> Specify whether you want logs compressed before they are written to the file system. -Specify `yes` to compress the message or `no` to not compress. The default is `yes`. -<3> Configure whether to forward log messages. Defaults to `no` for each. Specify: -* `ForwardToConsole` to forward logs to the system console. -* `ForwardToKsmg` to forward logs to the kernel log buffer. -* `ForwardToSyslog` to forward to a syslog daemon. -* `ForwardToWall` to forward messages as wall messages to all logged-in users. -<4> Specify the maximum time to store journal entries. Enter a number to specify seconds. Or -include a unit: "year", "month", "week", "day", "h" or "m". Enter `0` to disable. The default is `1month`. -<5> Configure rate limiting. If more logs are received than what is specified in `RateLimitBurst` during the time interval defined by `RateLimitIntervalSec`, all further messages within the interval are dropped until the interval is over. It is recommended to set `RateLimitIntervalSec=30s` and `RateLimitBurst=10000`, which are the defaults. -<6> Specify how logs are stored. The default is `persistent`: -* `volatile` to store logs in memory in `/var/log/journal/`. -* `persistent` to store logs to disk in `/var/log/journal/`. systemd creates the directory if it does not exist. -* `auto` to store logs in `/var/log/journal/` if the directory exists. If it does not exist, systemd temporarily stores logs in `/run/systemd/journal`. -* `none` to not store logs. systemd drops all logs. -<7> Specify the timeout before synchronizing journal files to disk for *ERR*, *WARNING*, *NOTICE*, *INFO*, and *DEBUG* logs. -systemd immediately syncs after receiving a *CRIT*, *ALERT*, or *EMERG* log. The default is `1s`. -<8> Specify the maximum size the journal can use. The default is `8G`. -<9> Specify how much disk space systemd must leave free. The default is `20%`. -<10> Specify the maximum size for individual journal files stored persistently in `/var/log/journal`. The default is `10M`. -+ -[NOTE] -==== -If you are removing the rate limit, you might see increased CPU utilization on the -system logging daemons as it processes any messages that would have previously -been throttled. 
-==== -+ -For more information on systemd settings, see link:https://www.freedesktop.org/software/systemd/man/journald.conf.html[https://www.freedesktop.org/software/systemd/man/journald.conf.html]. The default settings listed on that page might not apply to {product-title}. -+ -// Defaults from https://github.com/openshift/openshift-ansible/pull/3753/files#diff-40b7a7231e77d95ca6009dc9bcc0f470R33-R34 - -. Use Butane to generate a `MachineConfig` object file, `40-worker-custom-journald.yaml`, containing the configuration to be delivered to the nodes: -+ -[source,terminal] ----- -$ butane 40-worker-custom-journald.bu -o 40-worker-custom-journald.yaml ----- - -. Apply the machine config. For example: -+ -[source,terminal] ----- -$ oc apply -f 40-worker-custom-journald.yaml ----- -+ -The controller detects the new `MachineConfig` object and generates a new `rendered-worker-` version. - -. Monitor the status of the rollout of the new rendered configuration to each node: -+ -[source,terminal] ----- -$ oc describe machineconfigpool/worker ----- -+ -.Example output -[source,terminal] ----- -Name: worker -Namespace: -Labels: machineconfiguration.openshift.io/mco-built-in= -Annotations: -API Version: machineconfiguration.openshift.io/v1 -Kind: MachineConfigPool - -... - -Conditions: - Message: - Reason: All nodes are updating to rendered-worker-913514517bcea7c93bd446f4830bc64e ----- diff --git a/modules/cluster-logging-troubleshoot-logging.adoc b/modules/cluster-logging-troubleshoot-logging.adoc deleted file mode 100644 index da30383f4ebc..000000000000 --- a/modules/cluster-logging-troubleshoot-logging.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-troubleshoot-logging-about_{context}"] -= About troubleshooting {product-title} Logging - -You can troubleshoot the logging issues by performing the following tasks: - -* Viewing logging status -* Viewing the status of the log store -* Understanding logging alerts -* Collecting logging data for Red Hat Support -* Troubleshooting for critical alerts diff --git a/modules/cluster-logging-troubleshooting-log-forwarding.adoc b/modules/cluster-logging-troubleshooting-log-forwarding.adoc deleted file mode 100644 index 943b5cf35438..000000000000 --- a/modules/cluster-logging-troubleshooting-log-forwarding.adoc +++ /dev/null @@ -1,19 +0,0 @@ - -:_content-type: PROCEDURE -[id="cluster-logging-troubleshooting-log-forwarding_{context}"] -= Troubleshooting log forwarding - -When you create a `ClusterLogForwarder` custom resource (CR), if the Red Hat OpenShift Logging Operator does not redeploy the Fluentd pods automatically, you can delete the Fluentd pods to force them to redeploy. - -.Prerequisites - -* You have created a `ClusterLogForwarder` custom resource (CR) object. - -.Procedure - -* Delete the Fluentd pods to force them to redeploy. 
-+ -[source,terminal] ----- -$ oc delete pod --selector logging-infra=collector ----- diff --git a/modules/cluster-logging-troubleshooting-loki-entry-out-of-order-errors.adoc b/modules/cluster-logging-troubleshooting-loki-entry-out-of-order-errors.adoc deleted file mode 100644 index acfe2f9376ab..000000000000 --- a/modules/cluster-logging-troubleshooting-loki-entry-out-of-order-errors.adoc +++ /dev/null @@ -1,137 +0,0 @@ -:_module-type: PROCEDURE - -:_content-type: PROCEDURE -[id="cluser-logging-troubleshooting-loki-entry-out-of-order-messages_{context}"] -= Troubleshooting Loki "entry out of order" errors - -If your Fluentd forwards a large block of messages to a Loki logging system that exceeds the rate limit, Loki to generates "entry out of order" errors. To fix this issue, you update some values in the Loki server configuration file, `loki.yaml`. - -[NOTE] -==== -`loki.yaml` is not available on Grafana-hosted Loki. This topic does not apply to Grafana-hosted Loki servers. -==== - -.Conditions - -* The `ClusterLogForwarder` custom resource is configured to forward logs to Loki. - -* Your system sends a block of messages that is larger than 2 MB to Loki, such as: -+ ----- -"values":[["1630410392689800468","{\"kind\":\"Event\",\"apiVersion\":\ -....... -...... -...... -...... -\"received_at\":\"2021-08-31T11:46:32.800278+00:00\",\"version\":\"1.7.4 1.6.0\"}},\"@timestamp\":\"2021-08-31T11:46:32.799692+00:00\",\"viaq_index_name\":\"audit-write\",\"viaq_msg_id\":\"MzFjYjJkZjItNjY0MC00YWU4LWIwMTEtNGNmM2E5ZmViMGU4\",\"log_type\":\"audit\"}"]]}]} ----- - -* When you enter `oc logs -c fluentd`, the Fluentd logs in your OpenShift Logging cluster show the following messages: -+ -[source,text] ----- -429 Too Many Requests Ingestion rate limit exceeded (limit: 8388608 bytes/sec) while attempting to ingest '2140' lines totaling '3285284' bytes - -429 Too Many Requests Ingestion rate limit exceeded' or '500 Internal Server Error rpc error: code = ResourceExhausted desc = grpc: received message larger than max (5277702 vs. 4194304)' ----- - -* When you open the logs on the Loki server, they display `entry out of order` messages like these: -+ -[source,text] ----- -,\nentry with timestamp 2021-08-18 05:58:55.061936 +0000 UTC ignored, reason: 'entry out of order' for stream: - -{fluentd_thread=\"flush_thread_0\", log_type=\"audit\"},\nentry with timestamp 2021-08-18 06:01:18.290229 +0000 UTC ignored, reason: 'entry out of order' for stream: {fluentd_thread="flush_thread_0", log_type="audit"} ----- - -.Procedure - -. Update the following fields in the `loki.yaml` configuration file on the Loki server with the values shown here: -+ - * `grpc_server_max_recv_msg_size: 8388608` - * `chunk_target_size: 8388608` - * `ingestion_rate_mb: 8` - * `ingestion_burst_size_mb: 16` - -. Apply the changes in `loki.yaml` to the Loki server. 
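How the updated `loki.yaml` is reloaded depends on how the Loki server is deployed, so no single restart command is given here. One way to confirm that the new limits are in effect is to watch the collector logs for further rate-limit responses; a sketch that reuses the `logging-infra=collector` selector and the `fluentd` container name from the modules above:

[source,terminal]
----
$ oc logs --selector logging-infra=collector -c fluentd --tail=200 | grep "429 Too Many Requests"
----

If the change took effect, the command should stop returning new matches.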
- -.Example `loki.yaml` file -[source,yaml] ----- -auth_enabled: false - -server: - http_listen_port: 3100 - grpc_listen_port: 9096 - grpc_server_max_recv_msg_size: 8388608 - -ingester: - wal: - enabled: true - dir: /tmp/wal - lifecycler: - address: 127.0.0.1 - ring: - kvstore: - store: inmemory - replication_factor: 1 - final_sleep: 0s - chunk_idle_period: 1h # Any chunk not receiving new logs in this time will be flushed - chunk_target_size: 8388608 - max_chunk_age: 1h # All chunks will be flushed when they hit this age, default is 1h - chunk_retain_period: 30s # Must be greater than index read cache TTL if using an index cache (Default index read cache TTL is 5m) - max_transfer_retries: 0 # Chunk transfers disabled - -schema_config: - configs: - - from: 2020-10-24 - store: boltdb-shipper - object_store: filesystem - schema: v11 - index: - prefix: index_ - period: 24h - -storage_config: - boltdb_shipper: - active_index_directory: /tmp/loki/boltdb-shipper-active - cache_location: /tmp/loki/boltdb-shipper-cache - cache_ttl: 24h # Can be increased for faster performance over longer query periods, uses more disk space - shared_store: filesystem - filesystem: - directory: /tmp/loki/chunks - -compactor: - working_directory: /tmp/loki/boltdb-shipper-compactor - shared_store: filesystem - -limits_config: - reject_old_samples: true - reject_old_samples_max_age: 12h - ingestion_rate_mb: 8 - ingestion_burst_size_mb: 16 - -chunk_store_config: - max_look_back_period: 0s - -table_manager: - retention_deletes_enabled: false - retention_period: 0s - -ruler: - storage: - type: local - local: - directory: /tmp/loki/rules - rule_path: /tmp/loki/rules-temp - alertmanager_url: http://localhost:9093 - ring: - kvstore: - store: inmemory - enable_api: true ----- - -[role="_additional-resources"] -.Additional resources - -* link:https://grafana.com/docs/loki/latest/configuration/[Configuring Loki] diff --git a/modules/cluster-logging-troubleshooting-unknown.adoc b/modules/cluster-logging-troubleshooting-unknown.adoc deleted file mode 100644 index 92e173aa7ce8..000000000000 --- a/modules/cluster-logging-troubleshooting-unknown.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-troublehsooting.adoc - -[id="cluster-logging-troubleshooting-unknown_{context}"] -= Troubleshooting a Kubernetes unknown error while connecting to Elasticsearch - -If you are attempting to use a F-5 load balancer in front of Kibana with -`X-Forwarded-For` enabled, this can cause an issue in which the Elasticsearch -`Searchguard` plugin is unable to correctly accept connections from Kibana. - -.Example Kibana Error Message ----- -Kibana: Unknown error while connecting to Elasticsearch - -Error: Unknown error while connecting to Elasticsearch -Error: UnknownHostException[No trusted proxies] ----- - -.Procedure - -To configure Searchguard to ignore the extra header: - -. Scale down all Fluentd pods. - -. Scale down Elasticsearch after the Fluentd pods have terminated. - -. Add `searchguard.http.xforwardedfor.header: DUMMY` to the Elasticsearch -configuration section. -+ -[source,terminal] ----- -$ oc edit configmap/elasticsearch <1> ----- -<1> This approach requires that Elasticsearch configurations are within a config map. -+ - -. Scale Elasticsearch back up. - -. Scale up all Fluentd pods. 
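The scale-down and scale-up steps in the preceding procedure do not include commands because the object names differ between deployments. One possible sketch, assuming the Elasticsearch deployments carry the same `component=elasticsearch` label as the pods checked later in this diff, with `<deployment-name>` as a placeholder for each deployment returned by the first command:

[source,terminal]
----
# List the Elasticsearch deployments.
$ oc -n openshift-logging get deployment --selector component=elasticsearch

# Scale each deployment down before editing the config map, and back up afterward
# (adjust to the original replica count).
$ oc -n openshift-logging scale deployment/<deployment-name> --replicas=0
$ oc -n openshift-logging scale deployment/<deployment-name> --replicas=1
----

The Fluentd steps are not sketched because the collector runs as a daemon set in recent releases (see the `oc get ds` checks later in this diff), where replica counts do not apply.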
diff --git a/modules/cluster-logging-uninstall.adoc b/modules/cluster-logging-uninstall.adoc deleted file mode 100644 index 39b4b50f3cbd..000000000000 --- a/modules/cluster-logging-uninstall.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-uninstall.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-uninstall_{context}"] -= Uninstalling the {logging-title} - -You can stop log aggregation by deleting the `ClusterLogging` custom resource (CR). After deleting the CR, there are other {logging} components that remain, which you can optionally remove. - - -Deleting the `ClusterLogging` CR does not remove the persistent volume claims (PVCs). To preserve or delete the remaining PVCs, persistent volumes (PVs), and associated data, you must take further action. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -To remove OpenShift Logging: - -. Use the -ifndef::openshift-rosa,openshift-dedicated[] -{product-title} web console -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -{cluster-manager-url} -endif::[] - to remove the `ClusterLogging` CR: - -.. Switch to the *Administration* -> *Custom Resource Definitions* page. - -.. On the *Custom Resource Definitions* page, click *ClusterLogging*. - -.. On the *Custom Resource Definition Details* page, click *Instances*. - -.. Click the Options menu {kebab} next to the instance and select *Delete ClusterLogging*. - -. Optional: Delete the custom resource definitions (CRD): - -.. Switch to the *Administration* -> *Custom Resource Definitions* page. - -.. Click the Options menu {kebab} next to *ClusterLogForwarder* and select *Delete Custom Resource Definition*. - -.. Click the Options menu {kebab} next to *ClusterLogging* and select *Delete Custom Resource Definition*. - -.. Click the Options menu {kebab} next to *Elasticsearch* and select *Delete Custom Resource Definition*. - -. Optional: Remove the Red Hat OpenShift Logging Operator and OpenShift Elasticsearch Operator: - -.. Switch to the *Operators* -> *Installed Operators* page. - -.. Click the Options menu {kebab} next to the Red Hat OpenShift Logging Operator and select *Uninstall Operator*. - -.. Click the Options menu {kebab} next to the OpenShift Elasticsearch Operator and select *Uninstall Operator*. - -. Optional: Remove the OpenShift Logging and Elasticsearch projects. - -.. Switch to the *Home* -> *Projects* page. - -.. Click the Options menu {kebab} next to the *openshift-logging* project and select *Delete Project*. - -.. Confirm the deletion by typing `openshift-logging` in the dialog box and click *Delete*. - -.. Click the Options menu {kebab} next to the *openshift-operators-redhat* project and select *Delete Project*. -+ -[IMPORTANT] -==== -Do not delete the `openshift-operators-redhat` project if other global operators are installed in this namespace. -==== - -.. Confirm the deletion by typing `openshift-operators-redhat` in the dialog box and click *Delete*. - -. To keep the PVCs for reuse with other pods, keep the labels or PVC names that you need to reclaim the PVCs. - -. Optional: If you do not want to keep the PVCs, you can delete them. -+ -[WARNING] -==== -Releasing or deleting PVCs can delete PVs and cause data loss. -==== - -.. Switch to the *Storage* -> *Persistent Volume Claims* page. - -.. Click the Options menu {kebab} next to each PVC and select *Delete Persistent Volume Claim*. - -.. 
If you want to recover storage space, you can delete the PVs. diff --git a/modules/cluster-logging-update-logging.adoc b/modules/cluster-logging-update-logging.adoc deleted file mode 100644 index 06724db46dc1..000000000000 --- a/modules/cluster-logging-update-logging.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-update-logging-about_{context}"] -= About updating {product-title} Logging - -{product-title} allows you to update {product-title} logging. You must update the following operators while updating {product-title} Logging: - -* Elasticsearch Operator -* Cluster Logging Operator diff --git a/modules/cluster-logging-updating-logging-to-5-0.adoc b/modules/cluster-logging-updating-logging-to-5-0.adoc deleted file mode 100644 index bdf2c3d5ff47..000000000000 --- a/modules/cluster-logging-updating-logging-to-5-0.adoc +++ /dev/null @@ -1,225 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-updating-logging-to-5-0_{context}"] -= Updating from cluster logging in {product-title} 4.6 or earlier to OpenShift Logging 5.x - -{product-title} 4.7 made the following name changes: - -* The _cluster logging_ feature became the _Red Hat OpenShift Logging_ 5.x product. -* The _Cluster Logging_ Operator became the _Red Hat OpenShift Logging_ Operator. -* The _Elasticsearch_ Operator became _OpenShift Elasticsearch_ Operator. - -To upgrade from cluster logging in {product-title} version 4.6 and earlier to OpenShift Logging 5.x, you update the {product-title} cluster to version 4.7, 4.8, or 4.9. Then, you update the following operators: - -* From Elasticsearch Operator 4.x to OpenShift Elasticsearch Operator 5.x -* From Cluster Logging Operator 4.x to Red Hat OpenShift Logging Operator 5.x - -[IMPORTANT] -==== -You must update the OpenShift Elasticsearch Operator _before_ you update the Red Hat OpenShift Logging Operator. You must also update _both_ Operators to the same version. -==== - -If you update the operators in the wrong order, Kibana does not update and the Kibana custom resource (CR) is not created. To work around this problem, you delete the Red Hat OpenShift Logging Operator pod. When the Red Hat OpenShift Logging Operator pod redeploys, it creates the Kibana CR and Kibana becomes available again. - -.Prerequisites - -* The {product-title} version is 4.7 or later. - -* The OpenShift Logging status is healthy: -** All pods are `ready`. -** The Elasticsearch cluster is healthy. - -* Your Elasticsearch and Kibana data is backed up. - -.Procedure - -. Update the OpenShift Elasticsearch Operator: - -.. From the web console, click *Operators* -> *Installed Operators*. - -.. Select the `openshift-operators-redhat` project. - -.. Click the *OpenShift Elasticsearch Operator*. - -.. Click *Subscription* -> *Channel*. - -.. In the *Change Subscription Update Channel* window, select *stable-5.x* and click *Save*. - -.. Wait for a few seconds, then click *Operators* -> *Installed Operators*. -+ -Verify that the OpenShift Elasticsearch Operator version is 5.x.x. -+ -Wait for the *Status* field to report *Succeeded*. - -. Update the Cluster Logging Operator: - -.. From the web console, click *Operators* -> *Installed Operators*. - -.. Select the `openshift-logging` project. - -.. Click the *Cluster Logging Operator*. - -.. Click *Subscription* -> *Channel*. - -.. In the *Change Subscription Update Channel* window, select *stable-5.x* and click *Save*. - -.. 
Wait for a few seconds, then click *Operators* -> *Installed Operators*. -+ -Verify that the Red Hat OpenShift Logging Operator version is 5.0.x or 5.x.x. -+ -Wait for the *Status* field to report *Succeeded*. - -. Check the logging components: - -.. Ensure that all Elasticsearch pods are in the *Ready* status: -+ -[source,terminal] ----- -$ oc get pod -n openshift-logging --selector component=elasticsearch ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -elasticsearch-cdm-1pbrl44l-1-55b7546f4c-mshhk 2/2 Running 0 31m -elasticsearch-cdm-1pbrl44l-2-5c6d87589f-gx5hk 2/2 Running 0 30m -elasticsearch-cdm-1pbrl44l-3-88df5d47-m45jc 2/2 Running 0 29m ----- -+ -.. Ensure that the Elasticsearch cluster is healthy: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch elasticsearch-cdm-1pbrl44l-1-55b7546f4c-mshhk -- health ----- -+ -[source,json] ----- -{ - "cluster_name" : "elasticsearch", - "status" : "green", -} ----- - -.. Ensure that the Elasticsearch cron jobs are created: -+ -[source,terminal] ----- -$ oc project openshift-logging ----- -+ -[source,terminal] ----- -$ oc get cronjob ----- -+ -[source,terminal] ----- -NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE -elasticsearch-im-app */15 * * * * False 0 56s -elasticsearch-im-audit */15 * * * * False 0 56s -elasticsearch-im-infra */15 * * * * False 0 56s ----- - -.. Verify that the log store is updated to 5.0 or 5.x and the indices are `green`: -+ -[source,terminal] ----- -$ oc exec -c elasticsearch -- indices ----- -+ -Verify that the output includes the `app-00000x`, `infra-00000x`, `audit-00000x`, `.security` indices. -+ -.Sample output with indices in a green status -[%collapsible] -==== -[source,terminal] ----- -Tue Jun 30 14:30:54 UTC 2020 -health status index uuid pri rep docs.count docs.deleted store.size pri.store.size -green open infra-000008 bnBvUFEXTWi92z3zWAzieQ 3 1 222195 0 289 144 -green open infra-000004 rtDSzoqsSl6saisSK7Au1Q 3 1 226717 0 297 148 -green open infra-000012 RSf_kUwDSR2xEuKRZMPqZQ 3 1 227623 0 295 147 -green open .kibana_7 1SJdCqlZTPWlIAaOUd78yg 1 1 4 0 0 0 -green open infra-000010 iXwL3bnqTuGEABbUDa6OVw 3 1 248368 0 317 158 -green open infra-000009 YN9EsULWSNaxWeeNvOs0RA 3 1 258799 0 337 168 -green open infra-000014 YP0U6R7FQ_GVQVQZ6Yh9Ig 3 1 223788 0 292 146 -green open infra-000015 JRBbAbEmSMqK5X40df9HbQ 3 1 224371 0 291 145 -green open .orphaned.2020.06.30 n_xQC2dWQzConkvQqei3YA 3 1 9 0 0 0 -green open infra-000007 llkkAVSzSOmosWTSAJM_hg 3 1 228584 0 296 148 -green open infra-000005 d9BoGQdiQASsS3BBFm2iRA 3 1 227987 0 297 148 -green open infra-000003 1-goREK1QUKlQPAIVkWVaQ 3 1 226719 0 295 147 -green open .security zeT65uOuRTKZMjg_bbUc1g 1 1 5 0 0 0 -green open .kibana-377444158_kubeadmin wvMhDwJkR-mRZQO84K0gUQ 3 1 1 0 0 0 -green open infra-000006 5H-KBSXGQKiO7hdapDE23g 3 1 226676 0 295 147 -green open infra-000001 eH53BQ-bSxSWR5xYZB6lVg 3 1 341800 0 443 220 -green open .kibana-6 RVp7TemSSemGJcsSUmuf3A 1 1 4 0 0 0 -green open infra-000011 J7XWBauWSTe0jnzX02fU6A 3 1 226100 0 293 146 -green open app-000001 axSAFfONQDmKwatkjPXdtw 3 1 103186 0 126 57 -green open infra-000016 m9c1iRLtStWSF1GopaRyCg 3 1 13685 0 19 9 -green open infra-000002 Hz6WvINtTvKcQzw-ewmbYg 3 1 228994 0 296 148 -green open infra-000013 KR9mMFUpQl-jraYtanyIGw 3 1 228166 0 298 148 -green open audit-000001 eERqLdLmQOiQDFES1LBATQ 3 1 0 0 0 0 ----- -==== - -.. 
Verify that the log collector is updated to 5.0 or 5.x: -+ -[source,terminal] ----- -$ oc get ds fluentd -o json | grep fluentd-init ----- -+ -Verify that the output includes a `fluentd-init` container: -+ -[source,terminal] ----- -"containerName": "fluentd-init" ----- - -.. Verify that the log visualizer is updated to 5.0 or 5.x using the Kibana CRD: -+ -[source,terminal] ----- -$ oc get kibana kibana -o json ----- -+ -Verify that the output includes a Kibana pod with the `ready` status: -+ -.Sample output with a ready Kibana pod -[%collapsible] -==== -[source,json] ----- -[ -{ -"clusterCondition": { -"kibana-5fdd766ffd-nb2jj": [ -{ -"lastTransitionTime": "2020-06-30T14:11:07Z", -"reason": "ContainerCreating", -"status": "True", -"type": "" -}, -{ -"lastTransitionTime": "2020-06-30T14:11:07Z", -"reason": "ContainerCreating", -"status": "True", -"type": "" -} -] -}, -"deployment": "kibana", -"pods": { -"failed": [], -"notReady": [] -"ready": [] -}, -"replicaSets": [ -"kibana-5fdd766ffd" -], -"replicas": 1 -} -] ----- -==== diff --git a/modules/cluster-logging-updating-logging-to-5-1.adoc b/modules/cluster-logging-updating-logging-to-5-1.adoc deleted file mode 100644 index dbc194adc050..000000000000 --- a/modules/cluster-logging-updating-logging-to-5-1.adoc +++ /dev/null @@ -1,218 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-updating-logging-to-5-1_{context}"] -= Updating OpenShift Logging to the current version - -To update OpenShift Logging from 5.x to the current version, you change the subscriptions for the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator. - -[IMPORTANT] -==== -You must update the OpenShift Elasticsearch Operator _before_ you update the Red Hat OpenShift Logging Operator. You must also update _both_ Operators to the same version. -==== - - -If you update the operators in the wrong order, Kibana does not update and the Kibana custom resource (CR) is not created. To work around this problem, you delete the Red Hat OpenShift Logging Operator pod. When the Red Hat OpenShift Logging Operator pod redeploys, it creates the Kibana CR and Kibana becomes available again. - -.Prerequisites - -* The {product-title} version is 4.7 or later. - -* The OpenShift Logging status is healthy: -+ -** All pods are `ready`. -** The Elasticsearch cluster is healthy. - -* Your Elasticsearch and Kibana data is backed up. - -.Procedure - -. Update the OpenShift Elasticsearch Operator: - -.. From the web console, click *Operators* -> *Installed Operators*. - -.. Select the `openshift-operators-redhat` project. - -.. Click the *OpenShift Elasticsearch Operator*. - -.. Click *Subscription* -> *Channel*. - -.. In the *Change Subscription Update Channel* window, select *stable-5.x* and click *Save*. - -.. Wait for a few seconds, then click *Operators* -> *Installed Operators*. -+ -Verify that the OpenShift Elasticsearch Operator version is 5.x.x. -+ -Wait for the *Status* field to report *Succeeded*. - -. Update the Red Hat OpenShift Logging Operator: - -.. From the web console, click *Operators* -> *Installed Operators*. - -.. Select the `openshift-logging` project. - -.. Click the *Red Hat OpenShift Logging Operator*. - -.. Click *Subscription* -> *Channel*. - -.. In the *Change Subscription Update Channel* window, select *stable-5.x* and click *Save*. - -.. Wait for a few seconds, then click *Operators* -> *Installed Operators*. -+ -Verify that the Red Hat OpenShift Logging Operator version is 5.x.x. -+ -Wait for the *Status* field to report *Succeeded*. - -. 
Check the logging components: - -.. Ensure that all Elasticsearch pods are in the *Ready* status: -+ -[source,terminal] ----- -$ oc get pod -n openshift-logging --selector component=elasticsearch ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -elasticsearch-cdm-1pbrl44l-1-55b7546f4c-mshhk 2/2 Running 0 31m -elasticsearch-cdm-1pbrl44l-2-5c6d87589f-gx5hk 2/2 Running 0 30m -elasticsearch-cdm-1pbrl44l-3-88df5d47-m45jc 2/2 Running 0 29m ----- -+ -.. Ensure that the Elasticsearch cluster is healthy: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch elasticsearch-cdm-1pbrl44l-1-55b7546f4c-mshhk -- health ----- -+ -[source,json] ----- -{ - "cluster_name" : "elasticsearch", - "status" : "green", -} ----- - -.. Ensure that the Elasticsearch cron jobs are created: -+ -[source,terminal] ----- -$ oc project openshift-logging ----- -+ -[source,terminal] ----- -$ oc get cronjob ----- -+ -[source,terminal] ----- -NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE -elasticsearch-im-app */15 * * * * False 0 56s -elasticsearch-im-audit */15 * * * * False 0 56s -elasticsearch-im-infra */15 * * * * False 0 56s ----- - -.. Verify that the log store is updated to 5.x and the indices are `green`: -+ -[source,terminal] ----- -$ oc exec -c elasticsearch -- indices ----- -+ -Verify that the output includes the `app-00000x`, `infra-00000x`, `audit-00000x`, `.security` indices. -+ -.Sample output with indices in a green status -[%collapsible] -==== -[source,terminal] ----- -Tue Jun 30 14:30:54 UTC 2020 -health status index uuid pri rep docs.count docs.deleted store.size pri.store.size -green open infra-000008 bnBvUFEXTWi92z3zWAzieQ 3 1 222195 0 289 144 -green open infra-000004 rtDSzoqsSl6saisSK7Au1Q 3 1 226717 0 297 148 -green open infra-000012 RSf_kUwDSR2xEuKRZMPqZQ 3 1 227623 0 295 147 -green open .kibana_7 1SJdCqlZTPWlIAaOUd78yg 1 1 4 0 0 0 -green open infra-000010 iXwL3bnqTuGEABbUDa6OVw 3 1 248368 0 317 158 -green open infra-000009 YN9EsULWSNaxWeeNvOs0RA 3 1 258799 0 337 168 -green open infra-000014 YP0U6R7FQ_GVQVQZ6Yh9Ig 3 1 223788 0 292 146 -green open infra-000015 JRBbAbEmSMqK5X40df9HbQ 3 1 224371 0 291 145 -green open .orphaned.2020.06.30 n_xQC2dWQzConkvQqei3YA 3 1 9 0 0 0 -green open infra-000007 llkkAVSzSOmosWTSAJM_hg 3 1 228584 0 296 148 -green open infra-000005 d9BoGQdiQASsS3BBFm2iRA 3 1 227987 0 297 148 -green open infra-000003 1-goREK1QUKlQPAIVkWVaQ 3 1 226719 0 295 147 -green open .security zeT65uOuRTKZMjg_bbUc1g 1 1 5 0 0 0 -green open .kibana-377444158_kubeadmin wvMhDwJkR-mRZQO84K0gUQ 3 1 1 0 0 0 -green open infra-000006 5H-KBSXGQKiO7hdapDE23g 3 1 226676 0 295 147 -green open infra-000001 eH53BQ-bSxSWR5xYZB6lVg 3 1 341800 0 443 220 -green open .kibana-6 RVp7TemSSemGJcsSUmuf3A 1 1 4 0 0 0 -green open infra-000011 J7XWBauWSTe0jnzX02fU6A 3 1 226100 0 293 146 -green open app-000001 axSAFfONQDmKwatkjPXdtw 3 1 103186 0 126 57 -green open infra-000016 m9c1iRLtStWSF1GopaRyCg 3 1 13685 0 19 9 -green open infra-000002 Hz6WvINtTvKcQzw-ewmbYg 3 1 228994 0 296 148 -green open infra-000013 KR9mMFUpQl-jraYtanyIGw 3 1 228166 0 298 148 -green open audit-000001 eERqLdLmQOiQDFES1LBATQ 3 1 0 0 0 0 ----- -==== - -.. Verify that the log collector is updated to 5.3: -+ -[source,terminal] ----- -$ oc get ds collector -o json | grep collector ----- -+ -Verify that the output includes a `fluentd-init` container: -+ -[source,terminal] ----- -"containerName": "collector" ----- - -.. 
Verify that the log visualizer is updated to 5.x using the Kibana CRD: -+ -[source,terminal] ----- -$ oc get kibana kibana -o json ----- -+ -Verify that the output includes a Kibana pod with the `ready` status: -+ -.Sample output with a ready Kibana pod -[%collapsible] -==== -[source,json] ----- -[ -{ -"clusterCondition": { -"kibana-5fdd766ffd-nb2jj": [ -{ -"lastTransitionTime": "2020-06-30T14:11:07Z", -"reason": "ContainerCreating", -"status": "True", -"type": "" -}, -{ -"lastTransitionTime": "2020-06-30T14:11:07Z", -"reason": "ContainerCreating", -"status": "True", -"type": "" -} -] -}, -"deployment": "kibana", -"pods": { -"failed": [], -"notReady": [], -"ready": [] -}, -"replicaSets": [ -"kibana-5fdd766ffd" -], -"replicas": 1 -} -] ----- -==== diff --git a/modules/cluster-logging-updating-logging-to-current.adoc b/modules/cluster-logging-updating-logging-to-current.adoc deleted file mode 100644 index fd413f21d54a..000000000000 --- a/modules/cluster-logging-updating-logging-to-current.adoc +++ /dev/null @@ -1,230 +0,0 @@ -// Module included in the following assemblies: -//cluster-logging-upgrading.adoc -:_content-type: PROCEDURE -[id="cluster-logging-updating-logging-to_current_{context}"] -= Updating Logging to the current version - -To update Logging to the current version, you change the subscriptions for the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator. - -[IMPORTANT] -==== -You must update the OpenShift Elasticsearch Operator _before_ you update the Red Hat OpenShift Logging Operator. You must also update _both_ Operators to the same version. -==== - - -If you update the Operators in the wrong order, Kibana does not update and the Kibana custom resource (CR) is not created. To work around this problem, you delete the Red Hat OpenShift Logging Operator pod. When the Red Hat OpenShift Logging Operator pod redeploys, it creates the Kibana CR and Kibana becomes available again. - -.Prerequisites - -* The {product-title} version is 4.7 or later. - -* The Logging status is healthy: -+ -** All pods are `ready`. -** The Elasticsearch cluster is healthy. - -* Your link:https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-restore.html[Elasticsearch and Kibana data is backed up.] - -.Procedure - -. Update the OpenShift Elasticsearch Operator: - -ifndef::openshift-rosa,openshift-dedicated[] -.. In the {product-title} web console, click *Operators* -> *Installed Operators*. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -.. In the {hybrid-console}, click *Operators* -> *Installed Operators*. -endif::[] - -.. Select the `openshift-operators-redhat` project. - -.. Click the *OpenShift Elasticsearch Operator*. - -.. Click *Subscription* -> *Channel*. - -.. 
In the *Change Subscription Update Channel* window, select *stable-5.x* and click *Save*. - -.. Wait for a few seconds, then click *Operators* -> *Installed Operators*. -+ -.. Verify that the Red Hat OpenShift Logging Operator version is 5.y.z -+ -.. Wait for the *Status* field to report *Succeeded*. - -. Check the logging components: - -.. Ensure that all Elasticsearch pods are in the *Ready* status: -+ -[source,terminal] ----- -$ oc get pod -n openshift-logging --selector component=elasticsearch ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -elasticsearch-cdm-1pbrl44l-1-55b7546f4c-mshhk 2/2 Running 0 31m -elasticsearch-cdm-1pbrl44l-2-5c6d87589f-gx5hk 2/2 Running 0 30m -elasticsearch-cdm-1pbrl44l-3-88df5d47-m45jc 2/2 Running 0 29m ----- -+ -.. Ensure that the Elasticsearch cluster is healthy: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch elasticsearch-cdm-1pbrl44l-1-55b7546f4c-mshhk -- health ----- -+ -[source,json] ----- -{ - "cluster_name" : "elasticsearch", - "status" : "green", -} ----- - -.. Ensure that the Elasticsearch cron jobs are created: -+ -[source,terminal] ----- -$ oc project openshift-logging ----- -+ -[source,terminal] ----- -$ oc get cronjob ----- -+ -[source,terminal] ----- -NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE -elasticsearch-im-app */15 * * * * False 0 56s -elasticsearch-im-audit */15 * * * * False 0 56s -elasticsearch-im-infra */15 * * * * False 0 56s ----- - -.. Verify that the log store is updated to 5.x and the indices are `green`: -+ -[source,terminal] ----- -$ oc exec -c elasticsearch -- indices ----- -+ -.. Verify that the output includes the `app-00000x`, `infra-00000x`, `audit-00000x`, `.security` indices. -+ -.Sample output with indices in a green status -[%collapsible] -==== -[source,terminal] ----- -Tue Jun 30 14:30:54 UTC 2020 -health status index uuid pri rep docs.count docs.deleted store.size pri.store.size -green open infra-000008 bnBvUFEXTWi92z3zWAzieQ 3 1 222195 0 289 144 -green open infra-000004 rtDSzoqsSl6saisSK7Au1Q 3 1 226717 0 297 148 -green open infra-000012 RSf_kUwDSR2xEuKRZMPqZQ 3 1 227623 0 295 147 -green open .kibana_7 1SJdCqlZTPWlIAaOUd78yg 1 1 4 0 0 0 -green open infra-000010 iXwL3bnqTuGEABbUDa6OVw 3 1 248368 0 317 158 -green open infra-000009 YN9EsULWSNaxWeeNvOs0RA 3 1 258799 0 337 168 -green open infra-000014 YP0U6R7FQ_GVQVQZ6Yh9Ig 3 1 223788 0 292 146 -green open infra-000015 JRBbAbEmSMqK5X40df9HbQ 3 1 224371 0 291 145 -green open .orphaned.2020.06.30 n_xQC2dWQzConkvQqei3YA 3 1 9 0 0 0 -green open infra-000007 llkkAVSzSOmosWTSAJM_hg 3 1 228584 0 296 148 -green open infra-000005 d9BoGQdiQASsS3BBFm2iRA 3 1 227987 0 297 148 -green open infra-000003 1-goREK1QUKlQPAIVkWVaQ 3 1 226719 0 295 147 -green open .security zeT65uOuRTKZMjg_bbUc1g 1 1 5 0 0 0 -green open .kibana-377444158_kubeadmin wvMhDwJkR-mRZQO84K0gUQ 3 1 1 0 0 0 -green open infra-000006 5H-KBSXGQKiO7hdapDE23g 3 1 226676 0 295 147 -green open infra-000001 eH53BQ-bSxSWR5xYZB6lVg 3 1 341800 0 443 220 -green open .kibana-6 RVp7TemSSemGJcsSUmuf3A 1 1 4 0 0 0 -green open infra-000011 J7XWBauWSTe0jnzX02fU6A 3 1 226100 0 293 146 -green open app-000001 axSAFfONQDmKwatkjPXdtw 3 1 103186 0 126 57 -green open infra-000016 m9c1iRLtStWSF1GopaRyCg 3 1 13685 0 19 9 -green open infra-000002 Hz6WvINtTvKcQzw-ewmbYg 3 1 228994 0 296 148 -green open infra-000013 KR9mMFUpQl-jraYtanyIGw 3 1 228166 0 298 148 -green open audit-000001 eERqLdLmQOiQDFES1LBATQ 3 1 0 0 0 0 ----- -==== - -.. 
Verify that the log collector is updated: -+ -[source,terminal] ----- -$ oc get ds collector -o json | grep collector ----- -+ -.. Verify that the output includes a `collector` container: -+ -[source,terminal] ----- -"containerName": "collector" ----- - -.. Verify that the log visualizer is updated to 5.x using the Kibana CRD: -+ -[source,terminal] ----- -$ oc get kibana kibana -o json ----- -+ -.. Verify that the output includes a Kibana pod with the `ready` status: -+ -.Sample output with a ready Kibana pod -[%collapsible] -==== -[source,json] ----- -[ -{ -"clusterCondition": { -"kibana-5fdd766ffd-nb2jj": [ -{ -"lastTransitionTime": "2020-06-30T14:11:07Z", -"reason": "ContainerCreating", -"status": "True", -"type": "" -}, -{ -"lastTransitionTime": "2020-06-30T14:11:07Z", -"reason": "ContainerCreating", -"status": "True", -"type": "" -} -] -}, -"deployment": "kibana", -"pods": { -"failed": [], -"notReady": [], -"ready": [] -}, -"replicaSets": [ -"kibana-5fdd766ffd" -], -"replicas": 1 -} -] ----- -==== diff --git a/modules/cluster-logging-vector-tech-preview.adoc b/modules/cluster-logging-vector-tech-preview.adoc deleted file mode 100644 index f109e303b706..000000000000 --- a/modules/cluster-logging-vector-tech-preview.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -//cluster-logging-release-notes.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-vector-tech-preview_{context}"] -:FeatureName: Vector -include::snippets/technology-preview.adoc[] - -[id="cluster-logging-about-vector"] -= About Vector -Vector is a log collector offered as a tech-preview alternative to the current default collector for the {logging}. - -The following outputs are supported: - -* `elasticsearch`. An external Elasticsearch instance. The `elasticsearch` output can use a TLS connection. - -* `kafka`. A Kafka broker. The `kafka` output can use an unsecured or TLS connection. - -* `loki`. Loki, a horizontally scalable, highly available, multi-tenant log aggregation system. - - -[id="cluster-logging-enabling-vector"] -== Enabling Vector -Vector is not enabled by default. Use the following steps to enable Vector on your {product-title} cluster. - -[IMPORTANT] -==== -Vector does not support FIPS-enabled clusters. -==== - -.Prerequisites - -* {product-title}: {product-version} -* {logging-title-uc}: 5.4 -* FIPS disabled - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc -n openshift-logging edit ClusterLogging instance ----- - -. Add a `logging.openshift.io/preview-vector-collector: enabled` annotation to the `ClusterLogging` custom resource (CR). - -. Add `vector` as a collection type to the `ClusterLogging` custom resource (CR). 
- -[source,yaml] ----- - apiVersion: "logging.openshift.io/v1" - kind: "ClusterLogging" - metadata: - name: "instance" - namespace: "openshift-logging" - annotations: - logging.openshift.io/preview-vector-collector: enabled - spec: - collection: - logs: - type: "vector" - vector: {} ----- - -[role="_additional-resources"] -.Additional resources -* link:https://vector.dev/docs/about/what-is-vector/[Vector Documentation] diff --git a/modules/cluster-logging-view-cluster-dashboards.adoc b/modules/cluster-logging-view-cluster-dashboards.adoc deleted file mode 100644 index 9205ecfca60f..000000000000 --- a/modules/cluster-logging-view-cluster-dashboards.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-view-cluster-dashboards-about_{context}"] -= About viewing the cluster dashboard - -The {product-title} Logging dashboard contains charts that show details about your Elasticsearch instance at the cluster level. These charts help you diagnose and anticipate problems. diff --git a/modules/cluster-logging-visualizer-indices.adoc b/modules/cluster-logging-visualizer-indices.adoc deleted file mode 100644 index bd796c77befd..000000000000 --- a/modules/cluster-logging-visualizer-indices.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-visualizer.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-visualizer-indices_{context}"] -= Defining Kibana index patterns - -An index pattern defines the Elasticsearch indices that you want to visualize. To explore and visualize data in Kibana, you must create an index pattern. - -.Prerequisites - -* A user must have the `cluster-admin` role, the `cluster-reader` role, or both roles to view the *infra* and *audit* indices in Kibana. The default `kubeadmin` user has proper permissions to view these indices. -+ -If you can view the pods and logs in the `default`, `kube-` and `openshift-` projects, you should be able to access these indices. You can use the following command to check if the current user has appropriate permissions: -+ -[source,terminal] ----- -$ oc auth can-i get pods/log -n ----- -+ -.Example output -[source,terminal] ----- -yes ----- -+ -[NOTE] -==== -The audit logs are not stored in the internal {product-title} Elasticsearch instance by default. To view the audit logs in Kibana, you must use the Log Forwarding API to configure a pipeline that uses the `default` output for audit logs. -==== - -* Elasticsearch documents must be indexed before you can create index patterns. This is done automatically, but it might take a few minutes in a new or updated cluster. - -.Procedure - -To define index patterns and create visualizations in Kibana: - -. In the {product-title} console, click the Application Launcher {launch} and select *Logging*. - -. Create your Kibana index patterns by clicking *Management* -> *Index Patterns* -> *Create index pattern*: - -** Each user must manually create index patterns when logging into Kibana the first time to see logs for their projects. Users must create an index pattern named `app` and use the `@timestamp` time field to view their container logs. - -** Each admin user must create index patterns when logged into Kibana the first time for the `app`, `infra`, and `audit` indices using the `@timestamp` time field. - -. Create Kibana Visualizations from the new index patterns. 
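If the expected indices do not appear when you define an index pattern, it can help to first confirm that documents have been indexed. The following is a minimal sketch that reuses the `indices` helper run inside the Elasticsearch container elsewhere in this documentation; the pod name is an example only, so substitute an Elasticsearch pod from your own cluster:

[source,terminal]
----
$ oc get pods -n openshift-logging --selector component=elasticsearch

$ oc exec -n openshift-logging -c elasticsearch elasticsearch-cdm-example-1 -- indices | grep app
----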
diff --git a/modules/cluster-logging-visualizer-kibana.adoc b/modules/cluster-logging-visualizer-kibana.adoc deleted file mode 100644 index 4c1451412763..000000000000 --- a/modules/cluster-logging-visualizer-kibana.adoc +++ /dev/null @@ -1,122 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/viewing/cluster-logging-visualizer.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-visualizer-kibana_{context}"] -= Viewing cluster logs in Kibana - -You view cluster logs in the Kibana web console. The methods for viewing and visualizing your data in Kibana are beyond the scope of this documentation. For more information, refer to the link:https://www.elastic.co/guide/en/kibana/6.8/tutorial-sample-discover.html[Kibana documentation]. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -* Kibana index patterns must exist. - -* A user must have the `cluster-admin` role, the `cluster-reader` role, or both roles to view the *infra* and *audit* indices in Kibana. The default `kubeadmin` user has proper permissions to view these indices. -+ -If you can view the pods and logs in the `default`, `kube-` and `openshift-` projects, you should be able to access these indices. You can use the following command to check if the current user has appropriate permissions: -+ -[source,terminal] ----- -$ oc auth can-i get pods/log -n ----- -+ -.Example output -[source,terminal] ----- -yes ----- -+ -[NOTE] -==== -The audit logs are not stored in the internal {product-title} Elasticsearch instance by default. To view the audit logs in Kibana, you must use the Log Forwarding API to configure a pipeline that uses the `default` output for audit logs. -==== - -.Procedure - -To view logs in Kibana: - -. In the {product-title} console, click the Application Launcher {launch} and select *Logging*. - -. Log in using the same credentials you use to log in to the {product-title} console. -+ -The Kibana interface launches. - -. In Kibana, click *Discover*. - -. Select the index pattern you created from the drop-down menu in the top-left corner: *app*, *audit*, or *infra*. -+ -The log data displays as time-stamped documents. - -. Expand one of the time-stamped documents. - -. Click the *JSON* tab to display the log entry for that document. 
-+ -.Sample infrastructure log entry in Kibana -[%collapsible] -==== -[source,terminal] ----- -{ - "_index": "infra-000001", - "_type": "_doc", - "_id": "YmJmYTBlNDkZTRmLTliMGQtMjE3NmFiOGUyOWM3", - "_version": 1, - "_score": null, - "_source": { - "docker": { - "container_id": "f85fa55bbef7bb783f041066be1e7c267a6b88c4603dfce213e32c1" - }, - "kubernetes": { - "container_name": "registry-server", - "namespace_name": "openshift-marketplace", - "pod_name": "redhat-marketplace-n64gc", - "container_image": "registry.redhat.io/redhat/redhat-marketplace-index:v4.7", - "container_image_id": "registry.redhat.io/redhat/redhat-marketplace-index@sha256:65fc0c45aabb95809e376feb065771ecda9e5e59cc8b3024c4545c168f", - "pod_id": "8f594ea2-c866-4b5c-a1c8-a50756704b2a", - "host": "ip-10-0-182-28.us-east-2.compute.internal", - "master_url": "https://kubernetes.default.svc", - "namespace_id": "3abab127-7669-4eb3-b9ef-44c04ad68d38", - "namespace_labels": { - "openshift_io/cluster-monitoring": "true" - }, - "flat_labels": [ - "catalogsource_operators_coreos_com/update=redhat-marketplace" - ] - }, - "message": "time=\"2020-09-23T20:47:03Z\" level=info msg=\"serving registry\" database=/database/index.db port=50051", - "level": "unknown", - "hostname": "ip-10-0-182-28.internal", - "pipeline_metadata": { - "collector": { - "ipaddr4": "10.0.182.28", - "inputname": "fluent-plugin-systemd", - "name": "fluentd", - "received_at": "2020-09-23T20:47:15.007583+00:00", - "version": "1.7.4 1.6.0" - } - }, - "@timestamp": "2020-09-23T20:47:03.422465+00:00", - "viaq_msg_id": "YmJmYTBlNDktMDMGQtMjE3NmFiOGUyOWM3", - "openshift": { - "labels": { - "logging": "infra" - } - } - }, - "fields": { - "@timestamp": [ - "2020-09-23T20:47:03.422Z" - ], - "pipeline_metadata.collector.received_at": [ - "2020-09-23T20:47:15.007Z" - ] - }, - "sort": [ - 1600894023422 - ] -} ----- -==== diff --git a/modules/cluster-logging-visualizer-launch.adoc b/modules/cluster-logging-visualizer-launch.adoc deleted file mode 100644 index 268ea7a9c092..000000000000 --- a/modules/cluster-logging-visualizer-launch.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-visualizer.adoc - -[id="cluster-logging-visualizer-launch_{context}"] -= Launching the log visualizer - -{product-title} uses Kibana as the log visualizer. Kibana is a browser-based console to query, discover, and visualize your logs through histograms, line graphs, -pie charts, heat maps, built-in geospatial support, and other visualizations. - -.Prerequisites - -* To list the *infra* and *audit* indices in Kibana, a user must have the `cluster-admin` role, the `cluster-reader` role, or both roles. The default `kubeadmin` user has proper permissions to list these indices. -+ -If you can view the pods and logs in the `default`, `kube-*` and `openshift-*` projects, you should be able to access these indices. You can use the following command to check if the current user has proper permissions: -+ -[source,terminal] ----- -$ oc auth can-i get pods/log -n ----- -+ -.Example output -[source,terminal] ----- -yes ----- -+ -[NOTE] -==== -The audit logs are not stored in the internal {product-title} Elasticsearch instance by default. To view the audit logs in Kibana, you must use the Log Forwarding API to configure a pipeline that uses the `default` output for audit logs. -==== - -.Procedure - -To launch Kibana: - -. In the {product-title} console, click the Application Launcher {launch} and select *Logging*. - -. 
Log in using the same credentials you use to log in to the {product-title} console. -+ -The Kibana interface launches. - -[NOTE] -==== -If you get a *security_exception* error in the Kibana console and cannot access your Kibana indices, you might have an expired OAuth token. If you see this error, log out of the Kibana console, and then log back in. This refreshes your OAuth tokens and you should be able to access your indices. -==== diff --git a/modules/cluster-machine-approver-operator.adoc b/modules/cluster-machine-approver-operator.adoc deleted file mode 100644 index e1a3953c68c0..000000000000 --- a/modules/cluster-machine-approver-operator.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-machine-approver-operator_{context}"] -= Cluster Machine Approver Operator - -[discrete] -== Purpose - -The Cluster Machine Approver Operator automatically approves the CSRs requested for a new worker node after cluster installation. - -[NOTE] -==== -For the control plane node, the `approve-csr` service on the bootstrap node automatically approves all CSRs during the cluster bootstrapping phase. -==== - -[discrete] -== Project - -link:https://github.com/openshift/cluster-machine-approver[cluster-machine-approver-operator] diff --git a/modules/cluster-monitoring-operator.adoc b/modules/cluster-monitoring-operator.adoc deleted file mode 100644 index a9baf0310b3e..000000000000 --- a/modules/cluster-monitoring-operator.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-monitoring-operator_{context}"] -= Cluster Monitoring Operator - -[discrete] -== Purpose - -The Cluster Monitoring Operator manages and updates the Prometheus-based cluster monitoring stack deployed on top of {product-title}. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-monitoring-operator[openshift-monitoring] - -[discrete] -== CRDs - -* `alertmanagers.monitoring.coreos.com` -** Scope: Namespaced -** CR: `alertmanager` -** Validation: Yes -* `prometheuses.monitoring.coreos.com` -** Scope: Namespaced -** CR: `prometheus` -** Validation: Yes -* `prometheusrules.monitoring.coreos.com` -** Scope: Namespaced -** CR: `prometheusrule` -** Validation: Yes -* `servicemonitors.monitoring.coreos.com` -** Scope: Namespaced -** CR: `servicemonitor` -** Validation: Yes - -[discrete] -== Configuration objects - -[source,terminal] ----- -$ oc -n openshift-monitoring edit cm cluster-monitoring-config ----- diff --git a/modules/cluster-network-operator.adoc b/modules/cluster-network-operator.adoc deleted file mode 100644 index e5027d4ea013..000000000000 --- a/modules/cluster-network-operator.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-network-operator_{context}"] -= Cluster Network Operator - -[discrete] -== Purpose - -The Cluster Network Operator installs and upgrades the networking components on an {product-title} cluster. 
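As a quick check, assuming you have cluster-admin access, you can inspect the Operator status and its cluster-wide configuration object, for example:

[source,terminal]
----
$ oc get clusteroperator network

$ oc get networks.operator.openshift.io cluster -o yaml
----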
diff --git a/modules/cluster-node-tuning-operator-default-profiles-set.adoc b/modules/cluster-node-tuning-operator-default-profiles-set.adoc deleted file mode 100644 index 2bc11d2b3695..000000000000 --- a/modules/cluster-node-tuning-operator-default-profiles-set.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/using-node-tuning-operator.adoc -// * post_installation_configuration/node-tasks.adoc - -[id="custom-tuning-default-profiles-set_{context}"] -= Default profiles set on a cluster - -The following are the default profiles set on a cluster. - -[source,yaml] ----- -apiVersion: tuned.openshift.io/v1 -kind: Tuned -metadata: - name: default - namespace: openshift-cluster-node-tuning-operator -spec: - profile: - - data: | - [main] - summary=Optimize systems running OpenShift (provider specific parent profile) - include=-provider-${f:exec:cat:/var/lib/tuned/provider},openshift - name: openshift - recommend: - - profile: openshift-control-plane - priority: 30 - match: - - label: node-role.kubernetes.io/master - - label: node-role.kubernetes.io/infra - - profile: openshift-node - priority: 40 ----- - -Starting with {product-title} 4.9, all OpenShift TuneD profiles are shipped with -the TuneD package. You can use the `oc exec` command to view the contents of these profiles: - -[source,terminal] ----- -$ oc exec $tuned_pod -n openshift-cluster-node-tuning-operator -- find /usr/lib/tuned/openshift{,-control-plane,-node} -name tuned.conf -exec grep -H ^ {} \; ----- diff --git a/modules/cluster-node-tuning-operator-verify-profiles.adoc b/modules/cluster-node-tuning-operator-verify-profiles.adoc deleted file mode 100644 index 1f056a5689a0..000000000000 --- a/modules/cluster-node-tuning-operator-verify-profiles.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/using-node-tuning-operator.adoc - -[id="verifying-tuned-profiles-are-applied_{context}"] -= Verifying that the TuneD profiles are applied - -Verify the TuneD profiles that are applied to your cluster node. - -[source,terminal] ----- -$ oc get profile -n openshift-cluster-node-tuning-operator ----- - -.Example output -[source,terminal] ----- -NAME TUNED APPLIED DEGRADED AGE -master-0 openshift-control-plane True False 6h33m -master-1 openshift-control-plane True False 6h33m -master-2 openshift-control-plane True False 6h33m -worker-a openshift-node True False 6h28m -worker-b openshift-node True False 6h28m ----- - -* `NAME`: Name of the Profile object. There is one Profile object per node and their names match. -* `TUNED`: Name of the desired TuneD profile to apply. -* `APPLIED`: `True` if the TuneD daemon applied the desired profile. (`True/False/Unknown`). -* `DEGRADED`: `True` if any errors were reported during application of the TuneD profile (`True/False/Unknown`). -* `AGE`: Time elapsed since the creation of Profile object. - -The `ClusterOperator/node-tuning` object also contains useful information about the Operator and its node agents' health. For example, Operator misconfiguration is reported by `ClusterOperator/node-tuning` status messages. 
- -To get status information about the `ClusterOperator/node-tuning` object, run the following command: - -[source,terminal] ----- -$ oc get co/node-tuning -n openshift-cluster-node-tuning-operator ----- - -.Example output -[source,terminal,subs="attributes+"] ----- -NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE -node-tuning {product-version}.1 True False True 60m 1/5 Profiles with bootcmdline conflict ----- - -If either the `ClusterOperator/node-tuning` or a profile object's status is `DEGRADED`, additional information is provided in the Operator or operand logs. diff --git a/modules/cluster-openshift-controller-manager-operators.adoc b/modules/cluster-openshift-controller-manager-operators.adoc deleted file mode 100644 index 52d18d574c48..000000000000 --- a/modules/cluster-openshift-controller-manager-operators.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-openshift-controller-manager-operator_{context}"] -= OpenShift Controller Manager Operator - -[discrete] -== Purpose - -The OpenShift Controller Manager Operator installs and maintains the `OpenShiftControllerManager` custom resource in a cluster and can be viewed with: - -[source,terminal] ----- -$ oc get clusteroperator openshift-controller-manager -o yaml ----- - -The custom resource definition (CRD) `openshiftcontrollermanagers.operator.openshift.io` can be viewed in a cluster with: - -[source,terminal] ----- -$ oc get crd openshiftcontrollermanagers.operator.openshift.io -o yaml ----- - -[discrete] -== Project - -link:https://github.com/openshift/cluster-openshift-controller-manager-operator[cluster-openshift-controller-manager-operator] diff --git a/modules/cluster-resources.adoc b/modules/cluster-resources.adoc deleted file mode 100644 index 1efb54bcb609..000000000000 --- a/modules/cluster-resources.adoc +++ /dev/null @@ -1,40 +0,0 @@ -:_content-type: PROCEDURE -[id="support-cluster-resources_{context}"] -= Interacting with your cluster resources - -You can interact with cluster resources by using the OpenShift CLI (`oc`) tool in {product-title}. The cluster resources that you see after running the `oc api-resources` command can be edited. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have access to the web console or you have installed the `oc` CLI tool. - -.Procedure - -. To see which configuration Operators have been applied, run the following command: -+ -[source,terminal] ----- -$ oc api-resources -o name | grep config.openshift.io ----- - -. To see what cluster resources you can configure, run the following command: -+ -[source,terminal] ----- -$ oc explain .config.openshift.io ----- - -. To see the configuration of custom resource definition (CRD) objects in the cluster, run the following command: -+ -[source,terminal] ----- -$ oc get .config -o yaml ----- - -. 
To edit the cluster resource configuration, run the following command: -+ -[source,terminal] ----- -$ oc edit .config -o yaml ----- diff --git a/modules/cluster-samples-operator.adoc b/modules/cluster-samples-operator.adoc deleted file mode 100644 index 26328f29cd5b..000000000000 --- a/modules/cluster-samples-operator.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc -// * installing/cluster-capabilities.adoc - -// operators/operator-reference.adoc -ifeval::["{context}" == "cluster-operators-ref"] -:operator-ref: -endif::[] - -// installing/cluster-capabilities.adoc -ifeval::["{context}" == "cluster-capabilities"] -:cluster-caps: -endif::[] - -:_content-type: REFERENCE -[id="cluster-samples-operator_{context}"] -ifdef::operator-ref[= Cluster Samples Operator] -ifdef::cluster-caps[= OpenShift samples capability] - -ifdef::operator-ref[] - -[NOTE] -==== -The Cluster Samples Operator is an optional cluster capability that can be disabled by cluster administrators during installation. For more information about optional cluster capabilities, see "Cluster capabilities" in _Installing_. -==== - -endif::operator-ref[] - -[discrete] -== Purpose - -ifdef::cluster-caps[] -The Cluster Samples Operator provides the features for the `openshift-samples` capability. -endif::cluster-caps[] - -The Cluster Samples Operator manages the sample image streams and templates stored in the `openshift` namespace. - -On initial start up, the Operator creates the default samples configuration resource to initiate the creation of the image streams and templates. The configuration object is a cluster scoped object with the key `cluster` and type `configs.samples`. - -The image streams are the {op-system-first}-based {product-title} image streams pointing to images on `registry.redhat.io`. Similarly, the templates are those categorized as {product-title} templates. - -ifdef::cluster-caps[] -If you disable the samples capability, users cannot access the image streams, samples, and templates it provides. Depending on your deployment, you might want to disable this component if you do not need it. -endif::[] - -ifdef::operator-ref[] -The Cluster Samples Operator deployment is contained within the `openshift-cluster-samples-operator` namespace. On start up, the install pull secret is used by the image stream import logic in the {product-registry} and API server to authenticate with `registry.redhat.io`. An administrator can create any additional secrets in the `openshift` namespace if they change the registry used for the sample image streams. If created, those secrets contain the content of a `config.json` for `docker` needed to facilitate image import. - -The image for the Cluster Samples Operator contains image stream and template definitions for the associated {product-title} release. After the Cluster Samples Operator creates a sample, it adds an annotation that denotes the {product-title} version that it is compatible with. The Operator uses this annotation to ensure that each sample matches the compatible release version. Samples outside of its inventory are ignored, as are skipped samples. - -Modifications to any samples that are managed by the Operator are allowed as long as the version annotation is not modified or deleted. However, on an upgrade, as the version annotation will change, those modifications can get replaced as the sample will be updated with the newer version. 
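For example, assuming cluster-admin access, you can review the current samples configuration, including the management state and any skipped image streams or templates, by inspecting the cluster-scoped configuration object described above:

[source,terminal]
----
$ oc get configs.samples.operator.openshift.io cluster -o yaml
----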
The Jenkins images are part of the image payload from the installation and are tagged into the image streams directly. - -The samples resource includes a finalizer, which cleans up the following upon its deletion: - -* Operator-managed image streams -* Operator-managed templates -* Operator-generated configuration resources -* Cluster status resources - -Upon deletion of the samples resource, the Cluster Samples Operator recreates the resource using the default configuration. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-samples-operator[cluster-samples-operator] -endif::operator-ref[] - -ifeval::["{context}" == "cluster-operators-ref"] -:!operator-ref: -endif::[] - -ifeval::["{context}" == "cluster-caps"] -:!cluster-caps: -endif::[] \ No newline at end of file diff --git a/modules/cluster-storage-operator.adoc b/modules/cluster-storage-operator.adoc deleted file mode 100644 index 794f37e0d3d4..000000000000 --- a/modules/cluster-storage-operator.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc -// * installing/cluster-capabilities.adoc - -ifeval::["{context}" == "cluster-operators-ref"] -:operator-ref: -endif::[] - -ifeval::["{context}" == "cluster-capabilities"] -:cluster-caps: -endif::[] - -[id="cluster-storage-operator_{context}"] -ifdef::operator-ref[= Cluster Storage Operator] -ifdef::cluster-caps[= Cluster storage capability] - -ifdef::operator-ref[] - -[NOTE] -==== -The Cluster Storage Operator is an optional cluster capability that can be disabled by cluster administrators during installation. For more information about optional cluster capabilities, see "Cluster capabilities" in _Installing_. -==== - -endif::operator-ref[] - -[discrete] -== Purpose - -ifdef::cluster-caps[] - -The Cluster Storage Operator provides the features for the `Storage` capability. - -endif::cluster-caps[] - -The Cluster Storage Operator sets {product-title} cluster-wide storage defaults. It ensures a default `storageclass` exists for {product-title} clusters. It also installs Container Storage Interface (CSI) drivers which enable your cluster to use various storage backends. - -ifdef::cluster-caps[] -[IMPORTANT] -==== -If the cluster storage capability is disabled, the cluster will not have a default `storageclass` or any CSI drivers. Users with administrator privileges can create a default `storageclass` and manually install CSI drivers if the cluster storage capability is disabled. -==== -endif::cluster-caps[] - -ifdef::operator-ref[] - -[discrete] -== Project - -link:https://github.com/openshift/cluster-storage-operator[cluster-storage-operator] - -[discrete] -== Configuration - -No configuration is required. - -endif::operator-ref[] - -[discrete] -== Notes - -* The storage class that the Operator creates can be made non-default by editing its annotation, but this storage class cannot be deleted as long as the Operator runs. 
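To illustrate the note above, the default flag is controlled by an annotation on the storage class, so an administrator can demote a default storage class with a patch similar to the following sketch. The storage class name `standard-csi` is an example only; substitute a storage class that exists in your cluster:

[source,terminal]
----
$ oc patch storageclass standard-csi -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}'
----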
- -ifeval::["{context}" == "cluster-operators-ref"] -:!operator-ref: -endif::[] - -ifeval::["{context}" == "cluster-caps"] -:!cluster-caps: -endif::[] \ No newline at end of file diff --git a/modules/cluster-telemetry.adoc b/modules/cluster-telemetry.adoc deleted file mode 100644 index cb8c7a6cd256..000000000000 --- a/modules/cluster-telemetry.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_alibaba/installing-alibaba-vpc.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_azure/installing-azure-vnet.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_azure/installing-azure-default.adoc -// * installing/installing_azure/installing-azure-network-customizations.adoc -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-customizations.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_aws/installing-aws-customizations.adoc -// * installing/installing_aws/installing-aws-private.adoc -// * installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc -// * installing/installing_aws/installing-aws-default.adoc -// * installing/installing_aws/installing-aws-vpc.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-china.adoc -// * installing/installing_aws/installing-aws-outposts-remote-workers.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-installer-restricted.adoc -// * installing/installing_openstack/installing-openstack-user.adoc -// * installing/installing_openstack/installing-openstack-user-sr-iov-kuryr.adoc -// * 
installing/installing_openstack/installing-openstack-user-sr-iov.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc -// * installing/installing_openstack/installing-openstack-installer.adoc -// * installing/installing_openstack/installing-openstack-installer-sr-iov.adoc -// * installing/installing_gcp/installing-gcp-customizations.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc -// * installing/installing_gcp/installing-gcp-private.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-gcp-default.adoc -// * installing/installing_gcp/installing-gcp-vpc.adoc -// * installing/installing_gcp/installing-gcp-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc -// * installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc -// * installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc -// * installing/installing-nutanix-installer-provisioned.adoc -// * installing/installing-restricted-networks-nutanix-installer-provisioned.adoc - -:_content-type: CONCEPT -[id="cluster-telemetry_{context}"] -ifndef::openshift-origin[] -= Telemetry access for {product-title} - -In {product-title} {product-version}, the Telemetry service, which runs by default to provide metrics about cluster health and the success of updates, requires internet access. If your cluster is connected to the internet, Telemetry runs automatically, and your cluster is registered to {cluster-manager-url}. - -After you confirm that your {cluster-manager-url} inventory is correct, either maintained automatically by Telemetry or manually by using {cluster-manager}, link:https://access.redhat.com/documentation/en-us/subscription_central/2020-04/html/getting_started_with_subscription_watch/con-how-to-select-datacollection-tool_assembly-requirements-and-your-responsibilities-ctxt#red_hat_openshift[use subscription watch] to track your {product-title} subscriptions at the account or multi-cluster level. - -endif::openshift-origin[] diff --git a/modules/cluster-version-operator.adoc b/modules/cluster-version-operator.adoc deleted file mode 100644 index 4fea6b5bdcf1..000000000000 --- a/modules/cluster-version-operator.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-version-operator_{context}"] -= Cluster Version Operator - -[discrete] -== Purpose - -Cluster Operators manage specific areas of cluster functionality. The Cluster Version Operator (CVO) manages the lifecycle of cluster Operators, many of which are installed in {product-title} by default. 
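For example, assuming cluster-admin access, you can list the version that the CVO is reconciling and the cluster Operators that it manages:

[source,terminal]
----
$ oc get clusterversion

$ oc get clusteroperators
----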
- -The CVO also checks with the OpenShift Update Service to see the valid updates and update paths based on current component versions and information in the graph. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-version-operator[cluster-version-operator] diff --git a/modules/cluster-wide-proxy-preqs.adoc b/modules/cluster-wide-proxy-preqs.adoc deleted file mode 100644 index f495c5dc44c1..000000000000 --- a/modules/cluster-wide-proxy-preqs.adoc +++ /dev/null @@ -1,85 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-cluster-wide-proxy.adoc - -:_content-type: CONCEPT -[id="cluster-wide-proxy-prereqs_{context}"] -= Prerequisites for configuring a cluster-wide proxy - -To configure a cluster-wide proxy, you must meet the following requirements. These requirements are valid when you configure a proxy during installation or post-installation. - -[discrete] -[id="cluster-wide-proxy-general-prereqs_{context}"] -== General requirements - -* You are the cluster owner. -* Your account has sufficient privileges. -ifdef::openshift-rosa[] -* You have an existing Virtual Private Cloud (VPC) for your cluster. -endif::openshift-rosa[] -ifdef::openshift-dedicated[] -* You have an existing Virtual Private Cloud (VPC) for your cluster. -* You are using the Customer Cloud Subscription (CCS) model for your cluster. -endif::openshift-dedicated[] -* The proxy can access the VPC for the cluster and the private subnets of the VPC. The proxy is also accessible from the VPC for the cluster and from the private subnets of the VPC. -* You have added the `ec2..amazonaws.com`, `elasticloadbalancing..amazonaws.com`, and `s3..amazonaws.com` endpoints to your VPC endpoint. These endpoints are required to complete requests from the nodes to the AWS EC2 API. Because the proxy works at the container level and not at the node level, you must route these requests to the AWS EC2 API through the AWS private network. Adding the public IP address of the EC2 API to your allowlist in your proxy server is not enough. - -[discrete] -[id="cluster-wide-proxy-network-prereqs_{context}"] -== Network requirements - -* If your proxy re-encrypts egress traffic, you must create exclusions to the domain and port combinations. The following tables offer guidance on these exceptions. -+ --- -** Add the following OpenShift URLs to your allowlist for re-encryption. -+ -[cols="6,1,6",options="header"] -|=== -|Address | Protocol/Port | Function -|`observatorium-mst.api.openshift.com` -|https/443 -|Required. Used for Managed OpenShift-specific telemetry. - -|`sso.redhat.com` -|https/443 -|The https://cloud.redhat.com/openshift site uses authentication from sso.redhat.com to download the cluster pull secret and use Red Hat SaaS solutions to facilitate monitoring of your subscriptions, cluster inventory, and chargeback reporting. -|=== -+ -** Add the following site reliability engineering (SRE) and management URLs to your allowlist for re-encryption. 
-+ -[cols="6,1,6",options="header"] -|=== -|Address | Protocol/Port | Function -|`*.osdsecuritylogs.splunkcloud.com` - -**OR** - -`inputs1.osdsecuritylogs.splunkcloud.com` -`inputs2.osdsecuritylogs.splunkcloud.com` -`inputs4.osdsecuritylogs.splunkcloud.com` -`inputs5.osdsecuritylogs.splunkcloud.com` -`inputs6.osdsecuritylogs.splunkcloud.com` -`inputs7.osdsecuritylogs.splunkcloud.com` -`inputs8.osdsecuritylogs.splunkcloud.com` -`inputs9.osdsecuritylogs.splunkcloud.com` -`inputs10.osdsecuritylogs.splunkcloud.com` -`inputs11.osdsecuritylogs.splunkcloud.com` -`inputs12.osdsecuritylogs.splunkcloud.com` -`inputs13.osdsecuritylogs.splunkcloud.com` -`inputs14.osdsecuritylogs.splunkcloud.com` -`inputs15.osdsecuritylogs.splunkcloud.com` -|tcp/9997 -|Used by the splunk-forwarder-operator as a log forwarding endpoint to be used by Red Hat SRE for log-based alerting. - -|`http-inputs-osdsecuritylogs.splunkcloud.com` -|https/443 -|Used by the splunk-forwarder-operator as a log forwarding endpoint to be used by Red Hat SRE for log-based alerting. -|=== --- -+ -[IMPORTANT] -==== -The use of a proxy server to perform TLS re-encryption is currently not supported if the server is acting as a transparent forward proxy where it is not configured on-cluster via the `--http-proxy` or `--https-proxy` arguments. - -A transparent forward proxy intercepts the cluster traffic, but it is not actually configured on the cluster itself. -==== diff --git a/modules/cnf-about-irq-affinity-setting.adoc b/modules/cnf-about-irq-affinity-setting.adoc deleted file mode 100644 index ec13c4e595d0..000000000000 --- a/modules/cnf-about-irq-affinity-setting.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/cnf-low-latency-tuning.adoc - -:_content-type: CONCEPT -[id="about_irq_affinity_setting_{context}"] -= About support of IRQ affinity setting - -Some IRQ controllers lack support for IRQ affinity setting and will always expose all online CPUs as the IRQ mask. These IRQ controllers effectively run on CPU 0. - -The following are examples of drivers and hardware that Red Hat are aware lack support for IRQ affinity setting. The list is, by no means, exhaustive: - -* Some RAID controller drivers, such as `megaraid_sas` -* Many non-volatile memory express (NVMe) drivers -* Some LAN on motherboard (LOM) network controllers -* The driver uses `managed_irqs` - -[NOTE] -==== -The reason they do not support IRQ affinity setting might be associated with factors such as the type of processor, the IRQ controller, or the circuitry connections in the motherboard. -==== - -If the effective affinity of any IRQ is set to an isolated CPU, it might be a sign of some hardware or driver not supporting IRQ affinity setting. 
To find the effective affinity, log in to the host and run the following command: - -[source,terminal] ----- -$ find /proc/irq/ -name effective_affinity -exec sh -c 'i="$1"; mask=$(cat $i); file=$(echo $i); echo $file: $mask' _ {} \; ----- - -.Example output - -[source,terminal] ----- -/proc/irq/0/effective_affinity: 1 -/proc/irq/1/effective_affinity: 8 -/proc/irq/2/effective_affinity: 0 -/proc/irq/3/effective_affinity: 1 -/proc/irq/4/effective_affinity: 2 -/proc/irq/5/effective_affinity: 1 -/proc/irq/6/effective_affinity: 1 -/proc/irq/7/effective_affinity: 1 -/proc/irq/8/effective_affinity: 1 -/proc/irq/9/effective_affinity: 2 -/proc/irq/10/effective_affinity: 1 -/proc/irq/11/effective_affinity: 1 -/proc/irq/12/effective_affinity: 4 -/proc/irq/13/effective_affinity: 1 -/proc/irq/14/effective_affinity: 1 -/proc/irq/15/effective_affinity: 1 -/proc/irq/24/effective_affinity: 2 -/proc/irq/25/effective_affinity: 4 -/proc/irq/26/effective_affinity: 2 -/proc/irq/27/effective_affinity: 1 -/proc/irq/28/effective_affinity: 8 -/proc/irq/29/effective_affinity: 4 -/proc/irq/30/effective_affinity: 4 -/proc/irq/31/effective_affinity: 8 -/proc/irq/32/effective_affinity: 8 -/proc/irq/33/effective_affinity: 1 -/proc/irq/34/effective_affinity: 2 ----- - -Some drivers use `managed_irqs`, whose affinity is managed internally by the kernel and userspace cannot change the affinity. In some cases, these IRQs might be assigned to isolated CPUs. For more information about `managed_irqs`, see link:https://access.redhat.com/solutions/4819541[Affinity of managed interrupts cannot be changed even if they target isolated CPU]. \ No newline at end of file diff --git a/modules/cnf-about-numa-aware-scheduling.adoc b/modules/cnf-about-numa-aware-scheduling.adoc deleted file mode 100644 index 7b711d8945f9..000000000000 --- a/modules/cnf-about-numa-aware-scheduling.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_content-type: CONCEPT -[id="cnf-about-numa-aware-scheduling_{context}"] -= About NUMA-aware scheduling - -Non-Uniform Memory Access (NUMA) is a compute platform architecture that allows different CPUs to access different regions of memory at different speeds. NUMA resource topology refers to the locations of CPUs, memory, and PCI devices relative to each other in the compute node. Co-located resources are said to be in the same _NUMA zone_. For high-performance applications, the cluster needs to process pod workloads in a single NUMA zone. - -NUMA architecture allows a CPU with multiple memory controllers to use any available memory across CPU complexes, regardless of where the memory is located. This allows for increased flexibility at the expense of performance. A CPU processing a workload using memory that is outside its NUMA zone is slower than a workload processed in a single NUMA zone. Also, for I/O-constrained workloads, the network interface on a distant NUMA zone slows down how quickly information can reach the application. High-performance workloads, such as telecommunications workloads, cannot operate to specification under these conditions. NUMA-aware scheduling aligns the requested cluster compute resources (CPUs, memory, devices) in the same NUMA zone to process latency-sensitive or high-performance workloads efficiently. NUMA-aware scheduling also improves pod density per compute node for greater resource efficiency. 
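As a rough sketch of how a workload opts in to this behavior, a latency-sensitive pod typically requests the secondary scheduler by name and uses the guaranteed Quality of Service class so that its CPUs and memory can be aligned to a single NUMA zone. The scheduler name and image below are assumptions for illustration; use the values configured in your cluster:

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: numa-aware-example
  namespace: example-namespace
spec:
  schedulerName: topo-aware-scheduler # assumed name of the NUMA-aware secondary scheduler
  containers:
  - name: app
    image: registry.example.com/example/app:latest # example image
    resources:
      requests:       # equal requests and limits give the pod the guaranteed QoS class
        cpu: "4"
        memory: 8Gi
      limits:
        cpu: "4"
        memory: 8Gi
----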
- -By integrating the Node Tuning Operator's performance profile with NUMA-aware scheduling, you can further configure CPU affinity to optimize performance for latency-sensitive workloads. - -The default {product-title} pod scheduler scheduling logic considers the available resources of the entire compute node, not individual NUMA zones. If the most restrictive resource alignment is requested in the kubelet topology manager, error conditions can occur when admitting the pod to a node. Conversely, if the most restrictive resource alignment is not requested, the pod can be admitted to the node without proper resource alignment, leading to worse or unpredictable performance. For example, runaway pod creation with `Topology Affinity Error` statuses can occur when the pod scheduler makes suboptimal scheduling decisions for guaranteed pod workloads by not knowing if the pod's requested resources are available. Scheduling mismatch decisions can cause indefinite pod startup delays. Also, depending on the cluster state and resource allocation, poor pod scheduling decisions can cause extra load on the cluster because of failed startup attempts. - -The NUMA Resources Operator deploys a custom NUMA resources secondary scheduler and other resources to mitigate against the shortcomings of the default {product-title} pod scheduler. The following diagram provides a high-level overview of NUMA-aware pod scheduling. - -.NUMA-aware scheduling overview -image::216_OpenShift_Topology-aware_Scheduling_0222.png[Diagram of NUMA-aware scheduling that shows how the various components interact with each other in the cluster] - -NodeResourceTopology API:: The `NodeResourceTopology` API describes the available NUMA zone resources in each compute node. -NUMA-aware scheduler:: The NUMA-aware secondary scheduler receives information about the available NUMA zones from the `NodeResourceTopology` API and schedules high-performance workloads on a node where it can be optimally processed. -Node topology exporter:: The node topology exporter exposes the available NUMA zone resources for each compute node to the `NodeResourceTopology` API. The node topology exporter daemon tracks the resource allocation from the kubelet by using the `PodResources` API. -PodResources API:: The `PodResources` API is local to each node and exposes the resource topology and available resources to the kubelet. -+ -[NOTE] -==== -The `List` endpoint of the `PodResources` API exposes exclusive CPUs allocated to a particular container. The API does not expose CPUs that belong to a shared pool. - -The `GetAllocatableResources` endpoint exposes allocatable resources available on a node. -==== diff --git a/modules/cnf-about-ptp-and-clock-synchronization.adoc b/modules/cnf-about-ptp-and-clock-synchronization.adoc deleted file mode 100644 index c60985b76fcd..000000000000 --- a/modules/cnf-about-ptp-and-clock-synchronization.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: CONCEPT -[id="cnf-about-ptp-and-clock-synchronization_{context}"] -= About PTP and clock synchronization error events - -Loss of PTP synchronization is a critical error for a RAN network. If synchronization is lost on a node, the radio might be shut down and the network Over the Air (OTA) traffic might be shifted to another node in the wireless network. 
Fast event notifications mitigate against workload errors by allowing cluster nodes to communicate PTP clock sync status to the vRAN application running in the DU. - -Event notifications are available to vRAN applications running on the same DU node. A publish-subscribe REST API passes events notifications to the messaging bus. Publish-subscribe messaging, or pub-sub messaging, is an asynchronous service-to-service communication architecture where any message published to a topic is immediately received by all of the subscribers to the topic. - -The PTP Operator generates fast event notifications for every PTP-capable network interface. You can access the events by using a `cloud-event-proxy` sidecar container over an HTTP or Advanced Message Queuing Protocol (AMQP) message bus. - -[NOTE] -==== -PTP fast event notifications are available for network interfaces configured to use PTP ordinary clocks or PTP boundary clocks. -==== - -include::snippets/ptp-amq-interconnect-eol.adoc[] diff --git a/modules/cnf-about-ptp-fast-event-notifications-framework.adoc b/modules/cnf-about-ptp-fast-event-notifications-framework.adoc deleted file mode 100644 index c14f62f40fec..000000000000 --- a/modules/cnf-about-ptp-fast-event-notifications-framework.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: CONCEPT -[id="cnf-about-ptp-fast-event-notifications-framework_{context}"] -= About the PTP fast event notifications framework - -Use the Precision Time Protocol (PTP) fast event notifications framework to subscribe cluster applications to PTP events that the bare-metal cluster node generates. - -[NOTE] -==== -The fast events notifications framework uses a REST API for communication. The REST API is based on the _O-RAN O-Cloud Notification API Specification for Event Consumers 3.0_ that is available from link:https://orandownloadsweb.azurewebsites.net/specifications[O-RAN ALLIANCE Specifications]. -==== - -The framework consists of a publisher, subscriber, and an AMQ or HTTP messaging protocol to handle communications between the publisher and subscriber applications. -Applications run the `cloud-event-proxy` container in a sidecar pattern to subscribe to PTP events. -The `cloud-event-proxy` sidecar container can access the same resources as the primary application container without using any of the resources of the primary application and with no significant latency. - -include::snippets/ptp-amq-interconnect-eol.adoc[] - -.Overview of PTP fast events -image::319_OpenShift_PTP_bare-metal_OCP_nodes_0323_4.13.png[Overview of PTP fast events] - -image:darkcircle-1.png[20,20] Event is generated on the cluster host:: -`linuxptp-daemon` in the PTP Operator-managed pod runs as a Kubernetes `DaemonSet` and manages the various `linuxptp` processes (`ptp4l`, `phc2sys`, and optionally for grandmaster clocks, `ts2phc`). -The `linuxptp-daemon` passes the event to the UNIX domain socket. - -image:darkcircle-2.png[20,20] Event is passed to the cloud-event-proxy sidecar:: -The PTP plugin reads the event from the UNIX domain socket and passes it to the `cloud-event-proxy` sidecar in the PTP Operator-managed pod. -`cloud-event-proxy` delivers the event from the Kubernetes infrastructure to Cloud-Native Network Functions (CNFs) with low latency. - -image:darkcircle-3.png[20,20] Event is persisted:: -The `cloud-event-proxy` sidecar in the PTP Operator-managed pod processes the event and publishes the cloud-native event by using a REST API. 
-+ -[NOTE] -==== -When you use HTTP transport for events, you must persist the events subscription in the PTP Operator-managed pod by using a Persistent Volume (PV) resource or similar persistent storage mechanism. -==== - -image:darkcircle-4.png[20,20] Message is transported:: -The message transporter transports the event to the `cloud-event-proxy` sidecar in the application pod over HTTP or AMQP 1.0 QPID. - -image:darkcircle-5.png[20,20] Event is available from the REST API:: -The `cloud-event-proxy` sidecar in the Application pod processes the event and makes it available by using the REST API. - -image:darkcircle-6.png[20,20] Consumer application requests a subscription and receives the subscribed event:: -The consumer application sends an API request to the `cloud-event-proxy` sidecar in the application pod to create a PTP events subscription. -The `cloud-event-proxy` sidecar creates an AMQ or HTTP messaging listener protocol for the resource specified in the subscription. - -The `cloud-event-proxy` sidecar in the application pod receives the event from the PTP Operator-managed pod, unwraps the cloud events object to retrieve the data, and posts the event to the consumer application. -The consumer application listens to the address specified in the resource qualifier and receives and processes the PTP event. diff --git a/modules/cnf-about-the-profile-creator-tool.adoc b/modules/cnf-about-the-profile-creator-tool.adoc deleted file mode 100644 index 8ef33fa5d1cf..000000000000 --- a/modules/cnf-about-the-profile-creator-tool.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-792 (4.8) -// * scalability_and_performance/cnf-create-performance-profiles.adoc - -:_content-type: CONCEPT -[id="cnf-about-the-profile-creator-tool_{context}"] -= About the Performance Profile Creator - -The Performance Profile Creator (PPC) is a command-line tool, delivered with the Node Tuning Operator, used to create the performance profile. -The tool consumes `must-gather` data from the cluster and several user-supplied profile arguments. The PPC generates a performance profile that is appropriate for your hardware and topology. - -The tool is run by one of the following methods: - -* Invoking `podman` - -* Calling a wrapper script diff --git a/modules/cnf-about-topology-aware-lifecycle-manager-blocking-crs.adoc b/modules/cnf-about-topology-aware-lifecycle-manager-blocking-crs.adoc deleted file mode 100644 index af99ad355260..000000000000 --- a/modules/cnf-about-topology-aware-lifecycle-manager-blocking-crs.adoc +++ /dev/null @@ -1,381 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: PROCEDURE -[id="cnf-about-topology-aware-lifecycle-manager-blocking-crs_{context}"] -= Blocking ClusterGroupUpgrade CRs - -You can create multiple `ClusterGroupUpgrade` CRs and control their order of application. - -For example, if you create `ClusterGroupUpgrade` CR C that blocks the start of `ClusterGroupUpgrade` CR A, then `ClusterGroupUpgrade` CR A cannot start until the status of `ClusterGroupUpgrade` CR C becomes `UpgradeComplete`. - -One `ClusterGroupUpgrade` CR can have multiple blocking CRs. In this case, all the blocking CRs must complete before the upgrade for the current CR can start. - -.Prerequisites - -* Install the {cgu-operator-first}. -* Provision one or more managed clusters. -* Log in as a user with `cluster-admin` privileges. 
-* Create {rh-rhacm} policies in the hub cluster. - -.Procedure - -. Save the content of the `ClusterGroupUpgrade` CRs in the `cgu-a.yaml`, `cgu-b.yaml`, and `cgu-c.yaml` files. -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-a - namespace: default -spec: - blockingCRs: <1> - - name: cgu-c - namespace: default - clusters: - - spoke1 - - spoke2 - - spoke3 - enable: false - managedPolicies: - - policy1-common-cluster-version-policy - - policy2-common-pao-sub-policy - - policy3-common-ptp-sub-policy - remediationStrategy: - canaries: - - spoke1 - maxConcurrency: 2 - timeout: 240 -status: - conditions: - - message: The ClusterGroupUpgrade CR is not enabled - reason: UpgradeNotStarted - status: "False" - type: Ready - copiedPolicies: - - cgu-a-policy1-common-cluster-version-policy - - cgu-a-policy2-common-pao-sub-policy - - cgu-a-policy3-common-ptp-sub-policy - managedPoliciesForUpgrade: - - name: policy1-common-cluster-version-policy - namespace: default - - name: policy2-common-pao-sub-policy - namespace: default - - name: policy3-common-ptp-sub-policy - namespace: default - placementBindings: - - cgu-a-policy1-common-cluster-version-policy - - cgu-a-policy2-common-pao-sub-policy - - cgu-a-policy3-common-ptp-sub-policy - placementRules: - - cgu-a-policy1-common-cluster-version-policy - - cgu-a-policy2-common-pao-sub-policy - - cgu-a-policy3-common-ptp-sub-policy - remediationPlan: - - - spoke1 - - - spoke2 ----- -<1> Defines the blocking CRs. The `cgu-a` update cannot start until `cgu-c` is complete. -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-b - namespace: default -spec: - blockingCRs: <1> - - name: cgu-a - namespace: default - clusters: - - spoke4 - - spoke5 - enable: false - managedPolicies: - - policy1-common-cluster-version-policy - - policy2-common-pao-sub-policy - - policy3-common-ptp-sub-policy - - policy4-common-sriov-sub-policy - remediationStrategy: - maxConcurrency: 1 - timeout: 240 -status: - conditions: - - message: The ClusterGroupUpgrade CR is not enabled - reason: UpgradeNotStarted - status: "False" - type: Ready - copiedPolicies: - - cgu-b-policy1-common-cluster-version-policy - - cgu-b-policy2-common-pao-sub-policy - - cgu-b-policy3-common-ptp-sub-policy - - cgu-b-policy4-common-sriov-sub-policy - managedPoliciesForUpgrade: - - name: policy1-common-cluster-version-policy - namespace: default - - name: policy2-common-pao-sub-policy - namespace: default - - name: policy3-common-ptp-sub-policy - namespace: default - - name: policy4-common-sriov-sub-policy - namespace: default - placementBindings: - - cgu-b-policy1-common-cluster-version-policy - - cgu-b-policy2-common-pao-sub-policy - - cgu-b-policy3-common-ptp-sub-policy - - cgu-b-policy4-common-sriov-sub-policy - placementRules: - - cgu-b-policy1-common-cluster-version-policy - - cgu-b-policy2-common-pao-sub-policy - - cgu-b-policy3-common-ptp-sub-policy - - cgu-b-policy4-common-sriov-sub-policy - remediationPlan: - - - spoke4 - - - spoke5 - status: {} ----- -<1> The `cgu-b` update cannot start until `cgu-a` is complete. 
-+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-c - namespace: default -spec: <1> - clusters: - - spoke6 - enable: false - managedPolicies: - - policy1-common-cluster-version-policy - - policy2-common-pao-sub-policy - - policy3-common-ptp-sub-policy - - policy4-common-sriov-sub-policy - remediationStrategy: - maxConcurrency: 1 - timeout: 240 -status: - conditions: - - message: The ClusterGroupUpgrade CR is not enabled - reason: UpgradeNotStarted - status: "False" - type: Ready - copiedPolicies: - - cgu-c-policy1-common-cluster-version-policy - - cgu-c-policy4-common-sriov-sub-policy - managedPoliciesCompliantBeforeUpgrade: - - policy2-common-pao-sub-policy - - policy3-common-ptp-sub-policy - managedPoliciesForUpgrade: - - name: policy1-common-cluster-version-policy - namespace: default - - name: policy4-common-sriov-sub-policy - namespace: default - placementBindings: - - cgu-c-policy1-common-cluster-version-policy - - cgu-c-policy4-common-sriov-sub-policy - placementRules: - - cgu-c-policy1-common-cluster-version-policy - - cgu-c-policy4-common-sriov-sub-policy - remediationPlan: - - - spoke6 - status: {} ----- -<1> The `cgu-c` update does not have any blocking CRs. {cgu-operator} starts the `cgu-c` update when the `enable` field is set to `true`. - -. Create the `ClusterGroupUpgrade` CRs by running the following command for each relevant CR: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- - -. Start the update process by running the following command for each relevant CR: -+ -[source,terminal] ----- -$ oc --namespace=default patch clustergroupupgrade.ran.openshift.io/ \ ---type merge -p '{"spec":{"enable":true}}' ----- -+ -The following examples show `ClusterGroupUpgrade` CRs where the `enable` field is set to `true`: -+ -.Example for `cgu-a` with blocking CRs -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-a - namespace: default -spec: - blockingCRs: - - name: cgu-c - namespace: default - clusters: - - spoke1 - - spoke2 - - spoke3 - enable: true - managedPolicies: - - policy1-common-cluster-version-policy - - policy2-common-pao-sub-policy - - policy3-common-ptp-sub-policy - remediationStrategy: - canaries: - - spoke1 - maxConcurrency: 2 - timeout: 240 -status: - conditions: - - message: 'The ClusterGroupUpgrade CR is blocked by other CRs that have not yet - completed: [cgu-c]' <1> - reason: UpgradeCannotStart - status: "False" - type: Ready - copiedPolicies: - - cgu-a-policy1-common-cluster-version-policy - - cgu-a-policy2-common-pao-sub-policy - - cgu-a-policy3-common-ptp-sub-policy - managedPoliciesForUpgrade: - - name: policy1-common-cluster-version-policy - namespace: default - - name: policy2-common-pao-sub-policy - namespace: default - - name: policy3-common-ptp-sub-policy - namespace: default - placementBindings: - - cgu-a-policy1-common-cluster-version-policy - - cgu-a-policy2-common-pao-sub-policy - - cgu-a-policy3-common-ptp-sub-policy - placementRules: - - cgu-a-policy1-common-cluster-version-policy - - cgu-a-policy2-common-pao-sub-policy - - cgu-a-policy3-common-ptp-sub-policy - remediationPlan: - - - spoke1 - - - spoke2 - status: {} ----- -<1> Shows the list of blocking CRs. 
-+ -.Example for `cgu-b` with blocking CRs -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-b - namespace: default -spec: - blockingCRs: - - name: cgu-a - namespace: default - clusters: - - spoke4 - - spoke5 - enable: true - managedPolicies: - - policy1-common-cluster-version-policy - - policy2-common-pao-sub-policy - - policy3-common-ptp-sub-policy - - policy4-common-sriov-sub-policy - remediationStrategy: - maxConcurrency: 1 - timeout: 240 -status: - conditions: - - message: 'The ClusterGroupUpgrade CR is blocked by other CRs that have not yet - completed: [cgu-a]' <1> - reason: UpgradeCannotStart - status: "False" - type: Ready - copiedPolicies: - - cgu-b-policy1-common-cluster-version-policy - - cgu-b-policy2-common-pao-sub-policy - - cgu-b-policy3-common-ptp-sub-policy - - cgu-b-policy4-common-sriov-sub-policy - managedPoliciesForUpgrade: - - name: policy1-common-cluster-version-policy - namespace: default - - name: policy2-common-pao-sub-policy - namespace: default - - name: policy3-common-ptp-sub-policy - namespace: default - - name: policy4-common-sriov-sub-policy - namespace: default - placementBindings: - - cgu-b-policy1-common-cluster-version-policy - - cgu-b-policy2-common-pao-sub-policy - - cgu-b-policy3-common-ptp-sub-policy - - cgu-b-policy4-common-sriov-sub-policy - placementRules: - - cgu-b-policy1-common-cluster-version-policy - - cgu-b-policy2-common-pao-sub-policy - - cgu-b-policy3-common-ptp-sub-policy - - cgu-b-policy4-common-sriov-sub-policy - remediationPlan: - - - spoke4 - - - spoke5 - status: {} ----- -<1> Shows the list of blocking CRs. -+ -.Example for `cgu-c` with blocking CRs -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-c - namespace: default -spec: - clusters: - - spoke6 - enable: true - managedPolicies: - - policy1-common-cluster-version-policy - - policy2-common-pao-sub-policy - - policy3-common-ptp-sub-policy - - policy4-common-sriov-sub-policy - remediationStrategy: - maxConcurrency: 1 - timeout: 240 -status: - conditions: - - message: The ClusterGroupUpgrade CR has upgrade policies that are still non compliant <1> - reason: UpgradeNotCompleted - status: "False" - type: Ready - copiedPolicies: - - cgu-c-policy1-common-cluster-version-policy - - cgu-c-policy4-common-sriov-sub-policy - managedPoliciesCompliantBeforeUpgrade: - - policy2-common-pao-sub-policy - - policy3-common-ptp-sub-policy - managedPoliciesForUpgrade: - - name: policy1-common-cluster-version-policy - namespace: default - - name: policy4-common-sriov-sub-policy - namespace: default - placementBindings: - - cgu-c-policy1-common-cluster-version-policy - - cgu-c-policy4-common-sriov-sub-policy - placementRules: - - cgu-c-policy1-common-cluster-version-policy - - cgu-c-policy4-common-sriov-sub-policy - remediationPlan: - - - spoke6 - status: - currentBatch: 1 - remediationPlanForBatch: - spoke6: 0 ----- -<1> The `cgu-c` update does not have any blocking CRs. 
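As a quick check on whether a `ClusterGroupUpgrade` CR is still blocked, you can read the reason of its `Ready` condition directly. The following command is a minimal sketch that assumes the CR names used in the preceding examples:

[source,terminal]
----
$ oc get clustergroupupgrades.ran.openshift.io cgu-a -n default \
  -o jsonpath='{.status.conditions[?(@.type=="Ready")].reason}{"\n"}'
----

While `cgu-c` has not completed, this is expected to return a reason such as `UpgradeCannotStart`, matching the condition shown in the example status above.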
diff --git a/modules/cnf-about-topology-aware-lifecycle-manager-config.adoc b/modules/cnf-about-topology-aware-lifecycle-manager-config.adoc deleted file mode 100644 index 36254476b6b2..000000000000 --- a/modules/cnf-about-topology-aware-lifecycle-manager-config.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-2600 (CNF-2133) (4.10), Story TELCODOCS-285 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: CONCEPT -[id="cnf-about-topology-aware-lifecycle-manager-config_{context}"] -= About the {cgu-operator-full} configuration - -The {cgu-operator-first} manages the deployment of {rh-rhacm-first} policies for one or more {product-title} clusters. Using {cgu-operator} in a large network of clusters allows the phased rollout of policies to the clusters in limited batches. This helps to minimize possible service disruptions when updating. With {cgu-operator}, you can control the following actions: - -* The timing of the update -* The number of {rh-rhacm}-managed clusters -* The subset of managed clusters to apply the policies to -* The update order of the clusters -* The set of policies remediated to the cluster -* The order of policies remediated to the cluster -* The assignment of a canary cluster - -For {sno}, the {cgu-operator-first} offers the following features: - -* Create a backup of a deployment before an upgrade -* Pre-caching images for clusters with limited bandwidth - -{cgu-operator} supports the orchestration of the {product-title} y-stream and z-stream updates, and day-two operations on y-streams and z-streams. diff --git a/modules/cnf-about-topology-aware-lifecycle-manager-policies.adoc b/modules/cnf-about-topology-aware-lifecycle-manager-policies.adoc deleted file mode 100644 index 4fcbde5b0b33..000000000000 --- a/modules/cnf-about-topology-aware-lifecycle-manager-policies.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-2600 (CNF-2133) (4.10), Story TELCODOCS-285 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: CONCEPT -[id="cnf-about-topology-aware-lifecycle-manager-about-policies_{context}"] -= About managed policies used with {cgu-operator-full} - -The {cgu-operator-first} uses {rh-rhacm} policies for cluster updates. - -{cgu-operator} can be used to manage the rollout of any policy CR where the `remediationAction` field is set to `inform`. -Supported use cases include the following: - -* Manual user creation of policy CRs -* Automatically generated policies from the `PolicyGenTemplate` custom resource definition (CRD) - -For policies that update an Operator subscription with manual approval, {cgu-operator} provides additional functionality that approves the installation of the updated Operator. - -For more information about managed policies, see link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/{rh-rhacm-version}/html-single/governance/index#policy-overview[Policy Overview] in the {rh-rhacm} documentation. - -For more information about the `PolicyGenTemplate` CRD, see the "About the PolicyGenTemplate CRD" section in "Configuring managed clusters with policies and PolicyGenTemplate resources". 
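For orientation, the following is a minimal sketch of the kind of {rh-rhacm} policy that {cgu-operator} can roll out, with `remediationAction` set to `inform`; the policy name, namespace, and managed object are illustrative assumptions only:

[source,yaml]
----
apiVersion: policy.open-cluster-management.io/v1
kind: Policy
metadata:
  name: example-inform-policy            # illustrative name
  namespace: ztp-common                  # illustrative namespace
spec:
  disabled: false
  remediationAction: inform              # {cgu-operator} manages policies that are set to inform
  policy-templates:
  - objectDefinition:
      apiVersion: policy.open-cluster-management.io/v1
      kind: ConfigurationPolicy
      metadata:
        name: example-config-policy
      spec:
        remediationAction: inform
        severity: low
        object-templates:
        - complianceType: musthave
          objectDefinition:
            apiVersion: v1
            kind: Namespace
            metadata:
              name: example-namespace    # illustrative managed object
----

Policies that are generated automatically from `PolicyGenTemplate` CRs follow the same `inform` pattern.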
diff --git a/modules/cnf-about-virtual-routing-and-forwarding.adoc b/modules/cnf-about-virtual-routing-and-forwarding.adoc deleted file mode 100644 index aec1274e411a..000000000000 --- a/modules/cnf-about-virtual-routing-and-forwarding.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// networking/multiple_networks/about-virtual-routing-and-forwarding.adoc - -:_content-type: CONCEPT -[id="cnf-about-virtual-routing-and-forwarding_{context}"] -= About virtual routing and forwarding - -Virtual routing and forwarding (VRF) devices combined with IP rules provide the ability to create virtual routing and forwarding domains. VRF reduces the number of permissions needed by a CNF, and provides increased visibility of the network topology of secondary networks. VRF is used to provide multi-tenancy functionality, for example, where each tenant has its own unique routing tables and requires different default gateways. - -Processes can bind a socket to the VRF device. Packets sent through the bound socket use the routing table associated with the VRF device. An important feature of VRF is that it impacts only OSI model layer 3 traffic and above, so L2 tools, such as LLDP, are not affected. This allows higher-priority IP rules, such as policy-based routing, to take precedence over the VRF device rules directing specific traffic. - -[id="cnf-benefits-secondary-networks-telecommunications-operators_{context}"] -== Benefits of secondary networks for pods for telecommunications operators - -In telecommunications use cases, each CNF can potentially be connected to multiple different networks sharing the same address space. These secondary networks can potentially conflict with the cluster's main network CIDR. Using the CNI VRF plugin, network functions can be connected to different customers' infrastructure using the same IP address, keeping different customers isolated. These IP addresses can also overlap with the {product-title} IP space. The CNI VRF plugin also reduces the number of permissions needed by a CNF and increases the visibility of network topologies of secondary networks. diff --git a/modules/cnf-about_hyperthreading_for_low_latency_and_real_time_applications.adoc b/modules/cnf-about_hyperthreading_for_low_latency_and_real_time_applications.adoc deleted file mode 100644 index 621e42793fd3..000000000000 --- a/modules/cnf-about_hyperthreading_for_low_latency_and_real_time_applications.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/cnf-low-latency-tuning.adoc - -:_content-type: CONCEPT -[id="about_hyperthreading_for_low_latency_and_real_time_applications_{context}"] -= About hyperthreading for low latency and real-time applications - -Hyperthreading is an Intel processor technology that allows a physical CPU processor core to function as two logical cores, executing two independent threads simultaneously. Hyperthreading allows for better system throughput for certain workload types where parallel processing is beneficial. The default {product-title} configuration expects hyperthreading to be enabled. - -For telecommunications applications, it is important to design your application infrastructure to minimize latency as much as possible. Hyperthreading can degrade performance and negatively affect throughput for compute-intensive workloads that require low latency. Disabling hyperthreading ensures predictable performance and can decrease processing times for these workloads.
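If you do decide to disable hyperthreading for such workloads, one common approach is to pass the `nosmt` kernel argument through the performance profile, roughly as sketched below; treat this as an assumption to verify for your hardware, and note that the profile name and CPU ranges are illustrative only:

[source,yaml]
----
apiVersion: performance.openshift.io/v2
kind: PerformanceProfile
metadata:
  name: example-nosmt-profile            # illustrative name
spec:
  additionalKernelArgs:
  - nosmt                                # boot the node with simultaneous multithreading disabled
  cpu:
    isolated: 2-15                       # illustrative CPU ranges
    reserved: 0-1
  nodeSelector:
    node-role.kubernetes.io/worker-cnf: ""
----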
- -[NOTE] -==== -Hyperthreading implementation and configuration differs depending on the hardware you are running {product-title} on. Consult the relevant host hardware tuning information for more details of the hyperthreading implementation specific to that hardware. Disabling hyperthreading can increase the cost per core of the cluster. -==== diff --git a/modules/cnf-adjusting-nic-queues-with-the-performance-profile.adoc b/modules/cnf-adjusting-nic-queues-with-the-performance-profile.adoc deleted file mode 100644 index df17beef2cd8..000000000000 --- a/modules/cnf-adjusting-nic-queues-with-the-performance-profile.adoc +++ /dev/null @@ -1,168 +0,0 @@ -// Module included in the following assemblies: -//CNF-1483 (4.8) -// * scalability_and_performance/low-latency-tuning.adoc - -:_content-type: PROCEDURE -[id="adjusting-nic-queues-with-the-performance-profile_{context}"] -= Adjusting the NIC queues with the performance profile - -The performance profile lets you adjust the queue count for each network device. - -Supported network devices: - -* Non-virtual network devices - -* Network devices that support multiple queues (channels) - -Unsupported network devices: - -* Pure software network interfaces - -* Block devices - -* Intel DPDK virtual functions - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. -* Install the OpenShift CLI (`oc`). - -.Procedure - -. Log in to the {product-title} cluster running the Node Tuning Operator as a user with `cluster-admin` privileges. - -. Create and apply a performance profile appropriate for your hardware and topology. For guidance on creating a profile, see the "Creating a performance profile" section. - -. Edit this created performance profile: -+ -[source,terminal] ----- -$ oc edit -f .yaml ----- - -. Populate the `spec` field with the `net` object. The object list can contain two fields: - -* `userLevelNetworking` is a required field specified as a boolean flag. If `userLevelNetworking` is `true`, the queue count is set to the reserved CPU count for all supported devices. The default is `false`. -* `devices` is an optional field specifying a list of devices that will have the queues set to the reserved CPU count. If the device list is empty, the configuration applies to all network devices. The configuration is as follows: -** `interfaceName`: This field specifies the interface name, and it supports shell-style wildcards, which can be positive or negative. -*** Example wildcard syntax is as follows: ` .*` -*** Negative rules are prefixed with an exclamation mark. To apply the net queue changes to all devices other than the excluded list, use `!`, for example, `!eno1`. -** `vendorID`: The network device vendor ID represented as a 16-bit hexadecimal number with a `0x` prefix. -** `deviceID`: The network device ID (model) represented as a 16-bit hexadecimal number with a `0x` prefix. -+ -[NOTE] -==== -When a `deviceID` is specified, the `vendorID` must also be defined. A device that matches all of the device identifiers specified in a device entry `interfaceName`, `vendorID`, or a pair of `vendorID` plus `deviceID` qualifies as a network device. This network device then has its net queues count set to the reserved CPU count. - -When two or more devices are specified, the net queues count is set to any net device that matches one of them. -==== - -. 
Set the queue count to the reserved CPU count for all devices by using this example performance profile: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: manual -spec: - cpu: - isolated: 3-51,55-103 - reserved: 0-2,52-54 - net: - userLevelNetworking: true - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" ----- - -. Set the queue count to the reserved CPU count for all devices matching any of the defined device identifiers by using this example performance profile: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: manual -spec: - cpu: - isolated: 3-51,55-103 - reserved: 0-2,52-54 - net: - userLevelNetworking: true - devices: - - interfaceName: "eth0" - - interfaceName: "eth1" - - vendorID: "0x1af4" - - deviceID: "0x1000" - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" ----- - -. Set the queue count to the reserved CPU count for all devices starting with the interface name `eth` by using this example performance profile: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: manual -spec: - cpu: - isolated: 3-51,55-103 - reserved: 0-2,52-54 - net: - userLevelNetworking: true - devices: - - interfaceName: "eth*" - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" ----- - -. Set the queue count to the reserved CPU count for all devices with an interface named anything other than `eno1` by using this example performance profile: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: manual -spec: - cpu: - isolated: 3-51,55-103 - reserved: 0-2,52-54 - net: - userLevelNetworking: true - devices: - - interfaceName: "!eno1" - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" ----- - -. Set the queue count to the reserved CPU count for all devices that have an interface name `eth0`, `vendorID` of `0x1af4`, and `deviceID` of `0x1000` by using this example performance profile: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: manual -spec: - cpu: - isolated: 3-51,55-103 - reserved: 0-2,52-54 - net: - userLevelNetworking: true - devices: - - interfaceName: "eth0" - - vendorID: "0x1af4" - - deviceID: "0x1000" - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" ----- - -. Apply the updated performance profile: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/cnf-allocating-multiple-huge-page-sizes.adoc b/modules/cnf-allocating-multiple-huge-page-sizes.adoc deleted file mode 100644 index e79e3da910f2..000000000000 --- a/modules/cnf-allocating-multiple-huge-page-sizes.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// CNF-538 Promote Multiple Huge Pages Sizes for Pods and Containers to beta -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-low-latency-tuning.adoc - -[id="cnf-allocating-multiple-huge-page-sizes_{context}"] -= Allocating multiple huge page sizes - -You can request huge pages with different sizes under the same container. This allows you to define more complicated pods consisting of containers with different huge page size needs.
- -For example, you can define sizes `1G` and `2M` and the Node Tuning Operator will configure both sizes on the node, as shown here: - -[source,yaml] ----- -spec: - hugepages: - defaultHugepagesSize: 1G - pages: - - count: 1024 - node: 0 - size: 2M - - count: 4 - node: 1 - size: 1G ----- \ No newline at end of file diff --git a/modules/cnf-assigning-a-secondary-network-to-a-vrf.adoc b/modules/cnf-assigning-a-secondary-network-to-a-vrf.adoc deleted file mode 100644 index a76720346154..000000000000 --- a/modules/cnf-assigning-a-secondary-network-to-a-vrf.adoc +++ /dev/null @@ -1,143 +0,0 @@ -// Module included in the following assemblies: -// -// networking/multiple_networks/assigning-a-secondary-network-to-a-vrf.adoc - - -:_content-type: PROCEDURE -[id="cnf-assigning-a-secondary-network-to-a-vrf_{context}"] -= Assigning a secondary network to a VRF - -As a cluster administrator, you can configure an additional network for your VRF domain by using the CNI VRF plugin. The virtual network created by this plugin is associated with a physical interface that you specify. - -[NOTE] -==== -Applications that use VRFs need to bind to a specific device. The common usage is to use the `SO_BINDTODEVICE` option for a socket. `SO_BINDTODEVICE` binds the socket to a device that is specified in the passed interface name, for example, `eth1`. To use `SO_BINDTODEVICE`, the application must have `CAP_NET_RAW` capabilities. - -Using a VRF through the `ip vrf exec` command is not supported in {product-title} pods. To use VRF, bind applications directly to the VRF interface. -==== - -[id="cnf-creating-an-additional-network-attachment-with-the-cni-vrf-plug-in_{context}"] -== Creating an additional network attachment with the CNI VRF plugin - -The Cluster Network Operator (CNO) manages additional network definitions. When you specify an additional network to create, the CNO creates the `NetworkAttachmentDefinition` custom resource (CR) automatically. - -[NOTE] -==== -Do not edit the `NetworkAttachmentDefinition` CRs that the Cluster Network Operator manages. Doing so might disrupt network traffic on your additional network. -==== - -To create an additional network attachment with the CNI VRF plugin, perform the following procedure. - -.Prerequisites - -* Install the {product-title} CLI (oc). -* Log in to the OpenShift cluster as a user with cluster-admin privileges. - -.Procedure - -. Create the `Network` custom resource (CR) for the additional network attachment and insert the `rawCNIConfig` configuration for the additional network, as in the following example CR. Save the YAML as the file `additional-network-attachment.yaml`. -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster - spec: - additionalNetworks: - - name: test-network-1 - namespace: additional-network-1 - type: Raw - rawCNIConfig: '{ - "cniVersion": "0.3.1", - "name": "macvlan-vrf", - "plugins": [ <1> - { - "type": "macvlan", <2> - "master": "eth1", - "ipam": { - "type": "static", - "addresses": [ - { - "address": "191.168.1.23/24" - } - ] - } - }, - { - "type": "vrf", - "vrfname": "example-vrf-name", <3> - "table": 1001 <4> - }] - }' ----- -<1> `plugins` must be a list. The first item in the list must be the secondary network underpinning the VRF network. The second item in the list is the VRF plugin configuration. -<2> `type` must be set to `vrf`. -<3> `vrfname` is the name of the VRF that the interface is assigned to. If it does not exist in the pod, it is created. -<4> Optional. 
`table` is the routing table ID. By default, the `tableid` parameter is used. If it is not specified, the CNI assigns a free routing table ID to the VRF. -+ -[NOTE] -==== -VRF functions correctly only when the resource is of type `netdevice`. -==== - -. Create the `Network` resource: -+ -[source,terminal] ----- -$ oc create -f additional-network-attachment.yaml ----- - -. Confirm that the CNO created the `NetworkAttachmentDefinition` CR by running the following command. Replace `` with the namespace that you specified when configuring the network attachment, for example, `additional-network-1`. -+ -[source,terminal] ----- -$ oc get network-attachment-definitions -n ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -additional-network-1 14m ----- -+ -[NOTE] -==== -There might be a delay before the CNO creates the CR. -==== - -.Verifying that the additional VRF network attachment is successful - -To verify that the VRF CNI is correctly configured and the additional network attachment is attached, do the following: - -. Create a network that uses the VRF CNI. -. Assign the network to a pod. -. Verify that the pod network attachment is connected to the VRF additional network. Remote shell into the pod and run the following command: -+ -[source,terminal] ----- -$ ip vrf show ----- -+ -.Example output -+ -[source,terminal] ----- -Name Table ------------------------ -red 10 ----- -. Confirm the VRF interface is master of the secondary interface: -+ -[source,terminal] ----- -$ ip link ----- -+ -.Example output -+ -[source,terminal] ----- -5: net1: mtu 1500 qdisc noqueue master red state UP mode ----- diff --git a/modules/cnf-assigning-a-sriov-network-to-a-vrf.adoc b/modules/cnf-assigning-a-sriov-network-to-a-vrf.adoc deleted file mode 100644 index 209f39f079bf..000000000000 --- a/modules/cnf-assigning-a-sriov-network-to-a-vrf.adoc +++ /dev/null @@ -1,133 +0,0 @@ -// Module included in the following assemblies: -// -//networking/hardware_networks/configuring-sriov-device.adoc - -:_content-type: PROCEDURE -[id="cnf-assigning-a-sriov-network-to-a-vrf_{context}"] -= Assigning an SR-IOV network to a VRF - -As a cluster administrator, you can assign an SR-IOV network interface to your VRF domain by using the CNI VRF plugin. - -To do this, add the VRF configuration to the optional `metaPlugins` parameter of the `SriovNetwork` resource. - -[NOTE] -==== -Applications that use VRFs need to bind to a specific device. The common usage is to use the `SO_BINDTODEVICE` option for a socket. `SO_BINDTODEVICE` binds the socket to a device that is specified in the passed interface name, for example, `eth1`. To use `SO_BINDTODEVICE`, the application must have `CAP_NET_RAW` capabilities. - -Using a VRF through the `ip vrf exec` command is not supported in {product-title} pods. To use VRF, bind applications directly to the VRF interface. -==== - -[id="cnf-creating-an-additional-sriov-network-with-vrf-plug-in_{context}"] -== Creating an additional SR-IOV network attachment with the CNI VRF plugin - -The SR-IOV Network Operator manages additional network definitions. When you specify an additional SR-IOV network to create, the SR-IOV Network Operator creates the `NetworkAttachmentDefinition` custom resource (CR) automatically. - -[NOTE] -==== -Do not edit `NetworkAttachmentDefinition` custom resources that the SR-IOV Network Operator manages. Doing so might disrupt network traffic on your additional network. 
-==== - -To create an additional SR-IOV network attachment with the CNI VRF plugin, perform the following procedure. - -.Prerequisites - -* Install the {product-title} CLI (oc). -* Log in to the {product-title} cluster as a user with cluster-admin privileges. - -.Procedure - -. Create the `SriovNetwork` custom resource (CR) for the additional SR-IOV network attachment and insert the `metaPlugins` configuration, as in the following example CR. Save the YAML as the file `sriov-network-attachment.yaml`. -+ -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetwork -metadata: - name: example-network - namespace: additional-sriov-network-1 -spec: - ipam: | - { - "type": "host-local", - "subnet": "10.56.217.0/24", - "rangeStart": "10.56.217.171", - "rangeEnd": "10.56.217.181", - "routes": [{ - "dst": "0.0.0.0/0" - }], - "gateway": "10.56.217.1" - } - vlan: 0 - resourceName: intelnics - metaPlugins : | - { - "type": "vrf", <1> - "vrfname": "example-vrf-name" <2> - } ----- -<1> `type` must be set to `vrf`. -<2> `vrfname` is the name of the VRF that the interface is assigned to. If it does not exist in the pod, it is created. - -. Create the `SriovNetwork` resource: -+ -[source,terminal] ----- -$ oc create -f sriov-network-attachment.yaml ----- - -.Verifying that the `NetworkAttachmentDefinition` CR is successfully created - -* Confirm that the SR-IOV Network Operator created the `NetworkAttachmentDefinition` CR by running the following command. -+ -[source,terminal] ----- -$ oc get network-attachment-definitions -n <1> ----- -<1> Replace `` with the namespace that you specified when configuring the network attachment, for example, `additional-sriov-network-1`. -+ -.Example output -[source,terminal] ----- -NAME AGE -additional-sriov-network-1 14m ----- -+ -[NOTE] -==== -There might be a delay before the SR-IOV Network Operator creates the CR. -==== - -.Verifying that the additional SR-IOV network attachment is successful - -To verify that the VRF CNI is correctly configured and the additional SR-IOV network attachment is attached, do the following: - -. Create an SR-IOV network that uses the VRF CNI. -. Assign the network to a pod. -. Verify that the pod network attachment is connected to the SR-IOV additional network. Remote shell into the pod and run the following command: -+ -[source,terminal] ----- -$ ip vrf show ----- -+ -.Example output -[source,terminal] ----- -Name Table ------------------------ -red 10 ----- -. Confirm the VRF interface is master of the secondary interface: -+ -[source,terminal] ----- -$ ip link ----- -+ -.Example output -[source,terminal] ----- -... -5: net1: mtu 1500 qdisc noqueue master red state UP mode -... ----- diff --git a/modules/cnf-associating-secondary-interfaces-metrics-to-network-attachments.adoc b/modules/cnf-associating-secondary-interfaces-metrics-to-network-attachments.adoc deleted file mode 100644 index a68a4b76b1d4..000000000000 --- a/modules/cnf-associating-secondary-interfaces-metrics-to-network-attachments.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// CNF-43 Associate Secondary Interfaces Metrics to Network Attachments -// Module included in the following assemblies: -// -// *networking/associating-secondary-interfaces-metrics-to-network-attachments.adoc - -[id="cnf-associating-secondary-interfaces-metrics-to-network-attachments_{context}"] -= Extending secondary network metrics for monitoring - -Secondary devices, or interfaces, are used for different purposes. 
It is important to have a way to classify them to be able to aggregate the metrics for secondary devices with the same classification. - -Exposed metrics contain the interface but do not specify where the interface originates. This is workable when there are no additional interfaces. However, if secondary interfaces are added, it can be difficult to use the metrics since it is hard to identify interfaces using only interface names. - -When adding secondary interfaces, their names depend on the order in which they are added, and different secondary interfaces might belong to different networks and can be used for different purposes. - -With `pod_network_name_info` it is possible to extend the current metrics with additional information that identifies the interface type. In this way, it is possible to aggregate the metrics and to add specific alarms to specific interface types. - -The network type is generated using the name of the related `NetworkAttachmentDefinition`, that in turn is used to differentiate different classes of secondary networks. For example, different interfaces belonging to different networks or using different CNIs use different network attachment definition names. - -[id="cnf-associating-secondary-interfaces-metrics-to-network-attachments-network-metrics-daemon_{context}"] -== Network Metrics Daemon - -The Network Metrics Daemon is a daemon component that collects and publishes network related metrics. - -The kubelet is already publishing network related metrics you can observe. These metrics are: - -* `container_network_receive_bytes_total` -* `container_network_receive_errors_total` -* `container_network_receive_packets_total` -* `container_network_receive_packets_dropped_total` -* `container_network_transmit_bytes_total` -* `container_network_transmit_errors_total` -* `container_network_transmit_packets_total` -* `container_network_transmit_packets_dropped_total` - -The labels in these metrics contain, among others: - -* Pod name -* Pod namespace -* Interface name (such as `eth0`) - -These metrics work well until new interfaces are added to the pod, for example via https://github.com/intel/multus-cni[Multus], as it is not clear what the interface names refer to. - -The interface label refers to the interface name, but it is not clear what that interface is meant for. In case of many different interfaces, it would be impossible to understand what network the metrics you are monitoring refer to. - -This is addressed by introducing the new `pod_network_name_info` described in the following section. - -[id="cnf-associating-secondary-interfaces-metrics-with-network-name_{context}"] -== Metrics with network name - -This daemonset publishes a `pod_network_name_info` gauge metric, with a fixed value of `0`: - -[source,bash] ----- -pod_network_name_info{interface="net0",namespace="namespacename",network_name="nadnamespace/firstNAD",pod="podname"} 0 ----- - -The network name label is produced using the annotation added by Multus. It is the concatenation of the namespace the network attachment definition belongs to, plus the name of the network attachment definition. - -The new metric alone does not provide much value, but combined with the network related `container_network_*` metrics, it offers better support for monitoring secondary networks. 
- -Using `promql` queries like the following, it is possible to get a new metric containing the value and the network name retrieved from the `k8s.v1.cni.cncf.io/network-status` annotation: - -[source,bash] ----- -(container_network_receive_bytes_total) + on(namespace,pod,interface) group_left(network_name) ( pod_network_name_info ) -(container_network_receive_errors_total) + on(namespace,pod,interface) group_left(network_name) ( pod_network_name_info ) -(container_network_receive_packets_total) + on(namespace,pod,interface) group_left(network_name) ( pod_network_name_info ) -(container_network_receive_packets_dropped_total) + on(namespace,pod,interface) group_left(network_name) ( pod_network_name_info ) -(container_network_transmit_bytes_total) + on(namespace,pod,interface) group_left(network_name) ( pod_network_name_info ) -(container_network_transmit_errors_total) + on(namespace,pod,interface) group_left(network_name) ( pod_network_name_info ) -(container_network_transmit_packets_total) + on(namespace,pod,interface) group_left(network_name) ( pod_network_name_info ) -(container_network_transmit_packets_dropped_total) + on(namespace,pod,interface) group_left(network_name) ( pod_network_name_info ) ----- diff --git a/modules/cnf-checking-numa-aware-scheduler-logs.adoc b/modules/cnf-checking-numa-aware-scheduler-logs.adoc deleted file mode 100644 index 061922aad803..000000000000 --- a/modules/cnf-checking-numa-aware-scheduler-logs.adoc +++ /dev/null @@ -1,146 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_module-type: PROCEDURE -[id="cnf-checking-numa-aware-scheduler-logs_{context}"] -= Checking the NUMA-aware scheduler logs - -Troubleshoot problems with the NUMA-aware scheduler by reviewing the logs. If required, you can increase the scheduler log level by modifying the `spec.logLevel` field of the `NUMAResourcesScheduler` resource. Acceptable values are `Normal`, `Debug`, and `Trace`, with `Trace` being the most verbose option. - -[NOTE] -==== -To change the log level of the secondary scheduler, delete the running scheduler resource and re-deploy it with the changed log level. The scheduler is unavailable for scheduling new workloads during this downtime. -==== - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Delete the currently running `NUMAResourcesScheduler` resource: - -.. Get the active `NUMAResourcesScheduler` by running the following command: -+ -[source,terminal] ----- -$ oc get NUMAResourcesScheduler ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -numaresourcesscheduler 90m ----- - -.. Delete the secondary scheduler resource by running the following command: -+ -[source,terminal] ----- -$ oc delete NUMAResourcesScheduler numaresourcesscheduler ----- -+ -.Example output -[source,terminal] ----- -numaresourcesscheduler.nodetopology.openshift.io "numaresourcesscheduler" deleted ----- - -. Save the following YAML in the file `nro-scheduler-debug.yaml`. This example changes the log level to `Debug`: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: nodetopology.openshift.io/v1 -kind: NUMAResourcesScheduler -metadata: - name: numaresourcesscheduler -spec: - imageSpec: "registry.redhat.io/openshift4/noderesourcetopology-scheduler-container-rhel8:v{product-version}" - logLevel: Debug ----- - -. 
Create the updated `Debug` logging `NUMAResourcesScheduler` resource by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-scheduler-debug.yaml ----- -+ -.Example output -[source,terminal] ----- -numaresourcesscheduler.nodetopology.openshift.io/numaresourcesscheduler created ----- - -.Verification steps - -. Check that the NUMA-aware scheduler was successfully deployed: - -.. Run the following command to check that the CRD is created succesfully: -+ -[source,terminal] ----- -$ oc get crd | grep numaresourcesschedulers ----- -+ -.Example output -[source,terminal] ----- -NAME CREATED AT -numaresourcesschedulers.nodetopology.openshift.io 2022-02-25T11:57:03Z ----- - -.. Check that the new custom scheduler is available by running the following command: -+ -[source,terminal] ----- -$ oc get numaresourcesschedulers.nodetopology.openshift.io ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -numaresourcesscheduler 3h26m ----- - -. Check that the logs for the scheduler shows the increased log level: - -.. Get the list of pods running in the `openshift-numaresources` namespace by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-numaresources ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -numaresources-controller-manager-d87d79587-76mrm 1/1 Running 0 46h -numaresourcesoperator-worker-5wm2k 2/2 Running 0 45h -numaresourcesoperator-worker-pb75c 2/2 Running 0 45h -secondary-scheduler-7976c4d466-qm4sc 1/1 Running 0 21m ----- - -.. Get the logs for the secondary scheduler pod by running the following command: -+ -[source,terminal] ----- -$ oc logs secondary-scheduler-7976c4d466-qm4sc -n openshift-numaresources ----- -+ -.Example output -[source,terminal] ----- -... -I0223 11:04:55.614788 1 reflector.go:535] k8s.io/client-go/informers/factory.go:134: Watch close - *v1.Namespace total 11 items received -I0223 11:04:56.609114 1 reflector.go:535] k8s.io/client-go/informers/factory.go:134: Watch close - *v1.ReplicationController total 10 items received -I0223 11:05:22.626818 1 reflector.go:535] k8s.io/client-go/informers/factory.go:134: Watch close - *v1.StorageClass total 7 items received -I0223 11:05:31.610356 1 reflector.go:535] k8s.io/client-go/informers/factory.go:134: Watch close - *v1.PodDisruptionBudget total 7 items received -I0223 11:05:31.713032 1 eventhandlers.go:186] "Add event for scheduled pod" pod="openshift-marketplace/certified-operators-thtvq" -I0223 11:05:53.461016 1 eventhandlers.go:244] "Delete event for scheduled pod" pod="openshift-marketplace/certified-operators-thtvq" ----- diff --git a/modules/cnf-collecting-low-latency-tuning-debugging-data-for-red-hat-support.adoc b/modules/cnf-collecting-low-latency-tuning-debugging-data-for-red-hat-support.adoc deleted file mode 100644 index 4b6bd4f1b9fd..000000000000 --- a/modules/cnf-collecting-low-latency-tuning-debugging-data-for-red-hat-support.adoc +++ /dev/null @@ -1,99 +0,0 @@ -// CNF-643 Support and debugging tools for CNF -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-low-latency-tuning.adoc - -:_content-type: PROCEDURE -[id="cnf-collecting-low-latency-tuning-debugging-data-for-red-hat-support_{context}"] -= Collecting low latency tuning debugging data for Red Hat Support - -When opening a support case, it is helpful to provide debugging information about your cluster to Red Hat Support. 
- -The `must-gather` tool enables you to collect diagnostic information about your {product-title} cluster, including node tuning, NUMA topology, and other information needed to debug issues with low latency setup. - -For prompt support, supply diagnostic information for both {product-title} and low latency tuning. - -[id="cnf-about-must-gather_{context}"] -== About the must-gather tool - -The `oc adm must-gather` CLI command collects the information from your cluster that is most likely needed for debugging issues, such as: - -* Resource definitions -* Audit logs -* Service logs - -You can specify one or more images when you run the command by including the `--image` argument. When you specify an image, the tool collects data related to that feature or product. When you run `oc adm must-gather`, a new pod is created on the cluster. The data is collected on that pod and saved in a new directory that starts with `must-gather.local`. This directory is created in your current working directory. - -[id="cnf-about-collecting-low-latency-data_{context}"] -== Gathering low latency tuning data - -Use the `oc adm must-gather` CLI command to collect information about your cluster, including features and objects associated with low latency tuning, including: - -* The Node Tuning Operator namespaces and child objects. -* `MachineConfigPool` and associated `MachineConfig` objects. -* The Node Tuning Operator and associated Tuned objects. -* Linux kernel command line options. -* CPU and NUMA topology -* Basic PCI device information and NUMA locality. - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. -* The {product-title} CLI (oc) installed. - -.Procedure - -. Navigate to the directory where you want to store the `must-gather` data. - -. Collect debugging information by running the following command: -+ -[source,terminal] ----- -$ oc adm must-gather ----- -+ -.Example output -+ -[source,terminal] ----- -[must-gather ] OUT Using must-gather plug-in image: quay.io/openshift-release -When opening a support case, bugzilla, or issue please include the following summary data along with any other requested information: -ClusterID: 829er0fa-1ad8-4e59-a46e-2644921b7eb6 -ClusterVersion: Stable at "" -ClusterOperators: - All healthy and stable - - -[must-gather ] OUT namespace/openshift-must-gather-8fh4x created -[must-gather ] OUT clusterrolebinding.rbac.authorization.k8s.io/must-gather-rhlgc created -[must-gather-5564g] POD 2023-07-17T10:17:37.610340849Z Gathering data for ns/openshift-cluster-version... -[must-gather-5564g] POD 2023-07-17T10:17:38.786591298Z Gathering data for ns/default... -[must-gather-5564g] POD 2023-07-17T10:17:39.117418660Z Gathering data for ns/openshift... -[must-gather-5564g] POD 2023-07-17T10:17:39.447592859Z Gathering data for ns/kube-system... -[must-gather-5564g] POD 2023-07-17T10:17:39.803381143Z Gathering data for ns/openshift-etcd... - -... - -Reprinting Cluster State: -When opening a support case, bugzilla, or issue please include the following summary data along with any other requested information: -ClusterID: 829er0fa-1ad8-4e59-a46e-2644921b7eb6 -ClusterVersion: Stable at "" -ClusterOperators: - All healthy and stable ----- - -. Create a compressed file from the `must-gather` directory that was created in your working directory. 
For example, on a computer that uses a Linux operating system, run the following command: -+ -[source,terminal] ----- -$ tar cvaf must-gather.tar.gz must-gather-local.5421342344627712289// <1> ----- -+ -<1> Replace `must-gather-local.5421342344627712289//` with the directory name created by the `must-gather` tool. -+ -[NOTE] -==== -Create a compressed file to attach the data to a support case or to use with the Performance Profile Creator wrapper script when you create a performance profile. -==== - -. Attach the compressed file to your support case on the link:https://access.redhat.com/[Red Hat Customer Portal]. diff --git a/modules/cnf-configure_for_irq_dynamic_load_balancing.adoc b/modules/cnf-configure_for_irq_dynamic_load_balancing.adoc deleted file mode 100644 index 46538da7f3dd..000000000000 --- a/modules/cnf-configure_for_irq_dynamic_load_balancing.adoc +++ /dev/null @@ -1,179 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/cnf-low-latency-tuning.adoc - -:_content-type: PROCEDURE -[id="configuring_for_irq_dynamic_load_balancing_{context}"] -= Configuring a node for IRQ dynamic load balancing - -Configure a cluster node for IRQ dynamic load balancing to control which cores can receive device interrupt requests (IRQ). - -.Prerequisites - -* For core isolation, all server hardware components must support IRQ affinity. To check if the hardware components of your server support IRQ affinity, view the server's hardware specifications or contact your hardware provider. - -.Procedure - -. Log in to the {product-title} cluster as a user with cluster-admin privileges. -. Set the performance profile `apiVersion` to use `performance.openshift.io/v2`. -. Remove the `globallyDisableIrqLoadBalancing` field or set it to `false`. -. Set the appropriate isolated and reserved CPUs. The following snippet illustrates a profile that reserves 2 CPUs. IRQ load-balancing is enabled for pods running on the `isolated` CPU set: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: dynamic-irq-profile -spec: - cpu: - isolated: 2-5 - reserved: 0-1 -... ----- -+ -[NOTE] -==== -When you configure reserved and isolated CPUs, the infra containers in pods use the reserved CPUs and the application containers use the isolated CPUs. -==== - -. Create the pod that uses exclusive CPUs, and set `irq-load-balancing.crio.io` and `cpu-quota.crio.io` annotations to `disable`. For example: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: v1 -kind: Pod -metadata: - name: dynamic-irq-pod - annotations: - irq-load-balancing.crio.io: "disable" - cpu-quota.crio.io: "disable" -spec: - containers: - - name: dynamic-irq-pod - image: "registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version}" - command: ["sleep", "10h"] - resources: - requests: - cpu: 2 - memory: "200M" - limits: - cpu: 2 - memory: "200M" - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" - runtimeClassName: performance-dynamic-irq-profile -... ----- - -. Enter the pod `runtimeClassName` in the form performance-, where is the `name` from the `PerformanceProfile` YAML, in this example, `performance-dynamic-irq-profile`. -. Set the node selector to target a cnf-worker. -. Ensure the pod is running correctly. 
Status should be `running`, and the correct cnf-worker node should be set: -+ -[source,terminal] ----- -$ oc get pod -o wide ----- -+ -.Expected output -+ -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -dynamic-irq-pod 1/1 Running 0 5h33m ----- -. Get the CPUs that the pod configured for IRQ dynamic load balancing runs on: -+ -[source,terminal] ----- -$ oc exec -it dynamic-irq-pod -- /bin/bash -c "grep Cpus_allowed_list /proc/self/status | awk '{print $2}'" ----- -+ -.Expected output -+ -[source,terminal] ----- -Cpus_allowed_list: 2-3 ----- -. Ensure the node configuration is applied correctly. Log in to the node to verify the configuration. -+ -[source,terminal] ----- -$ oc debug node/ ----- -+ -.Expected output -+ -[source,terminal] ----- -Starting pod/-debug ... -To use host binaries, run `chroot /host` - -Pod IP: -If you don't see a command prompt, try pressing enter. - -sh-4.4# ----- - -. Verify that you can use the node file system: -+ -[source,terminal] ----- -sh-4.4# chroot /host ----- -+ -.Expected output -+ -[source,terminal] ----- -sh-4.4# ----- - -. Ensure the default system CPU affinity mask does not include the `dynamic-irq-pod` CPUs, for example, CPUs 2 and 3. -+ -[source,terminal] ----- -$ cat /proc/irq/default_smp_affinity ----- -+ -.Example output -+ -[source,terminal] ----- -33 ----- -. Ensure the system IRQs are not configured to run on the `dynamic-irq-pod` CPUs: -+ -[source,terminal] ----- -find /proc/irq/ -name smp_affinity_list -exec sh -c 'i="$1"; mask=$(cat $i); file=$(echo $i); echo $file: $mask' _ {} \; ----- -+ -.Example output -+ -[source,terminal] ----- -/proc/irq/0/smp_affinity_list: 0-5 -/proc/irq/1/smp_affinity_list: 5 -/proc/irq/2/smp_affinity_list: 0-5 -/proc/irq/3/smp_affinity_list: 0-5 -/proc/irq/4/smp_affinity_list: 0 -/proc/irq/5/smp_affinity_list: 0-5 -/proc/irq/6/smp_affinity_list: 0-5 -/proc/irq/7/smp_affinity_list: 0-5 -/proc/irq/8/smp_affinity_list: 4 -/proc/irq/9/smp_affinity_list: 4 -/proc/irq/10/smp_affinity_list: 0-5 -/proc/irq/11/smp_affinity_list: 0 -/proc/irq/12/smp_affinity_list: 1 -/proc/irq/13/smp_affinity_list: 0-5 -/proc/irq/14/smp_affinity_list: 1 -/proc/irq/15/smp_affinity_list: 0 -/proc/irq/24/smp_affinity_list: 1 -/proc/irq/25/smp_affinity_list: 1 -/proc/irq/26/smp_affinity_list: 1 -/proc/irq/27/smp_affinity_list: 5 -/proc/irq/28/smp_affinity_list: 1 -/proc/irq/29/smp_affinity_list: 0 -/proc/irq/30/smp_affinity_list: 0-5 ----- \ No newline at end of file diff --git a/modules/cnf-configuring-fifo-priority-scheduling-for-ptp.adoc b/modules/cnf-configuring-fifo-priority-scheduling-for-ptp.adoc deleted file mode 100644 index d401f9b4fd4d..000000000000 --- a/modules/cnf-configuring-fifo-priority-scheduling-for-ptp.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="cnf-configuring-fifo-priority-scheduling-for-ptp_{context}"] -= Configuring FIFO priority scheduling for PTP hardware - -In telco or other deployment configurations that require low latency performance, PTP daemon threads run in a constrained CPU footprint alongside the rest of the infrastructure components. By default, PTP threads run with the `SCHED_OTHER` policy. Under high load, these threads might not get the scheduling latency they require for error-free operation. 
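You can check the scheduling policy that the `ptp4l` process currently runs with from a debug shell on the node. The following is a minimal sketch, assuming that `ptp4l` is already running on the node and that the `chrt` and `pidof` utilities are available in the host file system:

[source,terminal]
----
$ oc debug node/compute-1.example.com
sh-4.4# chroot /host
sh-4.4# chrt -p $(pidof ptp4l)
----

While the default configuration is in place, `chrt` reports the scheduling policy as `SCHED_OTHER`.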
- -To mitigate against potential scheduling latency errors, you can configure the PTP Operator `linuxptp` services to allow threads to run with a `SCHED_FIFO` policy. If `SCHED_FIFO` is set for a `PtpConfig` CR, then `ptp4l` and `phc2sys` will run in the parent container under `chrt` with a priority set by the `ptpSchedulingPriority` field of the `PtpConfig` CR. - -[NOTE] -==== -Setting `ptpSchedulingPolicy` is optional, and is only required if you are experiencing latency errors. -==== - -.Procedure - -. Edit the `PtpConfig` CR profile: -+ -[source,terminal] ----- -$ oc edit PtpConfig -n openshift-ptp ----- - -. Change the `ptpSchedulingPolicy` and `ptpSchedulingPriority` fields: -+ -[source,yaml] ----- -apiVersion: ptp.openshift.io/v1 -kind: PtpConfig -metadata: - name: - namespace: openshift-ptp -... -spec: - profile: - - name: "profile1" -... - ptpSchedulingPolicy: SCHED_FIFO <1> - ptpSchedulingPriority: 10 <2> ----- -<1> Scheduling policy for `ptp4l` and `phc2sys` processes. Use `SCHED_FIFO` on systems that support FIFO scheduling. -<2> Required. Sets the integer value 1-65 used to configure FIFO priority for `ptp4l` and `phc2sys` processes. - -. Save and exit to apply the changes to the `PtpConfig` CR. - -.Verification - -. Get the name of the `linuxptp-daemon` pod and corresponding node where the `PtpConfig` CR has been applied: -+ -[source,terminal] ----- -$ oc get pods -n openshift-ptp -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE -linuxptp-daemon-gmv2n 3/3 Running 0 1d17h 10.1.196.24 compute-0.example.com -linuxptp-daemon-lgm55 3/3 Running 0 1d17h 10.1.196.25 compute-1.example.com -ptp-operator-3r4dcvf7f4-zndk7 1/1 Running 0 1d7h 10.129.0.61 control-plane-1.example.com ----- - -. Check that the `ptp4l` process is running with the updated `chrt` FIFO priority: -+ -[source,terminal] ----- -$ oc -n openshift-ptp logs linuxptp-daemon-lgm55 -c linuxptp-daemon-container|grep chrt ----- -+ -.Example output -[source,terminal] ----- -I1216 19:24:57.091872 1600715 daemon.go:285] /bin/chrt -f 65 /usr/sbin/ptp4l -f /var/run/ptp4l.0.config -2 --summary_interval -4 -m ----- - - diff --git a/modules/cnf-configuring-huge-pages.adoc b/modules/cnf-configuring-huge-pages.adoc deleted file mode 100644 index 6b631d888f23..000000000000 --- a/modules/cnf-configuring-huge-pages.adoc +++ /dev/null @@ -1,74 +0,0 @@ -// Module included in the following assemblies: -//CNF-78 (4.4) -// * scalability_and_performance/cnf-low-latency-tuning.adoc - -[id="cnf-configuring-huge-pages_{context}"] -= Configuring huge pages - -Nodes must pre-allocate huge pages used in an {product-title} cluster. Use the Node Tuning Operator to allocate huge pages on a specific node. - -{product-title} provides a method for creating and allocating huge pages. Node Tuning Operator provides an easier method for doing this using the performance profile. - -For example, in the `hugepages` `pages` section of the performance profile, you can specify multiple blocks of `size`, `count`, and, optionally, `node`: - -[source,yaml] ----- -hugepages: - defaultHugepagesSize: "1G" - pages: - - size: "1G" - count: 4 - node: 0 <1> ----- - -<1> `node` is the NUMA node in which the huge pages are allocated. If you omit `node`, the pages are evenly spread across all NUMA nodes. - -[NOTE] -==== -Wait for the relevant machine config pool status that indicates the update is finished. -==== - -These are the only configuration steps you need to do to allocate huge pages. 
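For context, the `hugepages` stanza shown above is one part of a complete performance profile. The following is a sketch only; the profile name, CPU ranges, and node selector are illustrative assumptions that you must replace with values that match your hardware and machine config pool:

[source,yaml]
----
apiVersion: performance.openshift.io/v2
kind: PerformanceProfile
metadata:
  name: hugepages-example <1>
spec:
  cpu:
    isolated: "2-7" <2>
    reserved: "0-1"
  hugepages:
    defaultHugepagesSize: "1G"
    pages:
    - size: "1G"
      count: 4
      node: 0
  nodeSelector:
    node-role.kubernetes.io/worker: ""
----
<1> A hypothetical profile name.
<2> The isolated and reserved CPU ranges are assumptions; adjust them to your CPU topology.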
- - -.Verification - -* To verify the configuration, see the `/proc/meminfo` file on the node: -+ -[source,terminal] ----- -$ oc debug node/ip-10-0-141-105.ec2.internal ----- -+ -[source,terminal] ----- -# grep -i huge /proc/meminfo ----- -+ -.Example output -[source,terminal] ----- -AnonHugePages: ###### ## -ShmemHugePages: 0 kB -HugePages_Total: 2 -HugePages_Free: 2 -HugePages_Rsvd: 0 -HugePages_Surp: 0 -Hugepagesize: #### ## -Hugetlb: #### ## ----- - -* Use `oc describe` to report the new size: -+ -[source,terminal] ----- -$ oc describe node worker-0.ocp4poc.example.com | grep -i huge ----- -+ -.Example output -[source,terminal] ----- - hugepages-1g=true - hugepages-###: ### - hugepages-###: ### ----- diff --git a/modules/cnf-configuring-log-filtering-for-linuxptp.adoc b/modules/cnf-configuring-log-filtering-for-linuxptp.adoc deleted file mode 100644 index 8d7de05a016b..000000000000 --- a/modules/cnf-configuring-log-filtering-for-linuxptp.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="cnf-configuring-log-filtering-for-linuxptp_{context}"] -= Configuring log filtering for linuxptp services - -The `linuxptp` daemon generates logs that you can use for debugging purposes. In telco or other deployment configurations that feature a limited storage capacity, these logs can add to the storage demand. - -To reduce the number log messages, you can configure the `PtpConfig` custom resource (CR) to exclude log messages that report the `master offset` value. The `master offset` log message reports the difference between the current node's clock and the master clock in nanoseconds. - -.Prerequisites -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -* Install the PTP Operator. - -.Procedure - -. Edit the `PtpConfig` CR: -+ -[source,terminal] ----- -$ oc edit PtpConfig -n openshift-ptp ----- - -. In `spec.profile`, add the `ptpSettings.logReduce` specification and set the value to `true`: -+ -[source,yaml] ----- -apiVersion: ptp.openshift.io/v1 -kind: PtpConfig -metadata: - name: - namespace: openshift-ptp -... -spec: - profile: - - name: "profile1" -... - ptpSettings: - logReduce: "true" ----- -+ -[NOTE] -==== -For debugging purposes, you can revert this specification to `False` to include the master offset messages. -==== - -. Save and exit to apply the changes to the `PtpConfig` CR. - -.Verification - -. Get the name of the `linuxptp-daemon` pod and corresponding node where the `PtpConfig` CR has been applied: -+ -[source,terminal] ----- -$ oc get pods -n openshift-ptp -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE -linuxptp-daemon-gmv2n 3/3 Running 0 1d17h 10.1.196.24 compute-0.example.com -linuxptp-daemon-lgm55 3/3 Running 0 1d17h 10.1.196.25 compute-1.example.com -ptp-operator-3r4dcvf7f4-zndk7 1/1 Running 0 1d7h 10.129.0.61 control-plane-1.example.com ----- - -. Verify that master offset messages are excluded from the logs by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-ptp logs -c linuxptp-daemon-container | grep "master offset" <1> ----- -<1> is the name of the `linuxptp-daemon` pod, for example `linuxptp-daemon-gmv2n`. -+ -When you configure the `logReduce` specification, this command does not report any instances of `master offset` in the logs of the `linuxptp` daemon. 
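+
As an additional spot check, you can count the matching log lines instead. This sketch reuses the example pod name `linuxptp-daemon-gmv2n` from the previous step; when log filtering is active, the expected count is `0`:
+
[source,terminal]
----
$ oc -n openshift-ptp logs linuxptp-daemon-gmv2n -c linuxptp-daemon-container | grep -c "master offset"
----
+
.Example output
[source,terminal]
----
0
----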
\ No newline at end of file diff --git a/modules/cnf-configuring-node-groups-for-the-numaresourcesoperator.adoc b/modules/cnf-configuring-node-groups-for-the-numaresourcesoperator.adoc deleted file mode 100644 index b1c89e14c665..000000000000 --- a/modules/cnf-configuring-node-groups-for-the-numaresourcesoperator.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_content-type: PROCEDURE - -[id="cnf-configuring-node-groups-for-the-numaresourcesoperator_{context}"] -= Optional: Configuring polling operations for NUMA resources updates - -The daemons controlled by the NUMA Resources Operator in their `nodeGroup` poll resources to retrieve updates about available NUMA resources. You can fine-tune polling operations for these daemons by configuring the `spec.nodeGroups` specification in the `NUMAResourcesOperator` custom resource (CR). This provides advanced control of polling operations. Configure these specifications to improve scheduling behaviour and troubleshoot suboptimal scheduling decisions. - -The configuration options are the following: - -* `infoRefreshMode`: Determines the trigger condition for polling the kubelet. The NUMA Resources Operator reports the resulting information to the API server. -* `infoRefreshPeriod`: Determines the duration between polling updates. -* `podsFingerprinting`: Determines if point-in-time information for the current set of pods running on a node is exposed in polling updates. -+ -[NOTE] -==== -`podsFingerprinting` is enabled by default. `podsFingerprinting` is a requirement for the `cacheResyncPeriod` specification in the `NUMAResourcesScheduler` CR. The `cacheResyncPeriod` specification helps to report more exact resource availability by monitoring pending resources on nodes. -==== - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. -* Install the NUMA Resources Operator. - -.Procedure - -* Configure the `spec.nodeGroups` specification in your `NUMAResourcesOperator` CR: -+ -[source,yaml] ----- -apiVersion: nodetopology.openshift.io/v1 -kind: NUMAResourcesOperator -metadata: - name: numaresourcesoperator -spec: - nodeGroups: - - config: - infoRefreshMode: Periodic <1> - infoRefreshPeriod: 10s <2> - podsFingerprinting: Enabled <3> - name: worker ----- -<1> Valid values are `Periodic`, `Events`, `PeriodicAndEvents`. Use `Periodic` to poll the kubelet at intervals that you define in `infoRefreshPeriod`. Use `Events` to poll the kubelet at every pod lifecycle event. Use `PeriodicAndEvents` to enable both methods. -<2> Define the polling interval for `Periodic` or `PeriodicAndEvents` refresh modes. The field is ignored if the refresh mode is `Events`. -<3> Valid values are `Enabled` or `Disabled`. Setting to `Enabled` is a requirement for the `cacheResyncPeriod` specification in the `NUMAResourcesScheduler`. - -.Verification - -. After you deploy the NUMA Resources Operator, verify that the node group configurations were applied by running the following command: -+ -[source,terminal] ----- -$ oc get numaresop numaresourcesoperator -o json | jq '.status' ----- -+ -.Example output -[source,terminal] ----- - ... - - "config": { - "infoRefreshMode": "Periodic", - "infoRefreshPeriod": "10s", - "podsFingerprinting": "Enabled" - }, - "name": "worker" - - ... 
----- diff --git a/modules/cnf-configuring-the-ptp-fast-event-publisher.adoc b/modules/cnf-configuring-the-ptp-fast-event-publisher.adoc deleted file mode 100644 index 0eab42101b5f..000000000000 --- a/modules/cnf-configuring-the-ptp-fast-event-publisher.adoc +++ /dev/null @@ -1,88 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="cnf-configuring-the-ptp-fast-event-publisher_{context}"] -= Configuring the PTP fast event notifications publisher - -To start using PTP fast event notifications for a network interface in your cluster, you must enable the fast event publisher in the PTP Operator `PtpOperatorConfig` custom resource (CR) and configure `ptpClockThreshold` values in a `PtpConfig` CR that you create. - -.Prerequisites - -* You have installed the {product-title} CLI (`oc`). - -* You have logged in as a user with `cluster-admin` privileges. - -* You have installed the PTP Operator. - -* When you use HTTP events transport, configure dynamic volume provisioning in the cluster or manually create `StorageClass`, `LocalVolume`, and `PersistentVolume` resources to persist the events subscription. -+ -[NOTE] -==== -When you enable dynamic volume provisioning in the cluster, a `PersistentVolume` resource is automatically created for the `PersistentVolumeClaim` that the PTP Operator deploys. - -For more information about manually creating persistent storage in the cluster, see "Persistent storage using local volumes". -==== - -.Procedure - -. Modify the default PTP Operator config to enable PTP fast events. - -.. Save the following YAML in the `ptp-operatorconfig.yaml` file: -+ -[source,yaml] ----- -apiVersion: ptp.openshift.io/v1 -kind: PtpOperatorConfig -metadata: - name: default - namespace: openshift-ptp -spec: - daemonNodeSelector: - node-role.kubernetes.io/worker: "" - ptpEventConfig: - enableEventPublisher: true <1> - storageType: "example-storage-class" <2> ----- -<1> Set `enableEventPublisher` to `true` to enable PTP fast event notifications. -<2> Use the value that you set for `storageType` to populate the `StorageClassName` field for the `PersistentVolumeClaim` (`PVC`) resource that the PTP Operator automatically deploys. -The `PVC` resource is used to persist consumer event subscriptions. -+ -[NOTE] -==== -In {product-title} 4.13 or later, you do not need to set the `spec.ptpEventConfig.transportHost` field in the `PtpOperatorConfig` resource when you use HTTP transport for PTP events. -Set `transportHost` only when you use AMQP transport for PTP events. - -The value that you set for `.spec.storageType` in the `PtpOperatorConfig` CR must match the `storageClassName` that is set in the `PersistentVolume` CR. -If `storageType` is not set and the `transportHost` uses HTTP, the PTP daemons are not deployed. -==== - -.. Update the `PtpOperatorConfig` CR: -+ -[source,terminal] ----- -$ oc apply -f ptp-operatorconfig.yaml ----- - -. Create a `PtpConfig` custom resource (CR) for the PTP enabled interface, and set the required values for `ptpClockThreshold` and `ptp4lOpts`. 
-The following YAML illustrates the required values that you must set in the `PtpConfig` CR: -+ -[source,yaml] ----- -spec: - profile: - - name: "profile1" - interface: "enp5s0f0" - ptp4lOpts: "-2 -s --summary_interval -4" <1> - phc2sysOpts: "-a -r -m -n 24 -N 8 -R 16" <2> - ptp4lConf: "" <3> - ptpClockThreshold: <4> - holdOverTimeout: 5 - maxOffsetThreshold: 100 - minOffsetThreshold: -100 ----- -<1> Append `--summary_interval -4` to use PTP fast events. -<2> Required `phc2sysOpts` values. `-m` prints messages to `stdout`. The `linuxptp-daemon` `DaemonSet` parses the logs and generates Prometheus metrics. -<3> Specify a string that contains the configuration to replace the default `/etc/ptp4l.conf` file. To use the default configuration, leave the field empty. -<4> Optional. If the `ptpClockThreshold` stanza is not present, default values are used for the `ptpClockThreshold` fields. The stanza shows default `ptpClockThreshold` values. The `ptpClockThreshold` values configure how long after the PTP master clock is disconnected before PTP events are triggered. `holdOverTimeout` is the time value in seconds before the PTP clock event state changes to `FREERUN` when the PTP master clock is disconnected. The `maxOffsetThreshold` and `minOffsetThreshold` settings configure offset values in nanoseconds that compare against the values for `CLOCK_REALTIME` (`phc2sys`) or master offset (`ptp4l`). When the `ptp4l` or `phc2sys` offset value is outside this range, the PTP clock state is set to `FREERUN`. When the offset value is within this range, the PTP clock state is set to `LOCKED`. diff --git a/modules/cnf-configuring-workload-hints.adoc b/modules/cnf-configuring-workload-hints.adoc deleted file mode 100644 index 52a39eec5873..000000000000 --- a/modules/cnf-configuring-workload-hints.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/cnf-low-latency-tuning.adoc - -:_content-type: CONCEPT -[id="configuring-workload-hints_{context}"] -= Configuring workload hints manually - -.Procedure - -. Create a `PerformanceProfile` appropriate for the environment's hardware and topology as described in the table in "Understanding workload hints". Adjust the profile to match the expected workload. In this example, we tune for the lowest possible latency. - -. Add the `highPowerConsumption` and `realTime` workload hints. Both are set to `true` here. -+ -[source,yaml] ----- - apiVersion: performance.openshift.io/v2 - kind: PerformanceProfile - metadata: - name: workload-hints - spec: - ... - workloadHints: - highPowerConsumption: true <1> - realTime: true <2> ----- -<1> If `highPowerConsumption` is `true`, the node is tuned for very low latency at the cost of increased power consumption. -<2> Disables some debugging and monitoring features that can affect system latency. diff --git a/modules/cnf-cpu-infra-container.adoc b/modules/cnf-cpu-infra-container.adoc deleted file mode 100644 index 819f524ae863..000000000000 --- a/modules/cnf-cpu-infra-container.adoc +++ /dev/null @@ -1,85 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/cnf-low-latency-tuning.adoc - -:_content-type: PROCEDURE -[id="cnf-cpu-infra-container_{context}"] -= Restricting CPUs for infra and application containers - -Generic housekeeping and workload tasks use CPUs in a way that may impact latency-sensitive processes. 
By default, the container runtime uses all online CPUs to run all containers together, which can result in context switches and spikes in latency. Partitioning the CPUs prevents noisy processes from interfering with latency-sensitive processes by separating them from each other. The following table describes how processes run on a CPU after you have tuned the node using the Node Tuning Operator: - -.Process' CPU assignments -[%header,cols=2*] -|=== -|Process type -|Details - -|`Burstable` and `BestEffort` pods -|Runs on any CPU except where low latency workload is running - -|Infrastructure pods -|Runs on any CPU except where low latency workload is running - -|Interrupts -|Redirects to reserved CPUs (optional in {product-title} 4.7 and later) - -|Kernel processes -|Pins to reserved CPUs - -|Latency-sensitive workload pods -|Pins to a specific set of exclusive CPUs from the isolated pool - -|OS processes/systemd services -|Pins to reserved CPUs -|=== - -The allocatable capacity of cores on a node for pods of all QoS process types, `Burstable`, `BestEffort`, or `Guaranteed`, is equal to the capacity of the isolated pool. The capacity of the reserved pool is removed from the node's total core capacity for use by the cluster and operating system housekeeping duties. - -.Example 1 -A node features a capacity of 100 cores. Using a performance profile, the cluster administrator allocates 50 cores to the isolated pool and 50 cores to the reserved pool. The cluster administrator assigns 25 cores to QoS `Guaranteed` pods and 25 cores for `BestEffort` or `Burstable` pods. This matches the capacity of the isolated pool. - -.Example 2 -A node features a capacity of 100 cores. Using a performance profile, the cluster administrator allocates 50 cores to the isolated pool and 50 cores to the reserved pool. The cluster administrator assigns 50 cores to QoS `Guaranteed` pods and one core for `BestEffort` or `Burstable` pods. This exceeds the capacity of the isolated pool by one core. Pod scheduling fails because of insufficient CPU capacity. - - -The exact partitioning pattern to use depends on many factors like hardware, workload characteristics and the expected system load. Some sample use cases are as follows: - -* If the latency-sensitive workload uses specific hardware, such as a network interface controller (NIC), ensure that the CPUs in the isolated pool are as close as possible to this hardware. At a minimum, you should place the workload in the same Non-Uniform Memory Access (NUMA) node. - -* The reserved pool is used for handling all interrupts. When depending on system networking, allocate a sufficiently-sized reserve pool to handle all the incoming packet interrupts. In {product-version} and later versions, workloads can optionally be labeled as sensitive. - -The decision regarding which specific CPUs should be used for reserved and isolated partitions requires detailed analysis and measurements. Factors like NUMA affinity of devices and memory play a role. The selection also depends on the workload architecture and the specific use case. - -[IMPORTANT] -==== -The reserved and isolated CPU pools must not overlap and together must span all available cores in the worker node. -==== - -To ensure that housekeeping tasks and workloads do not interfere with each other, specify two groups of CPUs in the `spec` section of the performance profile. - -* `isolated` - Specifies the CPUs for the application container workloads. These CPUs have the lowest latency. 
Processes in this group have no interruptions and can, for example, reach much higher DPDK zero packet loss bandwidth. - -* `reserved` - Specifies the CPUs for the cluster and operating system housekeeping duties. Threads in the `reserved` group are often busy. Do not run latency-sensitive applications in the `reserved` group. Latency-sensitive applications run in the `isolated` group. - -.Procedure - -. Create a performance profile appropriate for the environment's hardware and topology. - -. Add the `reserved` and `isolated` parameters with the CPUs you want reserved and isolated for the infra and application containers: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: infra-cpus -spec: - cpu: - reserved: "0-4,9" <1> - isolated: "5-8" <2> - nodeSelector: <3> - node-role.kubernetes.io/worker: "" ----- -<1> Specify which CPUs are for infra containers to perform cluster and operating system housekeeping duties. -<2> Specify which CPUs are for application containers to run workloads. -<3> Optional: Specify a node selector to apply the performance profile to specific nodes. diff --git a/modules/cnf-creating-nrop-cr-with-manual-performance-settings.adoc b/modules/cnf-creating-nrop-cr-with-manual-performance-settings.adoc deleted file mode 100644 index 6067f8f474f4..000000000000 --- a/modules/cnf-creating-nrop-cr-with-manual-performance-settings.adoc +++ /dev/null @@ -1,93 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_module-type: PROCEDURE -[id="cnf-creating-nrop-cr-with-manual-performance-settings_{context}"] -= Creating the NUMAResourcesOperator custom resource with manual performance settings - -When you have installed the NUMA Resources Operator, then create the `NUMAResourcesOperator` custom resource (CR) that instructs the NUMA Resources Operator to install all the cluster infrastructure needed to support the NUMA-aware scheduler, including daemon sets and APIs. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. -* Install the NUMA Resources Operator. - -.Procedure - -. Optional: Create the `MachineConfigPool` custom resource that enables custom kubelet configurations for worker nodes: -+ -[NOTE] -==== -By default, {product-title} creates a `MachineConfigPool` resource for worker nodes in the cluster. You can create a custom `MachineConfigPool` resource if required. -==== - -.. Save the following YAML in the `nro-machineconfig.yaml` file: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - labels: - cnf-worker-tuning: enabled - machineconfiguration.openshift.io/mco-built-in: "" - pools.operator.machineconfiguration.openshift.io/worker: "" - name: worker -spec: - machineConfigSelector: - matchLabels: - machineconfiguration.openshift.io/role: worker - nodeSelector: - matchLabels: - node-role.kubernetes.io/worker: "" ----- - -.. Create the `MachineConfigPool` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-machineconfig.yaml ----- - -. Create the `NUMAResourcesOperator` custom resource: - -.. 
Save the following YAML in the `nrop.yaml` file: -+ -[source,yaml] ----- -apiVersion: nodetopology.openshift.io/v1 -kind: NUMAResourcesOperator -metadata: - name: numaresourcesoperator -spec: - nodeGroups: - - machineConfigPoolSelector: - matchLabels: - pools.operator.machineconfiguration.openshift.io/worker: "" <1> ----- -<1> Should match the label applied to worker nodes in the related `MachineConfigPool` CR. - -.. Create the `NUMAResourcesOperator` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nrop.yaml ----- - -.Verification - -* Verify that the NUMA Resources Operator deployed successfully by running the following command: -+ -[source,terminal] ----- -$ oc get numaresourcesoperators.nodetopology.openshift.io ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -numaresourcesoperator 10m ----- diff --git a/modules/cnf-creating-nrop-cr.adoc b/modules/cnf-creating-nrop-cr.adoc deleted file mode 100644 index 140716e9175e..000000000000 --- a/modules/cnf-creating-nrop-cr.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_module-type: PROCEDURE -[id="cnf-creating-nrop-cr_{context}"] -= Creating the NUMAResourcesOperator custom resource - -When you have installed the NUMA Resources Operator, then create the `NUMAResourcesOperator` custom resource (CR) that instructs the NUMA Resources Operator to install all the cluster infrastructure needed to support the NUMA-aware scheduler, including daemon sets and APIs. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. -* Install the NUMA Resources Operator. - -.Procedure - -. Create the `NUMAResourcesOperator` custom resource: - -.. Save the following YAML in the `nrop.yaml` file: -+ -[source,yaml] ----- -apiVersion: nodetopology.openshift.io/v1 -kind: NUMAResourcesOperator -metadata: - name: numaresourcesoperator -spec: - nodeGroups: - - machineConfigPoolSelector: - matchLabels: - pools.operator.machineconfiguration.openshift.io/worker: "" ----- - -.. Create the `NUMAResourcesOperator` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nrop.yaml ----- - -.Verification - -* Verify that the NUMA Resources Operator deployed successfully by running the following command: -+ -[source,terminal] ----- -$ oc get numaresourcesoperators.nodetopology.openshift.io ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -numaresourcesoperator 10m ----- diff --git a/modules/cnf-creating-the-performance-profile-object.adoc b/modules/cnf-creating-the-performance-profile-object.adoc deleted file mode 100644 index be2f3dd0f761..000000000000 --- a/modules/cnf-creating-the-performance-profile-object.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-78 -// * scalability_and_performance/cnf-low-latency-tuning.adoc - -[id="cnf-creating-the-performance-profile-object_{context}"] -= Creating the PerformanceProfile object - -Create the `PerformanceProfile` object using the object that is posted to the cluster. -After you have specified your settings, the `PerformanceProfile` object is compiled into multiple objects: - -* A `Machine.Config` file that manipulates the nodes. -* A `KubeletConfig` file that configures the Topology Manager, the CPU Manager, and the {product-title} nodes. -* The Tuned profile that configures the Node Tuning Operator. - -.Procedure - -. Prepare a cluster. - -. 
Create a Machine ConfigPool. - -. Install the Performance Profile Operator. - -. Create a performance profile that is appropriate for your hardware and topology. -In the performance profile, you can specify whether to update the kernel to kernel-rt, the CPUs that -will be reserved for housekeeping, and CPUs that will be used for running the workloads. -+ -This is a typical performance profile: -+ ----- -apiversion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: -spec: - cpu: - isolated: “1-3” - reserved: “0” - hugepages: - defaultHugepagesSize: “1Gi” - pages: -size: “1Gi” - count: 4 - node: 0 -realTimeKernel: - enabled: true - numa: - topologyPolicy: “best-effort” ----- - -. Specify two groups of CPUs in the `spec` section: -+ -`isolated` - Has the lowest latency. Because processes in this group have no interruptions, there is zero packet loss. -+ -`reserved` - The housekeeping CPUs. Threads in the reserved group tend to be very busy, so latency-sensitive -applications should be run in the isolated group. -See link:https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed[Create a pod that gets assigned a QoS class of `Guaranteed`]. - -For example, you can reserve cores (threads) from a single NUMA node and put your workloads on another NUMA node. -The reason for this is that the housekeeping CPUs may be touching caches in the CPU. -Keeping your workloads on a separate NUMA node prevents the nodes from interfering with each other. -Additionally, each NUMA node has its own memory bus that is not shared. diff --git a/modules/cnf-debugging-low-latency-cnf-tuning-status.adoc b/modules/cnf-debugging-low-latency-cnf-tuning-status.adoc deleted file mode 100644 index c4b7582cac5f..000000000000 --- a/modules/cnf-debugging-low-latency-cnf-tuning-status.adoc +++ /dev/null @@ -1,121 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-303 (4.5) -// scalability_and_performance/cnf-low-latency-tuning.adoc -//CNF-303 Performance add-ons status CNF-372 -//Performance Addon Operator Detailed Status -//See: https://issues.redhat.com/browse/CNF-379 (Yanir Quinn) - -[id="cnf-debugging-low-latency-cnf-tuning-status_{context}"] -= Debugging low latency CNF tuning status - -The `PerformanceProfile` custom resource (CR) contains status fields for reporting tuning status and debugging latency degradation issues. These fields report on conditions that describe the state of the operator's reconciliation functionality. - -A typical issue can arise when the status of machine config pools that are attached to the performance profile are in a degraded state, causing the `PerformanceProfile` status to degrade. In this case, the machine config pool issues a failure message. 
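A quick way to inspect these conditions without reading the whole object is to query the status field directly. The following is a sketch that assumes a performance profile named `performance`, as in the examples later in this section:

[source,terminal]
----
$ oc get performanceprofile performance -o jsonpath='{range .status.conditions[*]}{.type}={.status}{"\n"}{end}'
----

.Example output
[source,terminal]
----
Available=True
Upgradeable=True
Progressing=False
Degraded=False
----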
- -The Node Tuning Operator contains the `performanceProfile.spec.status.Conditions` status field: - -[source,bash] ----- -Status: - Conditions: - Last Heartbeat Time: 2020-06-02T10:01:24Z - Last Transition Time: 2020-06-02T10:01:24Z - Status: True - Type: Available - Last Heartbeat Time: 2020-06-02T10:01:24Z - Last Transition Time: 2020-06-02T10:01:24Z - Status: True - Type: Upgradeable - Last Heartbeat Time: 2020-06-02T10:01:24Z - Last Transition Time: 2020-06-02T10:01:24Z - Status: False - Type: Progressing - Last Heartbeat Time: 2020-06-02T10:01:24Z - Last Transition Time: 2020-06-02T10:01:24Z - Status: False - Type: Degraded ----- - -The `Status` field contains `Conditions` that specify `Type` values that indicate the status of the performance profile: - -`Available`:: All machine configs and Tuned profiles have been created successfully and are available for cluster components are responsible to process them (NTO, MCO, Kubelet). - -`Upgradeable`:: Indicates whether the resources maintained by the Operator are in a state that is safe to upgrade. - -`Progressing`:: Indicates that the deployment process from the performance profile has started. - -`Degraded`:: Indicates an error if: -+ -* Validation of the performance profile has failed. -* Creation of all relevant components did not complete successfully. - -Each of these types contain the following fields: - -`Status`:: The state for the specific type (`true` or `false`). -`Timestamp`:: The transaction timestamp. -`Reason string`:: The machine readable reason. -`Message string`:: The human readable reason describing the state and error details, if any. - -[id="cnf-debugging-low-latency-cnf-tuning-status-machineconfigpools_{context}"] -== Machine config pools - -A performance profile and its created products are applied to a node according to an associated machine config pool (MCP). The MCP holds valuable information about the progress of applying the machine configurations created by performance profiles that encompass kernel args, kube config, huge pages allocation, and deployment of rt-kernel. The Performance Profile controller monitors changes in the MCP and updates the performance profile status accordingly. - -The only conditions returned by the MCP to the performance profile status is when the MCP is `Degraded`, which leads to `performaceProfile.status.condition.Degraded = true`. - -.Example - -The following example is for a performance profile with an associated machine config pool (`worker-cnf`) that was created for it: - -. The associated machine config pool is in a degraded state: -+ -[source,terminal] ----- -# oc get mcp ----- -+ -.Example output -+ -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-2ee57a93fa6c9181b546ca46e1571d2d True False False 3 3 3 0 2d21h -worker rendered-worker-d6b2bdc07d9f5a59a6b68950acf25e5f True False False 2 2 2 0 2d21h -worker-cnf rendered-worker-cnf-6c838641b8a08fff08dbd8b02fb63f7c False True True 2 1 1 1 2d20h ----- - -. The `describe` section of the MCP shows the reason: -+ -[source,terminal] ----- -# oc describe mcp worker-cnf ----- -+ -.Example output -+ -[source,terminal] ----- - Message: Node node-worker-cnf is reporting: "prepping update: - machineconfig.machineconfiguration.openshift.io \"rendered-worker-cnf-40b9996919c08e335f3ff230ce1d170\" not - found" - Reason: 1 nodes are reporting degraded status on sync ----- - -. 
The degraded state should also appear under the performance profile `status` field marked as `degraded = true`: -+ -[source,terminal] ----- -# oc describe performanceprofiles performance ----- -+ -.Example output -+ -[source,terminal] ----- -Message: Machine config pool worker-cnf Degraded Reason: 1 nodes are reporting degraded status on sync. -Machine config pool worker-cnf Degraded Message: Node yquinn-q8s5v-w-b-z5lqn.c.openshift-gce-devel.internal is -reporting: "prepping update: machineconfig.machineconfiguration.openshift.io -\"rendered-worker-cnf-40b9996919c08e335f3ff230ce1d170\" not found". Reason: MCPDegraded - Status: True - Type: Degraded ----- diff --git a/modules/cnf-deploying-the-numa-aware-scheduler-with-manual-performance-settings.adoc b/modules/cnf-deploying-the-numa-aware-scheduler-with-manual-performance-settings.adoc deleted file mode 100644 index 9f523817fb82..000000000000 --- a/modules/cnf-deploying-the-numa-aware-scheduler-with-manual-performance-settings.adoc +++ /dev/null @@ -1,124 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_module-type: PROCEDURE -[id="cnf-deploying-the-numa-aware-scheduler-with-manual-performance-settings_{context}"] -= Deploying the NUMA-aware secondary pod scheduler with manual performance settings - -After you install the NUMA Resources Operator, do the following to deploy the NUMA-aware secondary pod scheduler: - -* Configure the pod admittance policy for the required machine profile - -* Create the required machine config pool - -* Deploy the NUMA-aware secondary scheduler - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -* Install the NUMA Resources Operator. - -.Procedure -. Create the `KubeletConfig` custom resource that configures the pod admittance policy for the machine profile: - -.. Save the following YAML in the `nro-kubeletconfig.yaml` file: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: cnf-worker-tuning -spec: - machineConfigPoolSelector: - matchLabels: - cnf-worker-tuning: enabled - kubeletConfig: - cpuManagerPolicy: "static" <1> - cpuManagerReconcilePeriod: "5s" - reservedSystemCPUs: "0,1" - memoryManagerPolicy: "Static" <2> - evictionHard: - memory.available: "100Mi" - kubeReserved: - memory: "512Mi" - reservedMemory: - - numaNode: 0 - limits: - memory: "1124Mi" - systemReserved: - memory: "512Mi" - topologyManagerPolicy: "single-numa-node" <3> - topologyManagerScope: "pod" ----- -<1> For `cpuManagerPolicy`, `static` must use a lowercase `s`. -<2> For `memoryManagerPolicy`, `Static` must use an uppercase `S`. -<3> `topologyManagerPolicy` must be set to `single-numa-node`. - -.. Create the `KubeletConfig` custom resource (CR) by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-kubeletconfig.yaml ----- - -. Create the `NUMAResourcesScheduler` custom resource that deploys the NUMA-aware custom pod scheduler: - -.. Save the following YAML in the `nro-scheduler.yaml` file: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: nodetopology.openshift.io/v1 -kind: NUMAResourcesScheduler -metadata: - name: numaresourcesscheduler -spec: - imageSpec: "registry.redhat.io/openshift4/noderesourcetopology-scheduler-container-rhel8:v{product-version}" - cacheResyncPeriod: "5s" <1> ----- -<1> Enter an interval value in seconds for synchronization of the scheduler cache. 
A value of `5s` is typical for most implementations. -+ -[NOTE] -==== -* Enable the `cacheResyncPeriod` specification to help the NUMA Resource Operator report more exact resource availability by monitoring pending resources on nodes and synchronizing this information in the scheduler cache at a defined interval. This also helps to minimize `Topology Affinity Error` errors because of sub-optimal scheduling decisions. The lower the interval the greater the network load. The `cacheResyncPeriod` specification is disabled by default. - -* Setting a value of `Enabled` for the `podsFingerprinting` specification in the `NUMAResourcesOperator` CR is a requirement for the implementation of the `cacheResyncPeriod` specification. -==== - -.. Create the `NUMAResourcesScheduler` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-scheduler.yaml ----- - -.Verification - -* Verify that the required resources deployed successfully by running the following command: -+ -[source,terminal] ----- -$ oc get all -n openshift-numaresources ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -pod/numaresources-controller-manager-7575848485-bns4s 1/1 Running 0 13m -pod/numaresourcesoperator-worker-dvj4n 2/2 Running 0 16m -pod/numaresourcesoperator-worker-lcg4t 2/2 Running 0 16m -pod/secondary-scheduler-56994cf6cf-7qf4q 1/1 Running 0 16m -NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE -daemonset.apps/numaresourcesoperator-worker 2 2 2 2 2 node-role.kubernetes.io/worker= 16m -NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/numaresources-controller-manager 1/1 1 1 13m -deployment.apps/secondary-scheduler 1/1 1 1 16m -NAME DESIRED CURRENT READY AGE -replicaset.apps/numaresources-controller-manager-7575848485 1 1 1 13m -replicaset.apps/secondary-scheduler-56994cf6cf 1 1 1 16m ----- diff --git a/modules/cnf-deploying-the-numa-aware-scheduler.adoc b/modules/cnf-deploying-the-numa-aware-scheduler.adoc deleted file mode 100644 index 4d30c1bc3da4..000000000000 --- a/modules/cnf-deploying-the-numa-aware-scheduler.adoc +++ /dev/null @@ -1,123 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_module-type: PROCEDURE -[id="cnf-deploying-the-numa-aware-scheduler_{context}"] -= Deploying the NUMA-aware secondary pod scheduler - -After you install the NUMA Resources Operator, do the following to deploy the NUMA-aware secondary pod scheduler: - -* Configure the performance profile. - -* Deploy the NUMA-aware secondary scheduler. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -* Create the required machine config pool. - -* Install the NUMA Resources Operator. - -.Procedure - -. Create the `PerformanceProfile` custom resource (CR): - -.. Save the following YAML in the `nro-perfprof.yaml` file: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: perfprof-nrop -spec: - cpu: <1> - isolated: "4-51,56-103" - reserved: "0,1,2,3,52,53,54,55" - nodeSelector: - node-role.kubernetes.io/worker: "" - numa: - topologyPolicy: single-numa-node ----- -<1> The `cpu.isolated` and `cpu.reserved` specifications define ranges for isolated and reserved CPUs. Enter valid values for your CPU configuration. See the _Additional resources_ section for more information about configuring a performance profile. - -.. 
Create the `PerformanceProfile` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-perfprof.yaml ----- -+ -.Example output -[source,terminal] ----- -performanceprofile.performance.openshift.io/perfprof-nrop created ----- - -. Create the `NUMAResourcesScheduler` custom resource that deploys the NUMA-aware custom pod scheduler: - -.. Save the following YAML in the `nro-scheduler.yaml` file: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: nodetopology.openshift.io/v1 -kind: NUMAResourcesScheduler -metadata: - name: numaresourcesscheduler -spec: - imageSpec: "registry.redhat.io/openshift4/noderesourcetopology-scheduler-container-rhel8:v{product-version}" - cacheResyncPeriod: "5s" <1> ----- -<1> Enter an interval value in seconds for synchronization of the scheduler cache. A value of `5s` is typical for most implementations. -+ -[NOTE] -==== -* Enable the `cacheResyncPeriod` specification to help the NUMA Resource Operator report more exact resource availability by monitoring pending resources on nodes and synchronizing this information in the scheduler cache at a defined interval. This also helps to minimize `Topology Affinity Error` errors because of sub-optimal scheduling decisions. The lower the interval the greater the network load. The `cacheResyncPeriod` specification is disabled by default. - -* Setting a value of `Enabled` for the `podsFingerprinting` specification in the `NUMAResourcesOperator` CR is a requirement for the implementation of the `cacheResyncPeriod` specification. -==== - -.. Create the `NUMAResourcesScheduler` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-scheduler.yaml ----- - -.Verification - -. Verify that the performance profile was applied by running the following command: -+ -[source,terminal] ----- -$ oc describe performanceprofile ----- - -. Verify that the required resources deployed successfully by running the following command: -+ -[source,terminal] ----- -$ oc get all -n openshift-numaresources ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -pod/numaresources-controller-manager-7575848485-bns4s 1/1 Running 0 13m -pod/numaresourcesoperator-worker-dvj4n 2/2 Running 0 16m -pod/numaresourcesoperator-worker-lcg4t 2/2 Running 0 16m -pod/secondary-scheduler-56994cf6cf-7qf4q 1/1 Running 0 16m -NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE -daemonset.apps/numaresourcesoperator-worker 2 2 2 2 2 node-role.kubernetes.io/worker= 16m -NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/numaresources-controller-manager 1/1 1 1 13m -deployment.apps/secondary-scheduler 1/1 1 1 16m -NAME DESIRED CURRENT READY AGE -replicaset.apps/numaresources-controller-manager-7575848485 1 1 1 13m -replicaset.apps/secondary-scheduler-56994cf6cf 1 1 1 16m ----- diff --git a/modules/cnf-disable-chronyd.adoc b/modules/cnf-disable-chronyd.adoc deleted file mode 100644 index 671a726fb8e2..000000000000 --- a/modules/cnf-disable-chronyd.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="cnf-disable-chronyd_{context}"] -= Disabling the chrony time service - -You can disable the chrony time service (`chronyd`) for nodes with a specific role by using a `MachineConfig` custom resource (CR). - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. 
Create the `MachineConfig` CR that disables `chronyd` for the specified node role. - -.. Save the following YAML in the `disable-chronyd.yaml` file: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: <1> - name: disable-chronyd -spec: - config: - ignition: - version: 3.2.0 - systemd: - units: - - contents: | - [Unit] - Description=NTP client/server - Documentation=man:chronyd(8) man:chrony.conf(5) - After=ntpdate.service sntp.service ntpd.service - Conflicts=ntpd.service systemd-timesyncd.service - ConditionCapability=CAP_SYS_TIME - [Service] - Type=forking - PIDFile=/run/chrony/chronyd.pid - EnvironmentFile=-/etc/sysconfig/chronyd - ExecStart=/usr/sbin/chronyd $OPTIONS - ExecStartPost=/usr/libexec/chrony-helper update-daemon - PrivateTmp=yes - ProtectHome=yes - ProtectSystem=full - [Install] - WantedBy=multi-user.target - enabled: false - name: "chronyd.service" ----- -<1> Node role where you want to disable `chronyd`, for example, `master`. - -.. Create the `MachineConfig` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f disable-chronyd.yaml ----- diff --git a/modules/cnf-du-configuring-a-performance-profile-to-support-workload-partitioning.adoc b/modules/cnf-du-configuring-a-performance-profile-to-support-workload-partitioning.adoc deleted file mode 100644 index 53d72c4b574e..000000000000 --- a/modules/cnf-du-configuring-a-performance-profile-to-support-workload-partitioning.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-provisioning-and-installing-a-distributed-unit.adoc - -[id="cnf-du-configuring-a-performance-profile-to-support-workload-partitioning.adoc_{context}"] - -= Configuring a performance profile to support workload partitioning - -After you have configured workload partitioning, you need to ensure that the Performance Addon Operator has been installed and that you configured a performance profile. - -The reserved CPU IDs in the performance profile must match the workload partitioning CPU IDs. diff --git a/modules/cnf-du-management-pods.adoc b/modules/cnf-du-management-pods.adoc deleted file mode 100644 index b7dd626c82b6..000000000000 --- a/modules/cnf-du-management-pods.adoc +++ /dev/null @@ -1,358 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-provisioning-and-deploying-a-distributed-unit.adoc - -[id="cnf-du-management-pods.adoc_{context}"] - -= Cluster Management pods - -For the purposes of achieving 2-core (4 HT CPU) installation of single-node clusters, the set of pods that are considered _management_ are limited to: - -* Core Operators -* Day 2 Operators -* ACM pods - -The following tables identify the namespaces and pods that can be restricted to a subset of the CPUs on a node by configuring workload partitioning. 
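As an illustration of how a namespace is opted in to workload partitioning, a management namespace typically carries an annotation similar to the following sketch. The annotation name is an assumption based on the workload partitioning feature and is not defined in this module; verify it against the workload partitioning documentation for your release:

[source,yaml]
----
apiVersion: v1
kind: Namespace
metadata:
  name: openshift-ptp
  annotations:
    workload.openshift.io/allowed: management <1>
----
<1> Assumed annotation that marks the namespace so that its pods can be constrained to the management CPU set.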
- -== Core Operators - -[cols="1,1"] -|=== -| Namespace | Pod - -| openshift-apiserver-operator -| openshift-apiserver-operator - -| openshift-apiserver -| apiserver - -| openshift-authentication-operator -| authentication-operator - -| openshift-authentication -| oauth-openshift - -| openshift-cloud-controller-manager-operator -| cluster-cloud-controller-manager - -| openshift-cloud-credential-operator -| cloud-credential-operator - -| openshift-cluster-machine-approver -| machine-approver - -| openshift-cluster-node-tuning-operator -| cluster-node-tuning-operator - -| openshift-cluster-node-tuning-operator -| tuned - -| openshift-cluster-samples-operator -| cluster-samples-operator - -| openshift-cluster-storage-operator -| cluster-storage-operator - -| openshift-cluster-storage-operator -| csi-snapshot-controller - -| openshift-cluster-storage-operator -| csi-snapshot-controller-operator - -| openshift-cluster-storage-operator -| csi-snapshot-webhook - -| openshift-cluster-version -| cluster-version-operator - -| openshift-config-operator -| openshift-config-operator - -| openshift-console-operator -| console-operator - -| openshift-console -| console - -| openshift-console -| downloads - -| openshift-controller-manager-operator -| openshift-controller-manager-operator - -| openshift-controller-manager -| controller-manager - -| openshift-dns-operator -| dns-operator - -| openshift-dns -| dns-default - -| openshift-dns -| node-resolver - -| openshift-etcd-operator -| etcd-operator - -| openshift-etcd -| etcd - -| openshift-image-registry -| cluster-image-registry-operator - -| openshift-image-registry -| image-pruner - -| openshift-image-registry -| node-ca - -| openshift-ingress-canary -| ingress-canary - -| openshift-ingress-operator -| ingress-operator - -| openshift-ingress -| router-default - -| openshift-insights -| insights-operator - -| openshift-kube-apiserver-operator -| kube-apiserver-operator - -| openshift-kube-apiserver -| kube-apiserver - -| openshift-kube-controller-manager-operator -| kube-controller-manager-operator - -| openshift-kube-controller-manager -| kube-controller-manager - -| openshift-kube-scheduler-operator -| openshift-kube-scheduler-operator - -| openshift-kube-scheduler -| openshift-kube-scheduler - -| openshift-kube-storage-version-migrator-operator -| kube-storage-version-migrator-operator - -| openshift-kube-storage-version-migrator -| migrator - -| openshift-machine-api -| cluster-autoscaler-operator - -| openshift-machine-api -| cluster-baremetal-operator - -| openshift-machine-api -| machine-api-operator - -| openshift-machine-config-operator -| machine-config-controller - -| openshift-machine-config-operator -| machine-config-daemon - -| openshift-marketplace -| certified-operators - -| openshift-machine-config-operator -| machine-config-operator - -| openshift-machine-config-operator -| machine-config-server - -| openshift-marketplace -| community-operators - -| openshift-marketplace -| marketplace-operator - -| openshift-marketplace -| redhat-marketplace - -| openshift-marketplace -| redhat-operators - -| openshift-monitoring -| alertmanager-main - -| openshift-monitoring -| cluster-monitoring-operator - -| openshift-monitoring -| grafana - -| openshift-monitoring -| kube-state-metrics - -| openshift-monitoring -| node-exporter - -| openshift-monitoring -| openshift-state-metrics - -| openshift-monitoring -| prometheus-adapter - -| openshift-monitoring -| prometheus-adapter - -| openshift-monitoring -| prometheus-k8s - -| openshift-monitoring -| 
prometheus-operator - -| openshift-monitoring -| telemeter-client - -| openshift-monitoring -| thanos-querier - -| openshift-multus -| multus-admission-controller - -| openshift-multus -| multus - -| openshift-multus -| network-metrics-daemon - -| openshift-multus -| multus-additional-cni-plugins - -| openshift-network-diagnostics -| network-check-source - -| openshift-network-diagnostics -| network-check-target - -| openshift-network-operator -| network-operator - -| openshift-oauth-apiserver -| apiserver - -| openshift-operator-lifecycle-manager -| catalog-operator - -| openshift-operator-lifecycle-manager -| olm-operator - -| openshift-operator-lifecycle-manager -| packageserver - -| openshift-operator-lifecycle-manager -| packageserver - -| openshift-ovn-kubernetes -| ovnkube-master - -| openshift-ovn-kubernetes -| ovnkube-node - -| openshift-ovn-kubernetes -| ovs-node - -| openshift-service-ca-operator -| service-ca-operator - -| openshift-service-ca -| service-ca -|=== - -== Day 2 Operators - -[cols="1,1"] -|=== -| Namespace | Pod - -| openshift-ptp -| ptp-operator - -| openshift-ptp -| linuxptp-daemon - -| openshift-performance-addon-operator -| performance-operator - -| openshift-sriov-network-operator -| network-resources-injector - -| openshift-sriov-network-operator -| operator-webhook - -| openshift-sriov-network-operator -| sriov-cni - -| openshift-sriov-network-operator -| sriov-device-plugin - -| openshift-sriov-network-operator -| sriov-network-config-daemon - -| openshift-sriov-network-operator -| sriov-network-operator - -| local-storage -| local-disks-local-diskmaker - -| local-storage -| local-disks-local-provisioner - -| local-storage -| local-storage-operator - -| openshift-logging -| cluster-logging-operator - -| openshift-logging -| fluentd -|=== - - -== ACM pods - -[cols="1,1"] -|=== -| Namespace | Pod - -| open-cluster-management-agent-addon -| klusterlet-addon-appmgr - -| open-cluster-management-agent-addon -| klusterlet-addon-certpolicyctrl - -| open-cluster-management-agent-addon -| klusterlet-addon-iampolicyctrl - -| open-cluster-management-agent-addon -| klusterlet-addon-operator - -| open-cluster-management-agent-addon -| klusterlet-addon-policyctrl-config-policy - -| open-cluster-management-agent-addon -| klusterlet-addon-policyctrl-framework - -| open-cluster-management-agent-addon -| klusterlet-addon-search - -| open-cluster-management-agent-addon -| klusterlet-addon-workmgr - -| open-cluster-management-agent -| klusterlet - -| open-cluster-management-agent -| klusterlet-registration-agent - -| open-cluster-management-agent -| klusterlet-work-agent -|=== diff --git a/modules/cnf-fast-event-notifications-api-refererence.adoc b/modules/cnf-fast-event-notifications-api-refererence.adoc deleted file mode 100644 index 2b2357e626cb..000000000000 --- a/modules/cnf-fast-event-notifications-api-refererence.adoc +++ /dev/null @@ -1,405 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -[id="cnf-fast-event-notifications-api-refererence_{context}"] -= Subscribing DU applications to PTP events REST API reference - -Use the PTP event notifications REST API to subscribe a distributed unit (DU) application to the PTP events that are generated on the parent node. - -Subscribe applications to PTP events by using the resource address `/cluster/node//ptp`, where `` is the cluster node running the DU application. 
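For example, for a DU application running on a node named `compute-1.example.com` (the hypothetical node name used in the example responses throughout this reference), the subscription resource address resolves to `/cluster/node/compute-1.example.com/ptp`.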
- -Deploy your `cloud-event-consumer` DU application container and `cloud-event-proxy` sidecar container in a separate DU application pod. The `cloud-event-consumer` DU application subscribes to the `cloud-event-proxy` container in the application pod. - -Use the following API endpoints to subscribe the `cloud-event-consumer` DU application to PTP events posted by the `cloud-event-proxy` container at [x-]`http://localhost:8089/api/ocloudNotifications/v1/` in the DU application pod: - -* `/api/ocloudNotifications/v1/subscriptions` -- `POST`: Creates a new subscription -- `GET`: Retrieves a list of subscriptions - -* `/api/ocloudNotifications/v1/subscriptions/` -- `GET`: Returns details for the specified subscription ID - -* `api/ocloudNotifications/v1/subscriptions/status/` -- `PUT`: Creates a new status ping request for the specified subscription ID - -* `/api/ocloudNotifications/v1/health` -- `GET`: Returns the health status of `ocloudNotifications` API - -* `api/ocloudNotifications/v1/publishers` -- `GET`: Returns an array of `os-clock-sync-state`, `ptp-clock-class-change`, and `lock-state` messages for the cluster node - -* `/api/ocloudnotifications/v1//CurrentState` -- `GET`: Returns the current state of one the following event types: `os-clock-sync-state`, `ptp-clock-class-change`, or `lock-state` events - -[NOTE] -==== -`9089` is the default port for the `cloud-event-consumer` container deployed in the application pod. You can configure a different port for your DU application as required. -==== - -== api/ocloudNotifications/v1/subscriptions - -[discrete] -=== HTTP method - -`GET api/ocloudNotifications/v1/subscriptions` - -[discrete] -==== Description - -Returns a list of subscriptions. If subscriptions exist, a `200 OK` status code is returned along with the list of subscriptions. - -.Example API response -[source,json] ----- -[ - { - "id": "75b1ad8f-c807-4c23-acf5-56f4b7ee3826", - "endpointUri": "http://localhost:9089/event", - "uriLocation": "http://localhost:8089/api/ocloudNotifications/v1/subscriptions/75b1ad8f-c807-4c23-acf5-56f4b7ee3826", - "resource": "/cluster/node/compute-1.example.com/ptp" - } -] ----- - -[discrete] -=== HTTP method - -`POST api/ocloudNotifications/v1/subscriptions` - -[discrete] -==== Description - -Creates a new subscription. If a subscription is successfully created, or if it already exists, a `201 Created` status code is returned. - -.Query parameters -|=== -| Parameter | Type - -| subscription -| data -|=== - -.Example payload -[source,json] ----- -{ - "uriLocation": "http://localhost:8089/api/ocloudNotifications/v1/subscriptions", - "resource": "/cluster/node/compute-1.example.com/ptp" -} ----- - -== api/ocloudNotifications/v1/subscriptions/ - -[discrete] -=== HTTP method - -`GET api/ocloudNotifications/v1/subscriptions/` - -[discrete] -==== Description - -Returns details for the subscription with ID `` - -.Query parameters -|=== -| Parameter | Type - -| `` -| string -|=== - -.Example API response -[source,json] ----- -{ - "id":"48210fb3-45be-4ce0-aa9b-41a0e58730ab", - "endpointUri": "http://localhost:9089/event", - "uriLocation":"http://localhost:8089/api/ocloudNotifications/v1/subscriptions/48210fb3-45be-4ce0-aa9b-41a0e58730ab", - "resource":"/cluster/node/compute-1.example.com/ptp" -} ----- - -== api/ocloudNotifications/v1/subscriptions/status/ - -[discrete] -=== HTTP method - -`PUT api/ocloudNotifications/v1/subscriptions/status/` - -[discrete] -==== Description - -Creates a new status ping request for subscription with ID ``. 
If a subscription is present, the status request is successful and a `202 Accepted` status code is returned. - -.Query parameters -|=== -| Parameter | Type - -| `` -| string -|=== - -.Example API response -[source,json] ----- -{"status":"ping sent"} ----- - -== api/ocloudNotifications/v1/health/ - -[discrete] -=== HTTP method - -`GET api/ocloudNotifications/v1/health/` - -[discrete] -==== Description - -Returns the health status for the `ocloudNotifications` REST API. - -.Example API response -[source,terminal] ----- -OK ----- - -== api/ocloudNotifications/v1/publishers - -[discrete] -=== HTTP method - -`GET api/ocloudNotifications/v1/publishers` - -[discrete] -==== Description - -Returns an array of `os-clock-sync-state`, `ptp-clock-class-change`, and `lock-state` details for the cluster node. The system generates notifications when the relevant equipment state changes. - -* `os-clock-sync-state` notifications describe the host operating system clock synchronization state. Can be in `LOCKED` or `FREERUN` state. -* `ptp-clock-class-change` notifications describe the current state of the PTP clock class. -* `lock-state` notifications describe the current status of the PTP equipment lock state. Can be in `LOCKED`, `HOLDOVER` or `FREERUN` state. - -.Example API response -[source,json] ----- -[ - { - "id": "0fa415ae-a3cf-4299-876a-589438bacf75", - "endpointUri": "http://localhost:9085/api/ocloudNotifications/v1/dummy", - "uriLocation": "http://localhost:9085/api/ocloudNotifications/v1/publishers/0fa415ae-a3cf-4299-876a-589438bacf75", - "resource": "/cluster/node/compute-1.example.com/sync/sync-status/os-clock-sync-state" - }, - { - "id": "28cd82df-8436-4f50-bbd9-7a9742828a71", - "endpointUri": "http://localhost:9085/api/ocloudNotifications/v1/dummy", - "uriLocation": "http://localhost:9085/api/ocloudNotifications/v1/publishers/28cd82df-8436-4f50-bbd9-7a9742828a71", - "resource": "/cluster/node/compute-1.example.com/sync/ptp-status/ptp-clock-class-change" - }, - { - "id": "44aa480d-7347-48b0-a5b0-e0af01fa9677", - "endpointUri": "http://localhost:9085/api/ocloudNotifications/v1/dummy", - "uriLocation": "http://localhost:9085/api/ocloudNotifications/v1/publishers/44aa480d-7347-48b0-a5b0-e0af01fa9677", - "resource": "/cluster/node/compute-1.example.com/sync/ptp-status/lock-state" - } -] ----- - -You can find `os-clock-sync-state`, `ptp-clock-class-change` and `lock-state` events in the logs for the `cloud-event-proxy` container. 
For example: - -[source,terminal] ----- -$ oc logs -f linuxptp-daemon-cvgr6 -n openshift-ptp -c cloud-event-proxy ----- - -.Example os-clock-sync-state event -[source,json] ----- -{ - "id":"c8a784d1-5f4a-4c16-9a81-a3b4313affe5", - "type":"event.sync.sync-status.os-clock-sync-state-change", - "source":"/cluster/compute-1.example.com/ptp/CLOCK_REALTIME", - "dataContentType":"application/json", - "time":"2022-05-06T15:31:23.906277159Z", - "data":{ - "version":"v1", - "values":[ - { - "resource":"/sync/sync-status/os-clock-sync-state", - "dataType":"notification", - "valueType":"enumeration", - "value":"LOCKED" - }, - { - "resource":"/sync/sync-status/os-clock-sync-state", - "dataType":"metric", - "valueType":"decimal64.3", - "value":"-53" - } - ] - } -} ----- - -.Example ptp-clock-class-change event -[source,json] ----- -{ - "id":"69eddb52-1650-4e56-b325-86d44688d02b", - "type":"event.sync.ptp-status.ptp-clock-class-change", - "source":"/cluster/compute-1.example.com/ptp/ens2fx/master", - "dataContentType":"application/json", - "time":"2022-05-06T15:31:23.147100033Z", - "data":{ - "version":"v1", - "values":[ - { - "resource":"/sync/ptp-status/ptp-clock-class-change", - "dataType":"metric", - "valueType":"decimal64.3", - "value":"135" - } - ] - } -} ----- - -.Example lock-state event -[source,json] ----- -{ - "id":"305ec18b-1472-47b3-aadd-8f37933249a9", - "type":"event.sync.ptp-status.ptp-state-change", - "source":"/cluster/compute-1.example.com/ptp/ens2fx/master", - "dataContentType":"application/json", - "time":"2022-05-06T15:31:23.467684081Z", - "data":{ - "version":"v1", - "values":[ - { - "resource":"/sync/ptp-status/lock-state", - "dataType":"notification", - "valueType":"enumeration", - "value":"LOCKED" - }, - { - "resource":"/sync/ptp-status/lock-state", - "dataType":"metric", - "valueType":"decimal64.3", - "value":"62" - } - ] - } -} ----- - -== /api/ocloudnotifications/v1//CurrentState - -[discrete] -=== HTTP method - -`GET api/ocloudNotifications/v1/cluster/node//sync/ptp-status/lock-state/CurrentState` - -`GET api/ocloudNotifications/v1/cluster/node//sync/sync-status/os-clock-sync-state/CurrentState` - -`GET api/ocloudNotifications/v1/cluster/node//sync/ptp-status/ptp-clock-class-change/CurrentState` - -[discrete] -==== Description - -Configure the `CurrentState` API endpoint to return the current state of the `os-clock-sync-state`, `ptp-clock-class-change`, or `lock-state` events for the cluster node. - -* `os-clock-sync-state` notifications describe the host operating system clock synchronization state. Can be in `LOCKED` or `FREERUN` state. -* `ptp-clock-class-change` notifications describe the current state of the PTP clock class. -* `lock-state` notifications describe the current status of the PTP equipment lock state. Can be in `LOCKED`, `HOLDOVER` or `FREERUN` state. 
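For example, a DU application running in the same pod can fetch the current PTP lock state for the node directly from the `cloud-event-proxy` API. The following request is an illustrative sketch only; it assumes the API is served at `localhost:8089` in the DU application pod, as described earlier in this reference, and uses the example node name `compute-1.example.com`:

[source,terminal]
----
$ curl http://localhost:8089/api/ocloudNotifications/v1/cluster/node/compute-1.example.com/sync/ptp-status/lock-state/CurrentState
----

The response has the same structure as the example lock-state API response that follows.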
- -.Query parameters -|=== -| Parameter | Type - -| `` -| string -|=== - -.Example lock-state API response -[source,json] ----- -{ - "id": "c1ac3aa5-1195-4786-84f8-da0ea4462921", - "type": "event.sync.ptp-status.ptp-state-change", - "source": "/cluster/node/compute-1.example.com/sync/ptp-status/lock-state", - "dataContentType": "application/json", - "time": "2023-01-10T02:41:57.094981478Z", - "data": { - "version": "v1", - "values": [ - { - "resource": "/cluster/node/compute-1.example.com/ens5fx/master", - "dataType": "notification", - "valueType": "enumeration", - "value": "LOCKED" - }, - { - "resource": "/cluster/node/compute-1.example.com/ens5fx/master", - "dataType": "metric", - "valueType": "decimal64.3", - "value": "29" - } - ] - } -} ----- - -.Example os-clock-sync-state API response -[source,json] ----- -{ - "specversion": "0.3", - "id": "4f51fe99-feaa-4e66-9112-66c5c9b9afcb", - "source": "/cluster/node/compute-1.example.com/sync/sync-status/os-clock-sync-state", - "type": "event.sync.sync-status.os-clock-sync-state-change", - "subject": "/cluster/node/compute-1.example.com/sync/sync-status/os-clock-sync-state", - "datacontenttype": "application/json", - "time": "2022-11-29T17:44:22.202Z", - "data": { - "version": "v1", - "values": [ - { - "resource": "/cluster/node/compute-1.example.com/CLOCK_REALTIME", - "dataType": "notification", - "valueType": "enumeration", - "value": "LOCKED" - }, - { - "resource": "/cluster/node/compute-1.example.com/CLOCK_REALTIME", - "dataType": "metric", - "valueType": "decimal64.3", - "value": "27" - } - ] - } -} ----- - -.Example ptp-clock-class-change API response -[source,json] ----- -{ - "id": "064c9e67-5ad4-4afb-98ff-189c6aa9c205", - "type": "event.sync.ptp-status.ptp-clock-class-change", - "source": "/cluster/node/compute-1.example.com/sync/ptp-status/ptp-clock-class-change", - "dataContentType": "application/json", - "time": "2023-01-10T02:41:56.785673989Z", - "data": { - "version": "v1", - "values": [ - { - "resource": "/cluster/node/compute-1.example.com/ens5fx/master", - "dataType": "metric", - "valueType": "decimal64.3", - "value": "165" - } - ] - } -} ----- diff --git a/modules/cnf-gathering-data-about-cluster-using-must-gather.adoc b/modules/cnf-gathering-data-about-cluster-using-must-gather.adoc deleted file mode 100644 index 35ffa12be643..000000000000 --- a/modules/cnf-gathering-data-about-cluster-using-must-gather.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-792 (4.8) -// * scalability_and_performance/cnf-create-performance-profiles.adoc - -:_content-type: PROCEDURE -[id="gathering-data-about-your-cluster-using-must-gather_{context}"] -= Gathering data about your cluster using the must-gather command - -The Performance Profile Creator (PPC) tool requires `must-gather` data. As a cluster administrator, run the `must-gather` command to capture information about your cluster. - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. -* The OpenShift CLI (`oc`) installed. - -.Procedure - -. Optional: Verify that a matching machine config pool exists with a label: -+ -[source,terminal] ----- -$ oc describe mcp/worker-rt ----- -+ -.Example output -[source,terminal] ----- -Name: worker-rt -Namespace: -Labels: machineconfiguration.openshift.io/role=worker-rt ----- - -. If a matching label does not exist add a label for a machine config pool (MCP) that matches with the MCP name: -+ -[source,terminal] ----- -$ oc label mcp ="" ----- - -. 
Navigate to the directory where you want to store the `must-gather` data. - -. Collect cluster information by running the following command: -+ -[source,terminal] ----- -$ oc adm must-gather ----- - -. Optional: Create a compressed file from the `must-gather` directory: -+ -[source,terminal] ----- -$ tar cvaf must-gather.tar.gz must-gather/ ----- -+ -[NOTE] -==== -Compressed output is required if you are running the Performance Profile Creator wrapper script. -==== diff --git a/modules/cnf-how-run-podman-to-create-profile.adoc b/modules/cnf-how-run-podman-to-create-profile.adoc deleted file mode 100644 index ede62cb5a310..000000000000 --- a/modules/cnf-how-run-podman-to-create-profile.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-792 (4.8) -// * scalability_and_performance/cnf-create-performance-profiles.adoc - -[id="how-to-run-podman-to-create-a-profile_{context}"] -= How to run `podman` to create a performance profile - -The following example illustrates how to run `podman` to create a performance profile with 20 reserved CPUs that are to be split across the NUMA nodes. - -Node hardware configuration: - -* 80 CPUs -* Hyperthreading enabled -* Two NUMA nodes -* Even numbered CPUs run on NUMA node 0 and odd numbered CPUs run on NUMA node 1 - -Run `podman` to create the performance profile: - -[source,terminal,subs="attributes+"] ----- -$ podman run --entrypoint performance-profile-creator -v /must-gather:/must-gather:z registry.redhat.io/openshift4/ose-cluster-node-tuning-operator:v{product-version} --mcp-name=worker-cnf --reserved-cpu-count=20 --rt-kernel=true --split-reserved-cpus-across-numa=true --must-gather-dir-path /must-gather > my-performance-profile.yaml ----- - -The created profile is described in the following YAML: - -[source,yaml] ----- - apiVersion: performance.openshift.io/v2 - kind: PerformanceProfile - metadata: - name: performance - spec: - cpu: - isolated: 10-39,50-79 - reserved: 0-9,40-49 - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" - numa: - topologyPolicy: restricted - realTimeKernel: - enabled: true ----- - -[NOTE] -==== -In this case, 10 CPUs are reserved on NUMA node 0 and 10 are reserved on NUMA node 1. -==== diff --git a/modules/cnf-installing-amq-interconnect-messaging-bus.adoc b/modules/cnf-installing-amq-interconnect-messaging-bus.adoc deleted file mode 100644 index f5770addccb1..000000000000 --- a/modules/cnf-installing-amq-interconnect-messaging-bus.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="cnf-installing-amq-interconnect-messaging-bus_{context}"] -= Installing the AMQ messaging bus - -To pass PTP fast event notifications between publisher and subscriber on a node, you can install and configure an AMQ messaging bus to run locally on the node. -To use AMQ messaging, you must install the AMQ Interconnect Operator. - -include::snippets/ptp-amq-interconnect-eol.adoc[] - -.Prerequisites - -* Install the {product-title} CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -* Install the AMQ Interconnect Operator to its own `amq-interconnect` namespace. See link:https://access.redhat.com/documentation/en-us/red_hat_amq/2021.q1/html/deploying_amq_interconnect_on_openshift/adding-operator-router-ocp[Adding the Red Hat Integration - AMQ Interconnect Operator]. - -.Verification - -. 
Check that the AMQ Interconnect Operator is available and the required pods are running: -+ -[source,terminal] ----- -$ oc get pods -n amq-interconnect ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -amq-interconnect-645db76c76-k8ghs 1/1 Running 0 23h -interconnect-operator-5cb5fc7cc-4v7qm 1/1 Running 0 23h ----- - -. Check that the required `linuxptp-daemon` PTP event producer pods are running in the `openshift-ptp` namespace. -+ -[source,terminal] ----- -$ oc get pods -n openshift-ptp ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -linuxptp-daemon-2t78p 3/3 Running 0 12h -linuxptp-daemon-k8n88 3/3 Running 0 12h ----- diff --git a/modules/cnf-installing-numa-resources-operator-cli.adoc b/modules/cnf-installing-numa-resources-operator-cli.adoc deleted file mode 100644 index c58047bcd178..000000000000 --- a/modules/cnf-installing-numa-resources-operator-cli.adoc +++ /dev/null @@ -1,101 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_content-type: PROCEDURE -[id="cnf-installing-numa-resources-operator-cli_{context}"] -= Installing the NUMA Resources Operator using the CLI - -As a cluster administrator, you can install the Operator using the CLI. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a namespace for the NUMA Resources Operator: - -.. Save the following YAML in the `nro-namespace.yaml` file: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: openshift-numaresources ----- - -.. Create the `Namespace` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-namespace.yaml ----- - -. Create the Operator group for the NUMA Resources Operator: - -.. Save the following YAML in the `nro-operatorgroup.yaml` file: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: numaresources-operator - namespace: openshift-numaresources -spec: - targetNamespaces: - - openshift-numaresources ----- - -.. Create the `OperatorGroup` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-operatorgroup.yaml ----- - -. Create the subscription for the NUMA Resources Operator: - -.. Save the following YAML in the `nro-sub.yaml` file: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: operators.coreos.com/v1 -kind: Subscription -metadata: - name: numaresources-operator - namespace: openshift-numaresources -spec: - channel: "{product-version}" - name: numaresources-operator - source: redhat-operators - sourceNamespace: openshift-marketplace ----- - -.. Create the `Subscription` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-sub.yaml ----- - -.Verification - -. Verify that the installation succeeded by inspecting the CSV resource in the `openshift-numaresources` namespace. 
Run the following command: -+ -[source,terminal] ----- -$ oc get csv -n openshift-numaresources ----- -+ -.Example output - -[source,terminal,subs="attributes+"] ----- -NAME DISPLAY VERSION REPLACES PHASE -numaresources-operator.v{product-version}.2 numaresources-operator {product-version}.2 Succeeded ----- diff --git a/modules/cnf-installing-numa-resources-operator-console.adoc b/modules/cnf-installing-numa-resources-operator-console.adoc deleted file mode 100644 index b793ee53dc91..000000000000 --- a/modules/cnf-installing-numa-resources-operator-console.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_content-type: PROCEDURE -[id="cnf-installing-numa-resources-operator-console_{context}"] -= Installing the NUMA Resources Operator using the web console - -As a cluster administrator, you can install the NUMA Resources Operator using the web console. - -.Procedure - -. Install the NUMA Resources Operator using the {product-title} web console: - -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. - -.. Choose *NUMA Resources Operator* from the list of available Operators, and then click *Install*. - -. Optional: Verify that the NUMA Resources Operator installed successfully: - -.. Switch to the *Operators* -> *Installed Operators* page. - -.. Ensure that *NUMA Resources Operator* is listed in the *default* project with a *Status* of *InstallSucceeded*. -+ -[NOTE] -==== -During installation an Operator might display a *Failed* status. If the installation later succeeds with an *InstallSucceeded* message, you can ignore the *Failed* message. -==== -+ -If the Operator does not appear as installed, to troubleshoot further: -+ -* Go to the *Operators* -> *Installed Operators* page and inspect the *Operator Subscriptions* and *Install Plans* tabs for any failure or errors under *Status*. -* Go to the *Workloads* -> *Pods* page and check the logs for pods in the `default` project. diff --git a/modules/cnf-installing-the-operators.adoc b/modules/cnf-installing-the-operators.adoc deleted file mode 100644 index d0781bc6f2c1..000000000000 --- a/modules/cnf-installing-the-operators.adoc +++ /dev/null @@ -1,238 +0,0 @@ -// CNF-950 4.7 Installing the operators -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-provisioning-and-deploying-a-distributed-unit.adoc - -:_content-type: PROCEDURE -[id="cnf-installing-the-operators_{context}"] -= Installing the Operators - -[id="cnf-installing-the-performnce-addon-operator_{context}"] -== Installing the Performance Addon Operator - -Install the Performance Addon Operator using the {product-title} CLI. - -.Procedure - -. Create the Performance Addon Operator namespace: -+ -[source,terminal] ----- -cat <" <1> - name: performance-addon-operator - source: redhat-operators <2> - sourceNamespace: openshift-marketplace -EOF ----- -<1> Specify the value you obtained in the previous step for the `status.defaultChannel` parameter. -<2> You must specify the `redhat-operators` value. - -[id="cnf-installing-the-precision-time-protocol-operator_{context}"] -== Installing the Precision Time Protocol (PTP) Operator - -Install the PTP Operator using the {product-title} CLI or the web console. - -.Procedure - -. Apply the Operator namespace: -+ -[source,terminal] ----- -cat <` variable to the command specifying an appropriate label. 
-==== diff --git a/modules/cnf-logging-associated-with-adjusting-nic-queues.adoc b/modules/cnf-logging-associated-with-adjusting-nic-queues.adoc deleted file mode 100644 index 898d72bd4f1e..000000000000 --- a/modules/cnf-logging-associated-with-adjusting-nic-queues.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -//CNF-1483 (4.8) -// * scalability_and_performance/cnf-low-latency-tuning.adoc - -[id="logging-associated-with-adjusting-nic-queues_{context}"] -= Logging associated with adjusting NIC queues - -Log messages detailing the assigned devices are recorded in the respective Tuned daemon logs. The following messages might be recorded to the `/var/log/tuned/tuned.log` file: - -* An `INFO` message is recorded detailing the successfully assigned devices: -+ -[source, terminal] ----- -INFO tuned.plugins.base: instance net_test (net): assigning devices ens1, ens2, ens3 ----- -* A `WARNING` message is recorded if none of the devices can be assigned: -+ -[source, terminal] ----- -WARNING tuned.plugins.base: instance net_test: no matching devices available ----- diff --git a/modules/cnf-managing-device-interrupt-processing-for-guaranteed-pod-isolated-cpus.adoc b/modules/cnf-managing-device-interrupt-processing-for-guaranteed-pod-isolated-cpus.adoc deleted file mode 100644 index aecf8fdeee1e..000000000000 --- a/modules/cnf-managing-device-interrupt-processing-for-guaranteed-pod-isolated-cpus.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// CNF-802 Infrastructure-provided interrupt processing for guaranteed pod CPUs -// Module included in the following assemblies: -// -// *cnf-low-latency-tuning.adoc - -[id="managing-device-interrupt-processing-for-guaranteed-pod-isolated-cpus_{context}"] -= Managing device interrupt processing for guaranteed pod isolated CPUs - -The Node Tuning Operator can manage host CPUs by dividing them into reserved CPUs for cluster and operating system housekeeping duties, including pod infra containers, and isolated CPUs for application containers to run the workloads. This allows you to set CPUs for low latency workloads as isolated. - -Device interrupts are load balanced between all isolated and reserved CPUs to avoid CPUs being overloaded, with the exception of CPUs where there is a guaranteed pod running. Guaranteed pod CPUs are prevented from processing device interrupts when the relevant annotations are set for the pod. - -In the performance profile, `globallyDisableIrqLoadBalancing` is used to manage whether device interrupts are processed or not. For certain workloads, the reserved CPUs are not always sufficient for dealing with device interrupts, and for this reason, device interrupts are not globally disabled on the isolated CPUs. By default, Node Tuning Operator does not disable device interrupts on isolated CPUs. - -To achieve low latency for workloads, some (but not all) pods require the CPUs they are running on to not process device interrupts. A pod annotation, `irq-load-balancing.crio.io`, is used to define whether device interrupts are processed or not. When configured, CRI-O disables device interrupts only as long as the pod is running. - -[id="disabling-cpu-cfs-quota_{context}"] -== Disabling CPU CFS quota - -To reduce CPU throttling for individual guaranteed pods, create a pod specification with the annotation `cpu-quota.crio.io: "disable"`. This annotation disables the CPU completely fair scheduler (CFS) quota at the pod run time. 
The following pod specification contains this annotation: - -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - annotations: - cpu-quota.crio.io: "disable" -spec: - runtimeClassName: performance- -... ----- - -[NOTE] -==== -Only disable CPU CFS quota when the CPU manager static policy is enabled and for pods with guaranteed QoS that use whole CPUs. Otherwise, disabling CPU CFS quota can affect the performance of other containers in the cluster. -==== - -[id="configuring-global-device-interrupts-handling-for-isolated-cpus_{context}"] -== Disabling global device interrupts handling in Node Tuning Operator - -To configure Node Tuning Operator to disable global device interrupts for the isolated CPU set, set the `globallyDisableIrqLoadBalancing` field in the performance profile to `true`. When `true`, conflicting pod annotations are ignored. When `false`, IRQ loads are balanced across all CPUs. - -A performance profile snippet illustrates this setting: - -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: manual -spec: - globallyDisableIrqLoadBalancing: true -... ----- - -[id="disabling_interrupt_processing_for_individual_pods_{context}"] -== Disabling interrupt processing for individual pods - -To disable interrupt processing for individual pods, ensure that `globallyDisableIrqLoadBalancing` is set to `false` in the performance profile. Then, in the pod specification, set the `irq-load-balancing.crio.io` pod annotation to `disable`. The following pod specification contains this annotation: - -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: Pod -metadata: - annotations: - irq-load-balancing.crio.io: "disable" -spec: - runtimeClassName: performance- -... ----- diff --git a/modules/cnf-measuring-latency.adoc b/modules/cnf-measuring-latency.adoc deleted file mode 100644 index 67e08afa1c59..000000000000 --- a/modules/cnf-measuring-latency.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-performing-platform-verification-latency-tests.adoc - -:_content-type: CONCEPT -[id="cnf-measuring-latency_{context}"] -= Measuring latency - -The `cnf-tests` image uses three tools to measure the latency of the system: - -* `hwlatdetect` -* `cyclictest` -* `oslat` - -Each tool has a specific use. Use the tools in sequence to achieve reliable test results. - -hwlatdetect:: Measures the baseline that the bare-metal hardware can achieve. Before proceeding with the next latency test, ensure that the latency reported by `hwlatdetect` meets the required threshold because you cannot fix hardware latency spikes by operating system tuning. - -cyclictest:: Verifies the real-time kernel scheduler latency after `hwlatdetect` passes validation. The `cyclictest` tool schedules a repeated timer and measures the difference between the desired and the actual trigger times. The difference can uncover basic issues with the tuning caused by interrupts or process priorities. The tool must run on a real-time kernel. - -oslat:: Behaves similarly to a CPU-intensive DPDK application and measures all the interruptions and disruptions to the busy loop that simulates CPU heavy data processing. - -The tests introduce the following environment variables: - -.Latency test environment variables -[cols="1,3", options="header"] -|==== -|Environment variables -|Description - -|`LATENCY_TEST_DELAY` -|Specifies the amount of time in seconds after which the test starts running. 
You can use the variable to allow the CPU manager reconcile loop to update the default CPU pool. The default value is 0. - -|`LATENCY_TEST_CPUS` -|Specifies the number of CPUs that the pod running the latency tests uses. If you do not set the variable, the default configuration includes all isolated CPUs. - -|`LATENCY_TEST_RUNTIME` -|Specifies the amount of time in seconds that the latency test must run. The default value is 300 seconds. - -|`HWLATDETECT_MAXIMUM_LATENCY` -|Specifies the maximum acceptable hardware latency in microseconds for the workload and operating system. If you do not set the value of `HWLATDETECT_MAXIMUM_LATENCY` or `MAXIMUM_LATENCY`, the tool compares the default expected threshold (20μs) and the actual maximum latency in the tool itself. Then, the test fails or succeeds accordingly. - -|`CYCLICTEST_MAXIMUM_LATENCY` -|Specifies the maximum latency in microseconds that all threads expect before waking up during the `cyclictest` run. If you do not set the value of `CYCLICTEST_MAXIMUM_LATENCY` or `MAXIMUM_LATENCY`, the tool skips the comparison of the expected and the actual maximum latency. - -|`OSLAT_MAXIMUM_LATENCY` -|Specifies the maximum acceptable latency in microseconds for the `oslat` test results. If you do not set the value of `OSLAT_MAXIMUM_LATENCY` or `MAXIMUM_LATENCY`, the tool skips the comparison of the expected and the actual maximum latency. - -|`MAXIMUM_LATENCY` -|Unified variable that specifies the maximum acceptable latency in microseconds. Applicable for all available latency tools. - -|`LATENCY_TEST_RUN` -|Boolean parameter that indicates whether the tests should run. `LATENCY_TEST_RUN` is set to `false` by default. To run the latency tests, set this value to `true`. -|==== - -[NOTE] -==== -Variables that are specific to a latency tool take precedence over unified variables. For example, if `OSLAT_MAXIMUM_LATENCY` is set to 30 microseconds and `MAXIMUM_LATENCY` is set to 10 microseconds, the `oslat` test will run with maximum acceptable latency of 30 microseconds. -==== diff --git a/modules/cnf-migrating-from-amqp-to-http-transport.adoc b/modules/cnf-migrating-from-amqp-to-http-transport.adoc deleted file mode 100644 index bac9bbec9429..000000000000 --- a/modules/cnf-migrating-from-amqp-to-http-transport.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/using-rfhe.adoc -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="cnf-migrating-from-amqp-to-http-transport_{context}"] -= Migrating consumer applications to use HTTP transport for PTP or bare-metal events - -If you have previously deployed PTP or bare-metal events consumer applications, you need to update the applications to use HTTP message transport. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -* You have logged in as a user with `cluster-admin` privileges. - -* You have updated the PTP Operator or {redfish-operator} to version 4.13+ which uses HTTP transport by default. - -* Configure dynamic volume provisioning in the cluster or manually create `StorageClass`, `LocalVolume`, and `PersistentVolume` resources to persist the events subscription. -+ -[NOTE] -==== -When dynamic volume provisioning is enabled, a `PersistentVolume` resource is automatically created for the `PersistentVolumeClaim` that the PTP Operator or {redfish-operator} deploys. -==== - -.Procedure - -. Update your events consumer application to use HTTP transport. 
-Set the `http-event-publishers` variable for the cloud event sidecar deployment. -+ -For example, in a cluster with PTP events configured, the following YAML snippet illustrates a cloud event sidecar deployment: -+ -[source,yaml] ----- -containers: - - name: cloud-event-sidecar - image: cloud-event-sidecar - args: - - "--metrics-addr=127.0.0.1:9091" - - "--store-path=/store" - - "--transport-host=consumer-events-subscription-service.cloud-events.svc.cluster.local:9043" - - "--http-event-publishers=ptp-event-publisher-service-NODE_NAME.openshift-ptp.svc.cluster.local:9043" <1> - - "--api-port=8089" ----- -<1> The PTP Operator automatically resolves `NODE_NAME` to the host that is generating the PTP events. -For example, `compute-1.example.com`. -+ -In a cluster with bare-metal events configured, set the `http-event-publishers` field to `hw-event-publisher-service.openshift-bare-metal-events.svc.cluster.local:9043` in the cloud event sidecar deployment CR. - -. Deploy the `consumer-events-subscription-service` service alongside the events consumer application. -For example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Service -metadata: - annotations: - prometheus.io/scrape: "true" - service.alpha.openshift.io/serving-cert-secret-name: sidecar-consumer-secret - name: consumer-events-subscription-service - namespace: cloud-events - labels: - app: consumer-service -spec: - ports: - - name: sub-port - port: 9043 - selector: - app: consumer - clusterIP: None - sessionAffinity: None - type: ClusterIP ----- diff --git a/modules/cnf-modifying-and-applying-the-default-profile.adoc b/modules/cnf-modifying-and-applying-the-default-profile.adoc deleted file mode 100644 index fcd6dd80a0e7..000000000000 --- a/modules/cnf-modifying-and-applying-the-default-profile.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// CNF-950 4.7 Modifying and applying the default profile -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-provisioning-and-deploying-a-distributed-unit.adoc - -[id="cnf-modifying-and-applying-the-default-profile_{context}"] -= Modifying and applying the default profile - -You can apply the profile manually or with the toolset of your choice, such as ArgoCD. - -[NOTE] -==== -This procedure applies the DU profile step-by-step. If the profile is pulled together into a single project and applied in one step, issues will occur between the MCO and -the SRIOV operators if an Intel NIC is used for networking traffic. To avoid a race condition between the MCO and the SRIOV Operators, it is recommended that the DU application be applied in three steps: - -. Apply the profile without SRIOV. -. Wait for the cluster to settle. -. Apply the SRIOV portion. -==== diff --git a/modules/cnf-monitoring-fast-events-metrics.adoc b/modules/cnf-monitoring-fast-events-metrics.adoc deleted file mode 100644 index 4a801789ce1d..000000000000 --- a/modules/cnf-monitoring-fast-events-metrics.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="cnf-monitoring-fast-events-metrics_{context}"] -= Monitoring PTP fast event metrics - -You can monitor PTP fast events metrics from cluster nodes where the `linuxptp-daemon` is running. -You can also monitor PTP fast event metrics in the {product-title} web console by using the pre-configured and self-updating Prometheus monitoring stack. - -.Prerequisites - -* Install the {product-title} CLI `oc`. - -* Log in as a user with `cluster-admin` privileges. 
- -* Install and configure the PTP Operator on a node with PTP-capable hardware. - -.Procedure - -. Check for exposed PTP metrics on any node where the `linuxptp-daemon` is running. For example, run the following command: -+ -[source,terminal] ----- -$ curl http://:9091/metrics ----- -+ -.Example output ----- -# HELP openshift_ptp_clock_state 0 = FREERUN, 1 = LOCKED, 2 = HOLDOVER -# TYPE openshift_ptp_clock_state gauge -openshift_ptp_clock_state{iface="ens1fx",node="compute-1.example.com",process="ptp4l"} 1 -openshift_ptp_clock_state{iface="ens3fx",node="compute-1.example.com",process="ptp4l"} 1 -openshift_ptp_clock_state{iface="ens5fx",node="compute-1.example.com",process="ptp4l"} 1 -openshift_ptp_clock_state{iface="ens7fx",node="compute-1.example.com",process="ptp4l"} 1 -# HELP openshift_ptp_delay_ns -# TYPE openshift_ptp_delay_ns gauge -openshift_ptp_delay_ns{from="master",iface="ens1fx",node="compute-1.example.com",process="ptp4l"} 842 -openshift_ptp_delay_ns{from="master",iface="ens3fx",node="compute-1.example.com",process="ptp4l"} 480 -openshift_ptp_delay_ns{from="master",iface="ens5fx",node="compute-1.example.com",process="ptp4l"} 584 -openshift_ptp_delay_ns{from="master",iface="ens7fx",node="compute-1.example.com",process="ptp4l"} 482 -openshift_ptp_delay_ns{from="phc",iface="CLOCK_REALTIME",node="compute-1.example.com",process="phc2sys"} 547 -# HELP openshift_ptp_offset_ns -# TYPE openshift_ptp_offset_ns gauge -openshift_ptp_offset_ns{from="master",iface="ens1fx",node="compute-1.example.com",process="ptp4l"} -2 -openshift_ptp_offset_ns{from="master",iface="ens3fx",node="compute-1.example.com",process="ptp4l"} -44 -openshift_ptp_offset_ns{from="master",iface="ens5fx",node="compute-1.example.com",process="ptp4l"} -8 -openshift_ptp_offset_ns{from="master",iface="ens7fx",node="compute-1.example.com",process="ptp4l"} 3 -openshift_ptp_offset_ns{from="phc",iface="CLOCK_REALTIME",node="compute-1.example.com",process="phc2sys"} 12 ----- - -. To view the PTP event in the {product-title} web console, copy the name of the PTP metric you want to query, for example, `openshift_ptp_offset_ns`. - -. In the {product-title} web console, click *Observe* -> *Metrics*. - -. Paste the PTP metric name into the *Expression* field, and click *Run queries*. diff --git a/modules/cnf-performance-profile-creator-arguments.adoc b/modules/cnf-performance-profile-creator-arguments.adoc deleted file mode 100644 index 24abb8322fbc..000000000000 --- a/modules/cnf-performance-profile-creator-arguments.adoc +++ /dev/null @@ -1,125 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-792 (4.8) -// * scalability_and_performance/cnf-create-performance-profiles.adoc - - -[id="performance-profile-creator-arguments_{context}"] -= Performance Profile Creator arguments - -.Performance Profile Creator arguments -[cols="30%,70%",options="header"] -|=== -| Argument | Description - -| `disable-ht` -a|Disable hyperthreading. - -Possible values: `true` or `false`. - -Default: `false`. - -[WARNING] -==== -If this argument is set to `true` you should not disable hyperthreading in the BIOS. Disabling hyperthreading is accomplished with a kernel command line argument. -==== - -| `info` -a| This captures cluster information and is used in discovery mode only. Discovery mode also requires the `must-gather-dir-path` argument. If any other arguments are set they are ignored. 
- -Possible values: - -* `log` -* `JSON` - -+ -[NOTE] -==== -These options define the output format with the JSON format being reserved for debugging. -==== - -Default: `log`. - -| `mcp-name` -|MCP name for example `worker-cnf` corresponding to the target machines. This parameter is required. - -| `must-gather-dir-path` -| Must gather directory path. This parameter is required. - -When the user runs the tool with the wrapper script `must-gather` is supplied by the script itself and the user must not specify it. - -| `offlined-cpu-count` -a| Number of offlined CPUs. - -[NOTE] -==== -This must be a natural number greater than 0. If not enough logical processors are offlined then error messages are logged. The messages are: -[source,terminal] ----- -Error: failed to compute the reserved and isolated CPUs: please ensure that reserved-cpu-count plus offlined-cpu-count should be in the range [0,1] ----- -[source,terminal] ----- -Error: failed to compute the reserved and isolated CPUs: please specify the offlined CPU count in the range [0,1] ----- -==== - -| `power-consumption-mode` -a|The power consumption mode. - -Possible values: - -* `default`: CPU partitioning with enabled power management and basic low-latency. -* `low-latency`: Enhanced measures to improve latency figures. -* `ultra-low-latency`: Priority given to optimal latency, at the expense of power management. - -Default: `default`. - -| `per-pod-power-management` -a|Enable per-pod power management. You cannot use this argument if you configured `ultra-low-latency` as the power consumption mode. - -Possible values: `true` or `false`. - -Default: `false`. - -| `profile-name` -| Name of the performance profile to create. -Default: `performance`. - -| `reserved-cpu-count` -a| Number of reserved CPUs. This parameter is required. - -[NOTE] -==== -This must be a natural number. A value of 0 is not allowed. -==== - -| `rt-kernel` -| Enable real-time kernel. This parameter is required. - -Possible values: `true` or `false`. - -| `split-reserved-cpus-across-numa` -| Split the reserved CPUs across NUMA nodes. - -Possible values: `true` or `false`. - -Default: `false`. - -| `topology-manager-policy` -a| Kubelet Topology Manager policy of the performance profile to be created. - -Possible values: - -* `single-numa-node` -* `best-effort` -* `restricted` - -Default: `restricted`. - -| `user-level-networking` -| Run with user level networking (DPDK) enabled. - -Possible values: `true` or `false`. - -Default: `false`. -|=== \ No newline at end of file diff --git a/modules/cnf-performing-end-to-end-tests-disconnected-mode.adoc b/modules/cnf-performing-end-to-end-tests-disconnected-mode.adoc deleted file mode 100644 index 71c2b0eaac31..000000000000 --- a/modules/cnf-performing-end-to-end-tests-disconnected-mode.adoc +++ /dev/null @@ -1,178 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-performing-platform-verification-latency-tests.adoc - -:_content-type: PROCEDURE -[id="cnf-performing-end-to-end-tests-disconnected-mode_{context}"] -= Running latency tests in a disconnected cluster - -The CNF tests image can run tests in a disconnected cluster that is not able to reach external registries. This requires two steps: - -. Mirroring the `cnf-tests` image to the custom disconnected registry. - -. Instructing the tests to consume the images from the custom disconnected registry. 
- -[discrete] -[id="cnf-performing-end-to-end-tests-mirroring-images-to-custom-registry_{context}"] -== Mirroring the images to a custom registry accessible from the cluster - -A `mirror` executable is shipped in the image to provide the input required by `oc` to mirror the test image to a local registry. - -. Run this command from an intermediate machine that has access to the cluster and link:https://catalog.redhat.com/software/containers/explore[registry.redhat.io]: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -/usr/bin/mirror -registry | oc image mirror -f - ----- -+ -where: -+ --- - :: Is the disconnected mirror registry you have configured, for example, `my.local.registry:5000/`. --- - -. When you have mirrored the `cnf-tests` image into the disconnected registry, you must override the original registry used to fetch the images when running the tests, for example: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ --e DISCOVERY_MODE=true -e FEATURES=performance -e IMAGE_REGISTRY="" \ --e CNF_TESTS_IMAGE="cnf-tests-rhel8:v{product-version}" \ -/usr/bin/test-run.sh -ginkgo.focus="\[performance\]\ Latency\ Test" ----- - -[discrete] -[id="cnf-performing-end-to-end-tests-image-parameters_{context}"] -== Configuring the tests to consume images from a custom registry - -You can run the latency tests using a custom test image and image registry using `CNF_TESTS_IMAGE` and `IMAGE_REGISTRY` variables. - -* To configure the latency tests to use a custom test image and image registry, run the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ --e IMAGE_REGISTRY="" \ --e CNF_TESTS_IMAGE="" \ --e FEATURES=performance \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} /usr/bin/test-run.sh ----- -+ -where: -+ --- - :: is the custom image registry, for example, `custom.registry:5000/`. - :: is the custom cnf-tests image, for example, `custom-cnf-tests-image:latest`. --- - -[discrete] -[id="cnf-performing-end-to-end-tests-mirroring-to-cluster-internal-registry_{context}"] -== Mirroring images to the cluster {product-registry} - -{product-title} provides a built-in container image registry, which runs as a standard workload on the cluster. - -.Procedure - -. Gain external access to the registry by exposing it with a route: -+ -[source,terminal] ----- -$ oc patch configs.imageregistry.operator.openshift.io/cluster --patch '{"spec":{"defaultRoute":true}}' --type=merge ----- - -. Fetch the registry endpoint by running the following command: -+ -[source,terminal] ----- -$ REGISTRY=$(oc get route default-route -n openshift-image-registry --template='{{ .spec.host }}') ----- - -. Create a namespace for exposing the images: -+ -[source,terminal] ----- -$ oc create ns cnftests ----- - -. Make the image stream available to all the namespaces used for tests. This is required to allow the tests namespaces to fetch the images from the `cnf-tests` image stream. 
Run the following commands: -+ -[source,terminal] ----- -$ oc policy add-role-to-user system:image-puller system:serviceaccount:cnf-features-testing:default --namespace=cnftests ----- -+ -[source,terminal] ----- -$ oc policy add-role-to-user system:image-puller system:serviceaccount:performance-addon-operators-testing:default --namespace=cnftests ----- - -. Retrieve the docker secret name and auth token by running the following commands: -+ -[source,terminal] ----- -$ SECRET=$(oc -n cnftests get secret | grep builder-docker | awk {'print $1'} ----- -+ -[source,terminal] ----- -$ TOKEN=$(oc -n cnftests get secret $SECRET -o jsonpath="{.data['\.dockercfg']}" | base64 --decode | jq '.["image-registry.openshift-image-registry.svc:5000"].auth') ----- - -. Create a `dockerauth.json` file, for example: -+ -[source,bash] ----- -$ echo "{\"auths\": { \"$REGISTRY\": { \"auth\": $TOKEN } }}" > dockerauth.json ----- - -. Do the image mirroring: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ -registry.redhat.io/openshift4/cnf-tests-rhel8:{product-version} \ -/usr/bin/mirror -registry $REGISTRY/cnftests | oc image mirror --insecure=true \ --a=$(pwd)/dockerauth.json -f - ----- - -. Run the tests: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ --e DISCOVERY_MODE=true -e FEATURES=performance -e IMAGE_REGISTRY=image-registry.openshift-image-registry.svc:5000/cnftests \ -cnf-tests-local:latest /usr/bin/test-run.sh -ginkgo.focus="\[performance\]\ Latency\ Test" ----- - -[discrete] -[id="mirroring-different-set-of-images_{context}"] -== Mirroring a different set of test images - -You can optionally change the default upstream images that are mirrored for the latency tests. - -.Procedure - -. The `mirror` command tries to mirror the upstream images by default. This can be overridden by passing a file with the following format to the image: -+ - -[source,yaml,subs="attributes+"] ----- -[ - { - "registry": "public.registry.io:5000", - "image": "imageforcnftests:{product-version}" - } -] ----- - -. Pass the file to the `mirror` command, for example saving it locally as `images.json`. With the following command, the local path is mounted in `/kubeconfig` inside the container and that can be passed to the mirror command. -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} /usr/bin/mirror \ ---registry "my.local.registry:5000/" --images "/kubeconfig/images.json" \ -| oc image mirror -f - ----- diff --git a/modules/cnf-performing-end-to-end-tests-junit-test-output.adoc b/modules/cnf-performing-end-to-end-tests-junit-test-output.adoc deleted file mode 100644 index be2ad8f1b5e1..000000000000 --- a/modules/cnf-performing-end-to-end-tests-junit-test-output.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-performing-platform-verification-latency-tests.adoc - -:_content-type: PROCEDURE -[id="cnf-performing-end-to-end-tests-junit-test-output_{context}"] -= Generating a JUnit latency test report - -Use the following procedures to generate a JUnit latency test output and test failure report. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -* You have logged in as a user with `cluster-admin` privileges. 
- -.Procedure - -* Create a JUnit-compliant XML report by passing the `--junit` parameter together with the path to where the report is dumped: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -v $(pwd)/junitdest: \ --e KUBECONFIG=/kubeconfig/kubeconfig -e DISCOVERY_MODE=true -e FEATURES=performance \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -/usr/bin/test-run.sh --junit \ --ginkgo.focus="\[performance\]\ Latency\ Test" ----- -+ -where: -+ --- - :: Is the path to the folder where the junit report is generated --- diff --git a/modules/cnf-performing-end-to-end-tests-running-cyclictest.adoc b/modules/cnf-performing-end-to-end-tests-running-cyclictest.adoc deleted file mode 100644 index 608533e13786..000000000000 --- a/modules/cnf-performing-end-to-end-tests-running-cyclictest.adoc +++ /dev/null @@ -1,144 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-performing-platform-verification-latency-tests.adoc - -:_content-type: PROCEDURE -[id="cnf-performing-end-to-end-tests-running-cyclictest_{context}"] -= Running cyclictest - -The `cyclictest` tool measures the real-time kernel scheduler latency on the specified CPUs. - -[IMPORTANT] -==== -**Always** run the latency tests with `DISCOVERY_MODE=true` set. If you don't, the test suite will make changes to the running cluster configuration. -==== - -[NOTE] -==== -When executing `podman` commands as a non-root or non-privileged user, mounting paths can fail with `permission denied` errors. To make the `podman` command work, append `:Z` to the volumes creation; for example, `-v $(pwd)/:/kubeconfig:Z`. This allows `podman` to do the proper SELinux relabeling. -==== - -.Prerequisites - -* You have logged in to `registry.redhat.io` with your Customer Portal credentials. - -* You have installed the real-time kernel in the cluster. - -* You have applied a cluster performance profile by using Node Tuning Operator. - -.Procedure - -* To perform the `cyclictest`, run the following command, substituting variable values as appropriate: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ --e LATENCY_TEST_RUN=true -e DISCOVERY_MODE=true -e FEATURES=performance -e ROLE_WORKER_CNF=worker-cnf \ --e LATENCY_TEST_CPUS=10 -e LATENCY_TEST_RUNTIME=600 -e MAXIMUM_LATENCY=20 \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -/usr/bin/test-run.sh -ginkgo.v -ginkgo.focus="cyclictest" ----- -+ -The command runs the `cyclictest` tool for 10 minutes (600 seconds). The test runs successfully when the maximum observed latency is lower than `MAXIMUM_LATENCY` (in this example, 20 μs). Latency spikes of 20 μs and above are generally not acceptable for telco RAN workloads. -+ -If the results exceed the latency threshold, the test fails. -+ -[IMPORTANT] -==== -For valid results, the test should run for at least 12 hours. -==== -+ -.Example failure output -[source,terminal,subs="attributes+"] ----- -running /usr/bin/cnftests -ginkgo.v -ginkgo.focus=cyclictest -I0908 13:01:59.193776 27 request.go:601] Waited for 1.046228824s due to client-side throttling, not priority and fairness, request: GET:https://api.compute-1.example.com:6443/apis/packages.operators.coreos.com/v1?timeout=32s -Running Suite: CNF Features e2e integration tests -================================================= -Random Seed: 1662642118 -Will run 1 of 194 specs - -[...] 
- -Summarizing 1 Failure: - -[Fail] [performance] Latency Test with the cyclictest image [It] should succeed -/remote-source/app/vendor/github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/4_latency/latency.go:220 - -Ran 1 of 194 Specs in 161.151 seconds -FAIL! -- 0 Passed | 1 Failed | 0 Pending | 193 Skipped ---- FAIL: TestTest (161.48s) -FAIL ----- - -[discrete] -[id="cnf-performing-end-to-end-tests-example-results-cyclictest_{context}"] -== Example cyclictest results - -The same output can indicate different results for different workloads. For example, spikes up to 18μs are acceptable for 4G DU workloads, but not for 5G DU workloads. - -.Example of good results -[source, terminal] ----- -running cmd: cyclictest -q -D 10m -p 1 -t 16 -a 2,4,6,8,10,12,14,16,54,56,58,60,62,64,66,68 -h 30 -i 1000 -m -# Histogram -000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 -000001 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 -000002 579506 535967 418614 573648 532870 529897 489306 558076 582350 585188 583793 223781 532480 569130 472250 576043 -More histogram entries ... -# Total: 000600000 000600000 000600000 000599999 000599999 000599999 000599998 000599998 000599998 000599997 000599997 000599996 000599996 000599995 000599995 000599995 -# Min Latencies: 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 -# Avg Latencies: 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 -# Max Latencies: 00005 00005 00004 00005 00004 00004 00005 00005 00006 00005 00004 00005 00004 00004 00005 00004 -# Histogram Overflows: 00000 00000 00000 00000 00000 00000 00000 00000 00000 00000 00000 00000 00000 00000 00000 00000 -# Histogram Overflow at cycle number: -# Thread 0: -# Thread 1: -# Thread 2: -# Thread 3: -# Thread 4: -# Thread 5: -# Thread 6: -# Thread 7: -# Thread 8: -# Thread 9: -# Thread 10: -# Thread 11: -# Thread 12: -# Thread 13: -# Thread 14: -# Thread 15: ----- - -.Example of bad results -[source, terminal] ----- -running cmd: cyclictest -q -D 10m -p 1 -t 16 -a 2,4,6,8,10,12,14,16,54,56,58,60,62,64,66,68 -h 30 -i 1000 -m -# Histogram -000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 -000001 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 -000002 564632 579686 354911 563036 492543 521983 515884 378266 592621 463547 482764 591976 590409 588145 589556 353518 -More histogram entries ... 
-# Total: 000599999 000599999 000599999 000599997 000599997 000599998 000599998 000599997 000599997 000599996 000599995 000599996 000599995 000599995 000599995 000599993 -# Min Latencies: 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 -# Avg Latencies: 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 -# Max Latencies: 00493 00387 00271 00619 00541 00513 00009 00389 00252 00215 00539 00498 00363 00204 00068 00520 -# Histogram Overflows: 00001 00001 00001 00002 00002 00001 00000 00001 00001 00001 00002 00001 00001 00001 00001 00002 -# Histogram Overflow at cycle number: -# Thread 0: 155922 -# Thread 1: 110064 -# Thread 2: 110064 -# Thread 3: 110063 155921 -# Thread 4: 110063 155921 -# Thread 5: 155920 -# Thread 6: -# Thread 7: 110062 -# Thread 8: 110062 -# Thread 9: 155919 -# Thread 10: 110061 155919 -# Thread 11: 155918 -# Thread 12: 155918 -# Thread 13: 110060 -# Thread 14: 110060 -# Thread 15: 110059 155917 ----- diff --git a/modules/cnf-performing-end-to-end-tests-running-hwlatdetect.adoc b/modules/cnf-performing-end-to-end-tests-running-hwlatdetect.adoc deleted file mode 100644 index 110a0d1f281f..000000000000 --- a/modules/cnf-performing-end-to-end-tests-running-hwlatdetect.adoc +++ /dev/null @@ -1,178 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-performing-platform-verification-latency-tests.adoc - -:_content-type: CONCEPT -[id="cnf-performing-end-to-end-tests-running-hwlatdetect_{context}"] -= Running hwlatdetect - -The `hwlatdetect` tool is available in the `rt-kernel` package with a regular subscription of {op-system-base-full} {op-system-version}. - -[IMPORTANT] -==== -**Always** run the latency tests with `DISCOVERY_MODE=true` set. If you don't, the test suite will make changes to the running cluster configuration. -==== - -[NOTE] -==== -When executing `podman` commands as a non-root or non-privileged user, mounting paths can fail with `permission denied` errors. To make the `podman` command work, append `:Z` to the volumes creation; for example, `-v $(pwd)/:/kubeconfig:Z`. This allows `podman` to do the proper SELinux relabeling. -==== - -.Prerequisites - -* You have installed the real-time kernel in the cluster. - -* You have logged in to `registry.redhat.io` with your Customer Portal credentials. - -.Procedure - -* To run the `hwlatdetect` tests, run the following command, substituting variable values as appropriate: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ --e LATENCY_TEST_RUN=true -e DISCOVERY_MODE=true -e FEATURES=performance -e ROLE_WORKER_CNF=worker-cnf \ --e LATENCY_TEST_RUNTIME=600 -e MAXIMUM_LATENCY=20 \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -/usr/bin/test-run.sh -ginkgo.v -ginkgo.focus="hwlatdetect" ----- -+ -The `hwlatdetect` test runs for 10 minutes (600 seconds). The test runs successfully when the maximum observed latency is lower than `MAXIMUM_LATENCY` (20 μs). -+ -If the results exceed the latency threshold, the test fails. -+ -[IMPORTANT] -==== -For valid results, the test should run for at least 12 hours. 
-==== -+ -.Example failure output -[source,terminal] ----- -running /usr/bin/cnftests -ginkgo.v -ginkgo.focus=hwlatdetect -I0908 15:25:20.023712 27 request.go:601] Waited for 1.046586367s due to client-side throttling, not priority and fairness, request: GET:https://api.hlxcl6.lab.eng.tlv2.redhat.com:6443/apis/imageregistry.operator.openshift.io/v1?timeout=32s -Running Suite: CNF Features e2e integration tests -================================================= -Random Seed: 1662650718 -Will run 1 of 194 specs - -[...] - -• Failure [283.574 seconds] -[performance] Latency Test -/remote-source/app/vendor/github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/4_latency/latency.go:62 - with the hwlatdetect image - /remote-source/app/vendor/github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/4_latency/latency.go:228 - should succeed [It] - /remote-source/app/vendor/github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/4_latency/latency.go:236 - - Log file created at: 2022/09/08 15:25:27 - Running on machine: hwlatdetect-b6n4n - Binary: Built with gc go1.17.12 for linux/amd64 - Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg - I0908 15:25:27.160620 1 node.go:39] Environment information: /proc/cmdline: BOOT_IMAGE=(hd1,gpt3)/ostree/rhcos-c6491e1eedf6c1f12ef7b95e14ee720bf48359750ac900b7863c625769ef5fb9/vmlinuz-4.18.0-372.19.1.el8_6.x86_64 random.trust_cpu=on console=tty0 console=ttyS0,115200n8 ignition.platform.id=metal ostree=/ostree/boot.1/rhcos/c6491e1eedf6c1f12ef7b95e14ee720bf48359750ac900b7863c625769ef5fb9/0 ip=dhcp root=UUID=5f80c283-f6e6-4a27-9b47-a287157483b2 rw rootflags=prjquota boot=UUID=773bf59a-bafd-48fc-9a87-f62252d739d3 skew_tick=1 nohz=on rcu_nocbs=0-3 tuned.non_isolcpus=0000ffff,ffffffff,fffffff0 systemd.cpu_affinity=4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79 intel_iommu=on iommu=pt isolcpus=managed_irq,0-3 nohz_full=0-3 tsc=nowatchdog nosoftlockup nmi_watchdog=0 mce=off skew_tick=1 rcutree.kthread_prio=11 + + - I0908 15:25:27.160830 1 node.go:46] Environment information: kernel version 4.18.0-372.19.1.el8_6.x86_64 - I0908 15:25:27.160857 1 main.go:50] running the hwlatdetect command with arguments [/usr/bin/hwlatdetect --threshold 1 --hardlimit 1 --duration 100 --window 10000000us --width 950000us] - F0908 15:27:10.603523 1 main.go:53] failed to run hwlatdetect command; out: hwlatdetect: test duration 100 seconds - detector: tracer - parameters: - Latency threshold: 1us <1> - Sample window: 10000000us - Sample width: 950000us - Non-sampling period: 9050000us - Output File: None - - Starting test - test finished - Max Latency: 326us <2> - Samples recorded: 5 - Samples exceeding threshold: 5 - ts: 1662650739.017274507, inner:6, outer:6 - ts: 1662650749.257272414, inner:14, outer:326 - ts: 1662650779.977272835, inner:314, outer:12 - ts: 1662650800.457272384, inner:3, outer:9 - ts: 1662650810.697273520, inner:3, outer:2 - -[...] - -JUnit report was created: /junit.xml/cnftests-junit.xml - - -Summarizing 1 Failure: - -[Fail] [performance] Latency Test with the hwlatdetect image [It] should succeed -/remote-source/app/vendor/github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/4_latency/latency.go:476 - -Ran 1 of 194 Specs in 365.797 seconds -FAIL! 
-- 0 Passed | 1 Failed | 0 Pending | 193 Skipped
---- FAIL: TestTest (366.08s)
-FAIL
-----
-<1> You can configure the latency threshold by using the `MAXIMUM_LATENCY` or the `HWLATDETECT_MAXIMUM_LATENCY` environment variables.
-<2> The maximum latency value measured during the test.
-
-[discrete]
-[id="cnf-performing-end-to-end-tests-example-results-hwlatdetect_{context}"]
-== Example hwlatdetect test results
-
-You can capture the following types of results:
-
-* Rough results that are gathered after each run to create a history of impact on any changes made throughout the test.
-
-* The combined set of the rough tests with the best results and configuration settings.
-
-.Example of good results
-[source, terminal]
-----
-hwlatdetect: test duration 3600 seconds
-detector: tracer
-parameters:
-Latency threshold: 10us
-Sample window: 1000000us
-Sample width: 950000us
-Non-sampling period: 50000us
-Output File: None
-
-Starting test
-test finished
-Max Latency: Below threshold
-Samples recorded: 0
-----
-
-The `hwlatdetect` tool only provides output if the sample exceeds the specified threshold.
-
-.Example of bad results
-[source, terminal]
-----
-hwlatdetect: test duration 3600 seconds
-detector: tracer
-parameters:
-Latency threshold: 10us
-Sample window: 1000000us
-Sample width: 950000us
-Non-sampling period: 50000us
-Output File: None
-
-Starting tests:
-ts: 1610542421.275784439, inner:78, outer:81
-ts: 1610542444.330561619, inner:27, outer:28
-ts: 1610542445.332549975, inner:39, outer:38
-ts: 1610542541.568546097, inner:47, outer:32
-ts: 1610542590.681548531, inner:13, outer:17
-ts: 1610543033.818801482, inner:29, outer:30
-ts: 1610543080.938801990, inner:90, outer:76
-ts: 1610543129.065549639, inner:28, outer:39
-ts: 1610543474.859552115, inner:28, outer:35
-ts: 1610543523.973856571, inner:52, outer:49
-ts: 1610543572.089799738, inner:27, outer:30
-ts: 1610543573.091550771, inner:34, outer:28
-ts: 1610543574.093555202, inner:116, outer:63
-----
-
-The output of `hwlatdetect` shows that multiple samples exceed the threshold. However, the same output can indicate different results based on the following factors:
-
-* The duration of the test
-* The number of CPU cores
-* The host firmware settings
-
-[WARNING]
-====
-Before proceeding with the next latency test, ensure that the latency reported by `hwlatdetect` meets the required threshold. Fixing latencies introduced by hardware might require you to contact the system vendor support.
-
-Not all latency spikes are hardware related. Ensure that you tune the host firmware to meet your workload requirements. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_for_real_time/9/html-single/optimizing_rhel_9_for_real_time_for_low_latency_operation/index#setting-bios-parameters-for-system-tuning_optimizing-RHEL9-for-real-time-for-low-latency-operation[Setting firmware parameters for system tuning].
-==== diff --git a/modules/cnf-performing-end-to-end-tests-running-in-single-node-cluster.adoc b/modules/cnf-performing-end-to-end-tests-running-in-single-node-cluster.adoc deleted file mode 100644 index 06abc75cbe5b..000000000000 --- a/modules/cnf-performing-end-to-end-tests-running-in-single-node-cluster.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-performing-platform-verification-latency-tests.adoc - -:_content-type: PROCEDURE -[id="cnf-performing-end-to-end-tests-running-in-single-node-cluster_{context}"] -= Running latency tests on a {sno} cluster - -You can run latency tests on {sno} clusters. - -[IMPORTANT] -==== -**Always** run the latency tests with `DISCOVERY_MODE=true` set. If you don't, the test suite will make changes to the running cluster configuration. -==== - -[NOTE] -==== -When executing `podman` commands as a non-root or non-privileged user, mounting paths can fail with `permission denied` errors. To make the `podman` command work, append `:Z` to the volumes creation; for example, `-v $(pwd)/:/kubeconfig:Z`. This allows `podman` to do the proper SELinux relabeling. -==== - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -* You have logged in as a user with `cluster-admin` privileges. - -.Procedure - -* To run the latency tests on a {sno} cluster, run the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ --e DISCOVERY_MODE=true -e FEATURES=performance -e ROLE_WORKER_CNF=master \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -/usr/bin/test-run.sh -ginkgo.focus="\[performance\]\ Latency\ Test" ----- -+ -[NOTE] -==== -`ROLE_WORKER_CNF=master` is required because master is the only machine pool to which the node belongs. For more information about setting the required `MachineConfigPool` for the latency tests, see "Prerequisites for running latency tests". -==== -+ -After running the test suite, all the dangling resources are cleaned up. diff --git a/modules/cnf-performing-end-to-end-tests-running-oslat.adoc b/modules/cnf-performing-end-to-end-tests-running-oslat.adoc deleted file mode 100644 index a54d72cd5361..000000000000 --- a/modules/cnf-performing-end-to-end-tests-running-oslat.adoc +++ /dev/null @@ -1,84 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-performing-platform-verification-latency-tests.adoc - -:_content-type: PROCEDURE -[id="cnf-performing-end-to-end-tests-running-oslat_{context}"] -= Running oslat - -The `oslat` test simulates a CPU-intensive DPDK application and measures all the interruptions and disruptions to test how the cluster handles CPU heavy data processing. - -[IMPORTANT] -==== -**Always** run the latency tests with `DISCOVERY_MODE=true` set. If you don't, the test suite will make changes to the running cluster configuration. -==== - -[NOTE] -==== -When executing `podman` commands as a non-root or non-privileged user, mounting paths can fail with `permission denied` errors. To make the `podman` command work, append `:Z` to the volumes creation; for example, `-v $(pwd)/:/kubeconfig:Z`. This allows `podman` to do the proper SELinux relabeling. -==== - -.Prerequisites - -* You have logged in to `registry.redhat.io` with your Customer Portal credentials. -* You have applied a cluster performance profile by using the Node Tuning Operator. 
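-
-Optionally, before starting the run, you can confirm that a performance profile is applied and that the machine config pool passed in `ROLE_WORKER_CNF` exists. This is a minimal check only, assuming the `worker-cnf` pool name used in these examples:
-
-[source,terminal]
-----
-$ oc get performanceprofile
-$ oc get mcp worker-cnf
-----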
- -.Procedure - -* To perform the `oslat` test, run the following command, substituting variable values as appropriate: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ --e LATENCY_TEST_RUN=true -e DISCOVERY_MODE=true -e FEATURES=performance -e ROLE_WORKER_CNF=worker-cnf \ --e LATENCY_TEST_CPUS=10 -e LATENCY_TEST_RUNTIME=600 -e MAXIMUM_LATENCY=20 \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -/usr/bin/test-run.sh -ginkgo.v -ginkgo.focus="oslat" ----- -+ -`LATENCY_TEST_CPUS` specifies the list of CPUs to test with the `oslat` command. -+ -The command runs the `oslat` tool for 10 minutes (600 seconds). The test runs successfully when the maximum observed latency is lower than `MAXIMUM_LATENCY` (20 μs). -+ -If the results exceed the latency threshold, the test fails. -+ -[IMPORTANT] -==== -For valid results, the test should run for at least 12 hours. -==== -+ -.Example failure output -[source,terminal,subs="attributes+"] ----- -running /usr/bin/cnftests -ginkgo.v -ginkgo.focus=oslat -I0908 12:51:55.999393 27 request.go:601] Waited for 1.044848101s due to client-side throttling, not priority and fairness, request: GET:https://compute-1.example.com:6443/apis/machineconfiguration.openshift.io/v1?timeout=32s -Running Suite: CNF Features e2e integration tests -================================================= -Random Seed: 1662641514 -Will run 1 of 194 specs - -[...] - -• Failure [77.833 seconds] -[performance] Latency Test -/remote-source/app/vendor/github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/4_latency/latency.go:62 - with the oslat image - /remote-source/app/vendor/github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/4_latency/latency.go:128 - should succeed [It] - /remote-source/app/vendor/github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/4_latency/latency.go:153 - - The current latency 304 is bigger than the expected one 1 : <1> - -[...] - -Summarizing 1 Failure: - -[Fail] [performance] Latency Test with the oslat image [It] should succeed -/remote-source/app/vendor/github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/4_latency/latency.go:177 - -Ran 1 of 194 Specs in 161.091 seconds -FAIL! -- 0 Passed | 1 Failed | 0 Pending | 193 Skipped ---- FAIL: TestTest (161.42s) -FAIL ----- -<1> In this example, the measured latency is outside the maximum allowed value. diff --git a/modules/cnf-performing-end-to-end-tests-running-the-tests.adoc b/modules/cnf-performing-end-to-end-tests-running-the-tests.adoc deleted file mode 100644 index 018bdcde713a..000000000000 --- a/modules/cnf-performing-end-to-end-tests-running-the-tests.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-performing-platform-verification-latency-tests.adoc - -:_content-type: PROCEDURE -[id="cnf-performing-end-to-end-tests-running-the-tests_{context}"] -= Running the latency tests - -Run the cluster latency tests to validate node tuning for your Cloud-native Network Functions (CNF) workload. - -[IMPORTANT] -==== -**Always** run the latency tests with `DISCOVERY_MODE=true` set. If you don't, the test suite will make changes to the running cluster configuration. -==== - -[NOTE] -==== -When executing `podman` commands as a non-root or non-privileged user, mounting paths can fail with `permission denied` errors. 
To make the `podman` command work, append `:Z` to the volumes creation; for example, `-v $(pwd)/:/kubeconfig:Z`. This allows `podman` to do the proper SELinux relabeling. -==== - -.Procedure - -. Open a shell prompt in the directory containing the `kubeconfig` file. -+ -You provide the test image with a `kubeconfig` file in current directory and its related `$KUBECONFIG` environment variable, mounted through a volume. This allows the running container to use the `kubeconfig` file from inside the container. - -. Run the latency tests by entering the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ --e LATENCY_TEST_RUN=true -e DISCOVERY_MODE=true -e FEATURES=performance registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -/usr/bin/test-run.sh -ginkgo.focus="\[performance\]\ Latency\ Test" ----- - -. Optional: Append `-ginkgo.dryRun` to run the latency tests in dry-run mode. This is useful for checking what the tests run. - -. Optional: Append `-ginkgo.v` to run the tests with increased verbosity. - -. Optional: To run the latency tests against a specific performance profile, run the following command, substituting appropriate values: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ --e LATENCY_TEST_RUN=true -e FEATURES=performance -e LATENCY_TEST_RUNTIME=600 -e MAXIMUM_LATENCY=20 \ --e PERF_TEST_PROFILE= registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -/usr/bin/test-run.sh -ginkgo.focus="[performance]\ Latency\ Test" ----- -+ -where: -+ --- - :: Is the name of the performance profile you want to run the latency tests against. --- -+ -[IMPORTANT] -==== -For valid latency test results, run the tests for at least 12 hours. -==== diff --git a/modules/cnf-performing-end-to-end-tests-test-failure-report.adoc b/modules/cnf-performing-end-to-end-tests-test-failure-report.adoc deleted file mode 100644 index f51213919621..000000000000 --- a/modules/cnf-performing-end-to-end-tests-test-failure-report.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-performing-platform-verification-latency-tests.adoc - -:_content-type: PROCEDURE -[id="cnf-performing-end-to-end-tests-test-failure-report_{context}"] -= Generating a latency test failure report - -Use the following procedures to generate a JUnit latency test output and test failure report. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -* You have logged in as a user with `cluster-admin` privileges. - -.Procedure - -* Create a test failure report with information about the cluster state and resources for troubleshooting by passing the `--report` parameter with the path to where the report is dumped: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -v $(pwd)/reportdest: \ --e KUBECONFIG=/kubeconfig/kubeconfig -e DISCOVERY_MODE=true -e FEATURES=performance \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -/usr/bin/test-run.sh --report \ --ginkgo.focus="\[performance\]\ Latency\ Test" ----- -+ -where: -+ --- - :: Is the path to the folder where the report is generated. 
--- diff --git a/modules/cnf-performing-end-to-end-tests-troubleshooting.adoc b/modules/cnf-performing-end-to-end-tests-troubleshooting.adoc deleted file mode 100644 index 6d74e4644d6b..000000000000 --- a/modules/cnf-performing-end-to-end-tests-troubleshooting.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-performing-platform-verification-latency-tests.adoc - -:_content-type: PROCEDURE -[id="cnf-performing-end-to-end-tests-troubleshooting_{context}"] -= Troubleshooting errors with the cnf-tests container - -To run latency tests, the cluster must be accessible from within the `cnf-tests` container. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -* You have logged in as a user with `cluster-admin` privileges. - -.Procedure - -* Verify that the cluster is accessible from inside the `cnf-tests` container by running the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -oc get nodes ----- -+ -If this command does not work, an error related to spanning across DNS, MTU size, or firewall access might be occurring. diff --git a/modules/cnf-provisioning-deploying-a-distributed-unit-manually.adoc b/modules/cnf-provisioning-deploying-a-distributed-unit-manually.adoc deleted file mode 100644 index d5f5e5493e4f..000000000000 --- a/modules/cnf-provisioning-deploying-a-distributed-unit-manually.adoc +++ /dev/null @@ -1,112 +0,0 @@ -// CNF-950 4.7 Provisioning and deploying a Distributed Unit (DU) manually -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-provisioning-and-deploying-a-distributed-unit.adoc - -[id="cnf-provisioning-deploying-a-distributed-unit-manually_{context}"] -= Provisioning and deploying a distributed unit (DU) manually - -Radio access network (RAN) is composed of central units (CU), distributed units (DU), and radio units (RU). -RAN from the telecommunications standard perspective is shown below: - -image::135_OpenShift_Distributed_Unit_0121.svg[High level RAN overview] - -From the three components composing RAN, the CU and DU can be virtualized and implemented as cloud-native functions. - -The CU and DU split architecture is driven by real-time computing and networking requirements. A DU can be seen as a real-time part of a -telecommunication baseband unit. -One distributed unit may aggregate several cells. A CU can be seen as a non-realtime part of a baseband unit, aggregating -traffic from one or more distributed units. - -A cell in the context of a DU can be seen as a real-time application performing intensive digital signal processing, data transfer, -and algorithmic tasks. -Cells often use hardware acceleration (FPGA, GPU, eASIC) for DSP processing offload, but there are also software-only implementations -(FlexRAN), based on AVX-512 instructions. 
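-
-For example, you can quickly check whether a candidate node exposes the AVX-512 instruction set by inspecting the CPU flags on the host. This is an informal sanity check, not part of the deployment procedure:
-
-[source,terminal]
-----
-$ grep -o 'avx512[a-z0-9_]*' /proc/cpuinfo | sort -u
-----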
- -Running cell application on COTS hardware requires the following features to be enabled: - -* Real-time kernel -* CPU isolation -* NUMA awareness -* Huge pages memory management -* Precision timing synchronization using PTP -* AVX-512 instruction set (for Flexran and / or FPGA implementation) -* Additional features depending on the RAN Operator requirements - -Accessing hardware acceleration devices and high throughput network interface controllers by virtualized software applications -requires use of SR-IOV and Passthrough PCI device virtualization. - -In addition to the compute and acceleration requirements, DUs operate on multiple internal and external networks. - -[id="cnf-manifest-structure_{context}"] -== The manifest structure - -The profile is built from one cluster specific folder and one or more site-specific folders. -This is done to address a deployment that includes remote worker nodes, with several sites belonging to the same cluster. - -The [`cluster-config`](ran-profile/cluster-config) directory contains performance and PTP customizations based upon -Operator deployments in [`deploy`](../feature-configs/deploy) folder. - -The [`site.1.fqdn`](site.1.fqdn) folder contains site-specific network customizations. - -[id="cnf-du-prerequisites_{context}"] -== Prerequisites - -Before installing the Operators and deploying the DU, perform the following steps. - -. Create a machine config pool for the RAN worker nodes. For example: -+ -[source,terminal] ----- -cat < node-role.kubernetes.io/worker-cnf="" ----- - -. Label the node as PTP slave (DU only): -+ -[source,terminal] ----- -$ oc label --overwrite node/ ptp/slave="" ----- - -[id="cnf-du-configuration-notes_{context}"] -== SR-IOV configuration notes - -The `SriovNetworkNodePolicy` object must be configured differently for different NIC models and placements. - -|==================== -|*Manufacturer* |*deviceType* |*isRdma* -|Intel |vfio-pci or netdevice |false -|Mellanox |netdevice |structure -|==================== - -In addition, when configuring the `nicSelector`, the `pfNames` value must match the intended interface name on the specific host. - -If there is a mixed cluster where some of the nodes are deployed with Intel NICs and some with Mellanox, several SR-IOV configurations can be -created with the same `resourceName`. The device plugin will discover only the available ones and will put the capacity on the node accordingly. diff --git a/modules/cnf-provisioning-real-time-and-low-latency-workloads.adoc b/modules/cnf-provisioning-real-time-and-low-latency-workloads.adoc deleted file mode 100644 index a05e5825cdfc..000000000000 --- a/modules/cnf-provisioning-real-time-and-low-latency-workloads.adoc +++ /dev/null @@ -1,447 +0,0 @@ -// CNF-489 Real time and low latency workload provisioning -// Module included in the following assemblies: -// -// *cnf-low-latency-tuning.adoc - -:_content-type: PROCEDURE -[id="cnf-provisioning-real-time-and-low-latency-workloads_{context}"] -= Provisioning real-time and low latency workloads - -Many industries and organizations need extremely high performance computing and might require low and predictable latency, especially in the financial and telecommunications industries. For these industries, with their unique requirements, {product-title} provides the Node Tuning Operator to implement automatic tuning to achieve low latency performance and consistent response time for {product-title} applications. 
- -The cluster administrator can use this performance profile configuration to make these changes in a more reliable way. The administrator can specify whether to update the kernel to kernel-rt (real-time), reserve CPUs for cluster and operating system housekeeping duties, including pod infra containers, isolate CPUs for application containers to run the workloads, and disable unused CPUs to reduce power consumption. - -[WARNING] -==== -The usage of execution probes in conjunction with applications that require guaranteed CPUs can cause latency spikes. It is recommended to use other probes, such as a properly configured set of network probes, as an alternative. -==== - -[NOTE] -==== -In earlier versions of {product-title}, the Performance Addon Operator was used to implement automatic tuning to achieve low latency performance for OpenShift applications. In {product-title} 4.11 and later, these functions are part of the Node Tuning Operator. -==== - -[id="node-tuning-operator-known-limitations-for-real-time_{context}"] -== Known limitations for real-time - -[NOTE] -==== -In most deployments, kernel-rt is supported only on worker nodes when you use a standard cluster with three control plane nodes and three worker nodes. There are exceptions for compact and single nodes on {product-title} deployments. For installations on a single node, kernel-rt is supported on the single control plane node. -==== - -To fully utilize the real-time mode, the containers must run with elevated privileges. -See link:https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container[Set capabilities for a Container] for information on granting privileges. - -{product-title} restricts the allowed capabilities, so you might need to create a `SecurityContext` as well. - -[NOTE] -==== -This procedure is fully supported with bare metal installations using {op-system-first} systems. -==== - -Establishing the right performance expectations refers to the fact that the real-time kernel is not a panacea. Its objective is consistent, low-latency determinism offering predictable response times. There is some additional kernel overhead associated with the real-time kernel. This is due primarily to handling hardware interruptions in separately scheduled threads. The increased overhead in some workloads results in some degradation in overall throughput. The exact amount of degradation is very workload dependent, ranging from 0% to 30%. However, it is the cost of determinism. - -[id="node-tuning-operator-provisioning-worker-with-real-time-capabilities_{context}"] -== Provisioning a worker with real-time capabilities - -. Optional: Add a node to the {product-title} cluster. -See link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_for_real_time/8/html/optimizing_rhel_8_for_real_time_for_low_latency_operation/setting-bios-parameters-for-system-tuning_optimizing-rhel8-for-real-time-for-low-latency-operation[Setting BIOS parameters for system tuning]. - -. Add the label `worker-rt` to the worker nodes that require the real-time capability by using the `oc` command. - -. 
Create a new machine config pool for real-time nodes: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - name: worker-rt - labels: - machineconfiguration.openshift.io/role: worker-rt -spec: - machineConfigSelector: - matchExpressions: - - { - key: machineconfiguration.openshift.io/role, - operator: In, - values: [worker, worker-rt], - } - paused: false - nodeSelector: - matchLabels: - node-role.kubernetes.io/worker-rt: "" ----- -Note that a machine config pool worker-rt is created for group of nodes that have the label `worker-rt`. - -. Add the node to the proper machine config pool by using node role labels. -+ -[NOTE] -==== -You must decide which nodes are configured with real-time workloads. You could configure all of the nodes in the cluster, or a subset of the nodes. The Node Tuning Operator that expects all of the nodes are part of a dedicated machine config pool. If you use all of the nodes, you must point the Node Tuning Operator to the worker node role label. If you use a subset, you must group the nodes into a new machine config pool. -==== -. Create the `PerformanceProfile` with the proper set of housekeeping cores and `realTimeKernel: enabled: true`. - -. You must set `machineConfigPoolSelector` in `PerformanceProfile`: -+ -[source,yaml] ----- - apiVersion: performance.openshift.io/v2 - kind: PerformanceProfile - metadata: - name: example-performanceprofile - spec: - ... - realTimeKernel: - enabled: true - nodeSelector: - node-role.kubernetes.io/worker-rt: "" - machineConfigPoolSelector: - machineconfiguration.openshift.io/role: worker-rt ----- -. Verify that a matching machine config pool exists with a label: -+ -[source,terminal] ----- -$ oc describe mcp/worker-rt ----- -+ -.Example output -[source,yaml] ----- -Name: worker-rt -Namespace: -Labels: machineconfiguration.openshift.io/role=worker-rt ----- - -. {product-title} will start configuring the nodes, which might involve multiple reboots. Wait for the nodes to settle. This can take a long time depending on the specific hardware you use, but 20 minutes per node is expected. - -. Verify everything is working as expected. - -[id="node-tuning-operator-verifying-real-time-kernel-installation_{context}"] -== Verifying the real-time kernel installation - -Use this command to verify that the real-time kernel is installed: - -[source,terminal] ----- -$ oc get node -o wide ----- - -Note the worker with the role `worker-rt` that contains the string `4.18.0-305.30.1.rt7.102.el8_4.x86_64 cri-o://1.27.3-99.rhaos4.10.gitc3131de.el8`: - -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION INTERNAL-IP -EXTERNAL-IP OS-IMAGE KERNEL-VERSION -CONTAINER-RUNTIME -rt-worker-0.example.com Ready worker,worker-rt 5d17h v1.27.3 -128.66.135.107 Red Hat Enterprise Linux CoreOS 46.82.202008252340-0 (Ootpa) -4.18.0-305.30.1.rt7.102.el8_4.x86_64 cri-o://1.27.3-99.rhaos4.10.gitc3131de.el8 -[...] ----- - -[id="node-tuning-operator-creating-workload-that-works-in-real-time_{context}"] -== Creating a workload that works in real-time - -Use the following procedures for preparing a workload that will use real-time capabilities. - -.Procedure - -. Create a pod with a QoS class of `Guaranteed`. -. Optional: Disable CPU load balancing for DPDK. -. Assign a proper node selector. 
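-
-Taken together, these steps result in a pod specification similar to the following sketch. It only combines fields that are described individually in the sections that follow; the runtime class name and workload image are placeholders:
-
-[source,yaml]
-----
-apiVersion: v1
-kind: Pod
-metadata:
-  name: realtime-workload-example
-  annotations:
-    cpu-load-balancing.crio.io: "disable"
-spec:
-  runtimeClassName: performance-<profile-name>
-  nodeSelector:
-    node-role.kubernetes.io/worker-rt: ""
-  containers:
-  - name: app
-    image: <workload-image>
-    resources:
-      limits:
-        memory: "200Mi"
-        cpu: "2"
-      requests:
-        memory: "200Mi"
-        cpu: "2"
-----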
- -When writing your applications, follow the general recommendations described in -link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_for_real_time/8/html-single/tuning_guide/index#chap-Application_Tuning_and_Deployment[Application tuning and deployment]. - -[id="node-tuning-operator-creating-pod-with-guaranteed-qos-class_{context}"] -== Creating a pod with a QoS class of `Guaranteed` - -Keep the following in mind when you create a pod that is given a QoS class of `Guaranteed`: - -* Every container in the pod must have a memory limit and a memory request, and they must be the same. -* Every container in the pod must have a CPU limit and a CPU request, and they must be the same. - -The following example shows the configuration file for a pod that has one container. The container has a memory limit and a memory request, both equal to 200 MiB. The container has a CPU limit and a CPU request, both equal to 1 CPU. - -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: qos-demo - namespace: qos-example -spec: - containers: - - name: qos-demo-ctr - image: - resources: - limits: - memory: "200Mi" - cpu: "1" - requests: - memory: "200Mi" - cpu: "1" ----- - -. Create the pod: -+ -[source,terminal] ----- -$ oc apply -f qos-pod.yaml --namespace=qos-example ----- - -. View detailed information about the pod: -+ -[source,terminal] ----- -$ oc get pod qos-demo --namespace=qos-example --output=yaml ----- -+ -.Example output -[source,yaml] ----- -spec: - containers: - ... -status: - qosClass: Guaranteed ----- -+ -[NOTE] -==== -If a container specifies its own memory limit, but does not specify a memory request, {product-title} automatically assigns a memory request that matches the limit. Similarly, if a container specifies its own CPU limit, but does not specify a CPU request, {product-title} automatically assigns a CPU request that matches the limit. -==== - -[id="node-tuning-operator-disabling-cpu-load-balancing-for-dpdk_{context}"] -== Optional: Disabling CPU load balancing for DPDK - -Functionality to disable or enable CPU load balancing is implemented on the CRI-O level. The code under the CRI-O disables or enables CPU load balancing only when the following requirements are met. - -* The pod must use the `performance-` runtime class. You can get the proper name by looking at the status of the performance profile, as shown here: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -... -status: - ... - runtimeClass: performance-manual ----- - -[NOTE] -==== -Currently, disabling CPU load balancing is not supported with cgroup v2. -==== - -The Node Tuning Operator is responsible for the creation of the high-performance runtime handler config snippet under relevant nodes and for creation of the high-performance runtime class under the cluster. It will have the same content as default runtime handler except it enables the CPU load balancing configuration functionality. - -To disable the CPU load balancing for the pod, the `Pod` specification must include the following fields: - -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - ... - annotations: - ... - cpu-load-balancing.crio.io: "disable" - ... - ... -spec: - ... - runtimeClassName: performance- - ... ----- - -[NOTE] -==== -Only disable CPU load balancing when the CPU manager static policy is enabled and for pods with guaranteed QoS that use whole CPUs. Otherwise, disabling CPU load balancing can affect the performance of other containers in the cluster. 
-====
-
-[id="node-tuning-operator-assigning-proper-node-selector_{context}"]
-== Assigning a proper node selector
-
-The preferred way to assign a pod to nodes is to use the same node selector the performance profile used, as shown here:
-
-[source,yaml]
-----
-apiVersion: v1
-kind: Pod
-metadata:
-  name: example
-spec:
-  # ...
-  nodeSelector:
-    node-role.kubernetes.io/worker-rt: ""
-----
-
-For more information, see link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.5/html-single/nodes/index#nodes-scheduler-node-selectors[Placing pods on specific nodes using node selectors].
-
-[id="node-tuning-operator-scheduling-workload-onto-worker-with-real-time-capabilities_{context}"]
-== Scheduling a workload onto a worker with real-time capabilities
-
-Use label selectors that match the nodes attached to the machine config pool that was configured for low latency by the Node Tuning Operator. For more information, see link:https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/[Assigning pods to nodes].
-
-[id="node-tuning-operator-disabling-CPUs-for-power-consumption_{context}"]
-== Reducing power consumption by taking CPUs offline
-
-You can generally anticipate the demands of telecommunication workloads. When not all of the CPU resources are required, the Node Tuning Operator allows you to take unused CPUs offline to reduce power consumption by manually updating the performance profile.
-
-To take unused CPUs offline, you must perform the following tasks:
-
-. Set the offline CPUs in the performance profile and save the contents of the YAML file:
-+
-.Example performance profile with offlined CPUs
-[source,yaml]
-----
-apiVersion: performance.openshift.io/v2
-kind: PerformanceProfile
-metadata:
-  name: performance
-spec:
-  additionalKernelArgs:
-  - nmi_watchdog=0
-  - audit=0
-  - mce=off
-  - processor.max_cstate=1
-  - intel_idle.max_cstate=0
-  - idle=poll
-  cpu:
-    isolated: "2-23,26-47"
-    reserved: "0,1,24,25"
-    offlined: "48-59" <1>
-  nodeSelector:
-    node-role.kubernetes.io/worker-cnf: ""
-  numa:
-    topologyPolicy: single-numa-node
-  realTimeKernel:
-    enabled: true
-----
-<1> Optional. You can list CPUs in the `offlined` field to take the specified CPUs offline.
-
-. Apply the updated profile by running the following command:
-+
-[source,terminal]
-----
-$ oc apply -f my-performance-profile.yaml
-----
-
-[id="node-tuning-operator-pod-power-saving-config_{context}"]
-== Optional: Power saving configurations
-
-You can enable power savings for a node that has low priority workloads that are colocated with high priority workloads without impacting the latency or throughput of the high priority workloads. Power saving is possible without modifications to the workloads themselves.
-
-[IMPORTANT]
-====
-The feature is supported on Intel Ice Lake and later generations of Intel CPUs. The capabilities of the processor might impact the latency and throughput of the high priority workloads.
-====
-
-When you configure a node with a power saving configuration, you must configure high priority workloads with performance configuration at the pod level, which means that the configuration applies to all the cores used by the pod.
-
-By disabling P-states and C-states at the pod level, you can configure high priority workloads for best performance and lowest latency.
- -.Configuration for high priority workloads -[cols="1,2", options="header"] -|==== -|Annotation -|Description - -a|[source,yaml] ----- -annotations: - cpu-c-states.crio.io: "disable" - cpu-freq-governor.crio.io: "" ----- -|Provides the best performance for a pod by disabling C-states and specifying the governor type for CPU scaling. The `performance` governor is recommended for high priority workloads. -|==== - - -.Prerequisites - -* You enabled C-states and OS-controlled P-states in the BIOS - -.Procedure - -. Generate a `PerformanceProfile` with `per-pod-power-management` set to `true`: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run --entrypoint performance-profile-creator -v \ -/must-gather:/must-gather:z registry.redhat.io/openshift4/ose-cluster-node-tuning-operator:v{product-version} \ ---mcp-name=worker-cnf --reserved-cpu-count=20 --rt-kernel=true \ ---split-reserved-cpus-across-numa=false --topology-manager-policy=single-numa-node \ ---must-gather-dir-path /must-gather -power-consumption-mode=low-latency \ <1> ---per-pod-power-management=true > my-performance-profile.yaml ----- -<1> The `power-consumption-mode` must be `default` or `low-latency` when the `per-pod-power-management` is set to `true`. - -+ -.Example `PerformanceProfile` with `perPodPowerManagement` - -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: performance -spec: - [.....] - workloadHints: - realTime: true - highPowerConsumption: false - perPodPowerManagement: true ----- - -. Set the default `cpufreq` governor as an additional kernel argument in the `PerformanceProfile` custom resource (CR): -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: performance -spec: - ... - additionalKernelArgs: - - cpufreq.default_governor=schedutil <1> ----- -<1> Using the `schedutil` governor is recommended, however, you can use other governors such as the `ondemand` or `powersave` governors. - -. Set the maximum CPU frequency in the `TunedPerformancePatch` CR: -+ -[source,yaml] ----- -spec: - profile: - - data: | - [sysfs] - /sys/devices/system/cpu/intel_pstate/max_perf_pct = <1> ----- -<1> The `max_perf_pct` controls the maximum frequency the `cpufreq` driver is allowed to set as a percentage of the maximum supported cpu frequency. This value applies to all CPUs. You can check the maximum supported frequency in `/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq`. As a starting point, you can use a percentage that caps all CPUs at the `All Cores Turbo` frequency. The `All Cores Turbo` frequency is the frequency that all cores will run at when the cores are all fully occupied. - -. Add the desired annotations to your high priority workload pods. The annotations override the `default` settings. -+ -.Example high priority workload annotation -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - ... - annotations: - ... - cpu-c-states.crio.io: "disable" - cpu-freq-governor.crio.io: "" - ... - ... -spec: - ... - runtimeClassName: performance- - ... ----- - -. Restart the pods. 
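-+
-For example, if the high priority workload pods are managed by a deployment, you can trigger the restart with a rollout; the deployment name and namespace here are placeholders:
-+
-[source,terminal]
-----
-$ oc rollout restart deployment/<deployment-name> -n <namespace>
-----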
diff --git a/modules/cnf-reducing-netqueues-using-nto.adoc b/modules/cnf-reducing-netqueues-using-nto.adoc deleted file mode 100644 index 791022d4baf9..000000000000 --- a/modules/cnf-reducing-netqueues-using-nto.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -//CNF-1483 (4.8) -// * scalability_and_performance/low-latency-tuning.adoc - -[id="reducing-nic-queues-using-the-node-tuning-operator_{context}"] -= Reducing NIC queues using the Node Tuning Operator - -The Node Tuning Operator allows you to adjust the network interface controller (NIC) queue count for each network device by configuring the performance profile. Device network queues allows the distribution of packets among different physical queues and each queue gets a separate thread for packet processing. - -In real-time or low latency systems, all the unnecessary interrupt request lines (IRQs) pinned to the isolated CPUs must be moved to reserved or housekeeping CPUs. - -In deployments with applications that require system, {product-title} networking or in mixed deployments with Data Plane Development Kit (DPDK) workloads, multiple queues are needed to achieve good throughput and the number of NIC queues should be adjusted or remain unchanged. For example, to achieve low latency the number of NIC queues for DPDK based workloads should be reduced to just the number of reserved or housekeeping CPUs. - -Too many queues are created by default for each CPU and these do not fit into the interrupt tables for housekeeping CPUs when tuning for low latency. Reducing the number of queues makes proper tuning possible. Smaller number of queues means a smaller number of interrupts that then fit in the IRQ table. - -[NOTE] -==== -In earlier versions of {product-title}, the Performance Addon Operator provided automatic, low latency performance tuning for applications. In {product-title} 4.11 and later, this functionality is part of the Node Tuning Operator. -==== diff --git a/modules/cnf-rfhe-notifications-api-refererence.adoc b/modules/cnf-rfhe-notifications-api-refererence.adoc deleted file mode 100644 index afb67c84bdc2..000000000000 --- a/modules/cnf-rfhe-notifications-api-refererence.adoc +++ /dev/null @@ -1,161 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/using-rfhe.adoc - -:_content-type: REFERENCE -[id="cnf-rfhe-notifications-api-refererence_{context}"] -= Subscribing applications to bare-metal events REST API reference - -Use the bare-metal events REST API to subscribe an application to the bare-metal events that are generated on the parent node. - -Subscribe applications to Redfish events by using the resource address `/cluster/node//redfish/event`, where `` is the cluster node running the application. - -Deploy your `cloud-event-consumer` application container and `cloud-event-proxy` sidecar container in a separate application pod. The `cloud-event-consumer` application subscribes to the `cloud-event-proxy` container in the application pod. 
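-
-For example, an application can create a subscription by sending a `POST` request to the sidecar from inside the application pod. The following is a sketch only; the node name is a placeholder and the payload mirrors the example payload shown for the `POST` endpoint below:
-
-[source,terminal]
-----
-$ curl -X POST http://localhost:8089/api/ocloudNotifications/v1/subscriptions \
-  --header "Content-Type: application/json" \
-  --data '{"uriLocation": "http://localhost:8089/api/ocloudNotifications/v1/subscriptions", "resource": "/cluster/node/<node-name>/redfish/event"}'
-----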
- -Use the following API endpoints to subscribe the `cloud-event-consumer` application to Redfish events posted by the `cloud-event-proxy` container at [x-]`http://localhost:8089/api/ocloudNotifications/v1/` in the application pod: - -* `/api/ocloudNotifications/v1/subscriptions` -- `POST`: Creates a new subscription -- `GET`: Retrieves a list of subscriptions -* `/api/ocloudNotifications/v1/subscriptions/` -- `GET`: Returns details for the specified subscription ID -* `api/ocloudNotifications/v1/subscriptions/status/` -- `PUT`: Creates a new status ping request for the specified subscription ID -* `/api/ocloudNotifications/v1/health` -- `GET`: Returns the health status of `ocloudNotifications` API - -[NOTE] -==== -`9089` is the default port for the `cloud-event-consumer` container deployed in the application pod. You can configure a different port for your application as required. -==== - -[discrete] -== api/ocloudNotifications/v1/subscriptions - -[discrete] -=== HTTP method - -`GET api/ocloudNotifications/v1/subscriptions` - -[discrete] -==== Description - -Returns a list of subscriptions. If subscriptions exist, a `200 OK` status code is returned along with the list of subscriptions. - -.Example API response -[source,json] ----- -[ - { - "id": "ca11ab76-86f9-428c-8d3a-666c24e34d32", - "endpointUri": "http://localhost:9089/api/ocloudNotifications/v1/dummy", - "uriLocation": "http://localhost:8089/api/ocloudNotifications/v1/subscriptions/ca11ab76-86f9-428c-8d3a-666c24e34d32", - "resource": "/cluster/node/openshift-worker-0.openshift.example.com/redfish/event" - } -] ----- - -[discrete] -=== HTTP method - -`POST api/ocloudNotifications/v1/subscriptions` - -[discrete] -==== Description - -Creates a new subscription. If a subscription is successfully created, or if it already exists, a `201 Created` status code is returned. - -.Query parameters -|=== -| Parameter | Type - -| subscription -| data -|=== - -.Example payload -[source,json] ----- -{ - "uriLocation": "http://localhost:8089/api/ocloudNotifications/v1/subscriptions", - "resource": "/cluster/node/openshift-worker-0.openshift.example.com/redfish/event" -} ----- - -[discrete] -== api/ocloudNotifications/v1/subscriptions/ - -[discrete] -=== HTTP method - -`GET api/ocloudNotifications/v1/subscriptions/` - -[discrete] -==== Description - -Returns details for the subscription with ID `` - -.Query parameters -|=== -| Parameter | Type - -| `` -| string -|=== - -.Example API response -[source,json] ----- -{ - "id":"ca11ab76-86f9-428c-8d3a-666c24e34d32", - "endpointUri":"http://localhost:9089/api/ocloudNotifications/v1/dummy", - "uriLocation":"http://localhost:8089/api/ocloudNotifications/v1/subscriptions/ca11ab76-86f9-428c-8d3a-666c24e34d32", - "resource":"/cluster/node/openshift-worker-0.openshift.example.com/redfish/event" -} ----- - -[discrete] -== api/ocloudNotifications/v1/subscriptions/status/ - -[discrete] -=== HTTP method - -`PUT api/ocloudNotifications/v1/subscriptions/status/` - -[discrete] -==== Description - -Creates a new status ping request for subscription with ID ``. If a subscription is present, the status request is successful and a `202 Accepted` status code is returned. 
- -.Query parameters -|=== -| Parameter | Type - -| `` -| string -|=== - -.Example API response -[source,json] ----- -{"status":"ping sent"} ----- - -[discrete] -== api/ocloudNotifications/v1/health/ - -[discrete] -=== HTTP method - -`GET api/ocloudNotifications/v1/health/` - -[discrete] -==== Description - -Returns the health status for the `ocloudNotifications` REST API. - -.Example API response -[source,terminal] ----- -OK ----- diff --git a/modules/cnf-running-the-performance-creator-profile-offline.adoc b/modules/cnf-running-the-performance-creator-profile-offline.adoc deleted file mode 100644 index ba395b4ea2cb..000000000000 --- a/modules/cnf-running-the-performance-creator-profile-offline.adoc +++ /dev/null @@ -1,262 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-792 (4.8) -// * scalability_and_performance/cnf-create-performance-profiles.adoc - -:_content-type: PROCEDURE -[id="running-the-performance-profile-creator-wrapper-script_{context}"] -= Running the Performance Profile Creator wrapper script - -The performance profile wrapper script simplifies the running of the Performance Profile Creator (PPC) tool. It hides the complexities associated with running `podman` and specifying the mapping directories and it enables the creation of the performance profile. - -.Prerequisites - -* Access to the Node Tuning Operator image. -* Access to the `must-gather` tarball. - -.Procedure - -. Create a file on your local machine named, for example, `run-perf-profile-creator.sh`: -+ -[source,terminal] ----- -$ vi run-perf-profile-creator.sh ----- - -. Paste the following code into the file: -+ -[source,bash,subs="attributes+"] ----- -#!/bin/bash - -readonly CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-podman} -readonly CURRENT_SCRIPT=$(basename "$0") -readonly CMD="${CONTAINER_RUNTIME} run --entrypoint performance-profile-creator" -readonly IMG_EXISTS_CMD="${CONTAINER_RUNTIME} image exists" -readonly IMG_PULL_CMD="${CONTAINER_RUNTIME} image pull" -readonly MUST_GATHER_VOL="/must-gather" - -NTO_IMG="registry.redhat.io/openshift4/ose-cluster-node-tuning-operator:v{product-version}" -MG_TARBALL="" -DATA_DIR="" - -usage() { - print "Wrapper usage:" - print " ${CURRENT_SCRIPT} [-h] [-p image][-t path] -- [performance-profile-creator flags]" - print "" - print "Options:" - print " -h help for ${CURRENT_SCRIPT}" - print " -p Node Tuning Operator image" - print " -t path to a must-gather tarball" - - ${IMG_EXISTS_CMD} "${NTO_IMG}" && ${CMD} "${NTO_IMG}" -h -} - -function cleanup { - [ -d "${DATA_DIR}" ] && rm -rf "${DATA_DIR}" -} -trap cleanup EXIT - -exit_error() { - print "error: $*" - usage - exit 1 -} - -print() { - echo "$*" >&2 -} - -check_requirements() { - ${IMG_EXISTS_CMD} "${NTO_IMG}" || ${IMG_PULL_CMD} "${NTO_IMG}" || \ - exit_error "Node Tuning Operator image not found" - - [ -n "${MG_TARBALL}" ] || exit_error "Must-gather tarball file path is mandatory" - [ -f "${MG_TARBALL}" ] || exit_error "Must-gather tarball file not found" - - DATA_DIR=$(mktemp -d -t "${CURRENT_SCRIPT}XXXX") || exit_error "Cannot create the data directory" - tar -zxf "${MG_TARBALL}" --directory "${DATA_DIR}" || exit_error "Cannot decompress the must-gather tarball" - chmod a+rx "${DATA_DIR}" - - return 0 -} - -main() { - while getopts ':hp:t:' OPT; do - case "${OPT}" in - h) - usage - exit 0 - ;; - p) - NTO_IMG="${OPTARG}" - ;; - t) - MG_TARBALL="${OPTARG}" - ;; - ?) 
- exit_error "invalid argument: ${OPTARG}" - ;; - esac - done - shift $((OPTIND - 1)) - - check_requirements || exit 1 - - ${CMD} -v "${DATA_DIR}:${MUST_GATHER_VOL}:z" "${NTO_IMG}" "$@" --must-gather-dir-path "${MUST_GATHER_VOL}" - echo "" 1>&2 -} - -main "$@" ----- - -. Add execute permissions for everyone on this script: -+ -[source,terminal] ----- -$ chmod a+x run-perf-profile-creator.sh ----- - -. Optional: Display the `run-perf-profile-creator.sh` command usage: -+ -[source,terminal] ----- -$ ./run-perf-profile-creator.sh -h ----- -+ -.Expected output -+ -[source,terminal] ----- -Wrapper usage: - run-perf-profile-creator.sh [-h] [-p image][-t path] -- [performance-profile-creator flags] - -Options: - -h help for run-perf-profile-creator.sh - -p Node Tuning Operator image <1> - -t path to a must-gather tarball <2> -A tool that automates creation of Performance Profiles - -Usage: - performance-profile-creator [flags] - -Flags: - --disable-ht Disable Hyperthreading - -h, --help help for performance-profile-creator - --info string Show cluster information; requires --must-gather-dir-path, ignore the other arguments. [Valid values: log, json] (default "log") - --mcp-name string MCP name corresponding to the target machines (required) - --must-gather-dir-path string Must gather directory path (default "must-gather") - --offlined-cpu-count int Number of offlined CPUs - --power-consumption-mode string The power consumption mode. [Valid values: default, low-latency, ultra-low-latency] (default "default") - --profile-name string Name of the performance profile to be created (default "performance") - --reserved-cpu-count int Number of reserved CPUs (required) - --rt-kernel Enable Real Time Kernel (required) - --split-reserved-cpus-across-numa Split the Reserved CPUs across NUMA nodes - --topology-manager-policy string Kubelet Topology Manager Policy of the performance profile to be created. [Valid values: single-numa-node, best-effort, restricted] (default "restricted") - --user-level-networking Run with User level Networking(DPDK) enabled ----- -+ -[NOTE] -==== -There two types of arguments: - -* Wrapper arguments namely `-h`, `-p` and `-t` -* PPC arguments -==== -+ -<1> Optional: Specify the Node Tuning Operator image. If not set, the default upstream image is used: `registry.redhat.io/openshift4/ose-cluster-node-tuning-operator:v{product-version}`. -<2> `-t` is a required wrapper script argument and specifies the path to a `must-gather` tarball. - -. Run the performance profile creator tool in discovery mode: -+ -[NOTE] -==== -Discovery mode inspects your cluster using the output from `must-gather`. The output produced includes information on: - -* The NUMA cell partitioning with the allocated CPU IDs -* Whether hyperthreading is enabled - -Using this information you can set appropriate values for some of the arguments supplied to the Performance Profile Creator tool. -==== -+ -[source,terminal] ----- -$ ./run-perf-profile-creator.sh -t /must-gather/must-gather.tar.gz -- --info=log ----- -+ -[NOTE] -==== -The `info` option requires a value which specifies the output format. Possible values are log and JSON. The JSON format is reserved for debugging. -==== - -. 
Check the machine config pool: -+ -[source,terminal] ----- -$ oc get mcp ----- -+ -.Example output - -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-acd1358917e9f98cbdb599aea622d78b True False False 3 3 3 0 22h -worker-cnf rendered-worker-cnf-1d871ac76e1951d32b2fe92369879826 False True False 2 1 1 0 22h ----- - -. Create a performance profile: -+ -[source,terminal] ----- -$ ./run-perf-profile-creator.sh -t /must-gather/must-gather.tar.gz -- --mcp-name=worker-cnf --reserved-cpu-count=2 --rt-kernel=true > my-performance-profile.yaml ----- -+ -[NOTE] -==== -The Performance Profile Creator arguments are shown in the Performance Profile Creator arguments table. The following arguments are required: - -* `reserved-cpu-count` -* `mcp-name` -* `rt-kernel` - -The `mcp-name` argument in this example is set to `worker-cnf` based on the output of the command `oc get mcp`. For {sno} use `--mcp-name=master`. -==== - -. Review the created YAML file: -+ -[source,terminal] ----- -$ cat my-performance-profile.yaml ----- -.Example output -+ -[source,terminal] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: performance -spec: - cpu: - isolated: 1-39,41-79 - reserved: 0,40 - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" - numa: - topologyPolicy: restricted - realTimeKernel: - enabled: false ----- - -. Apply the generated profile: -+ -[NOTE] -==== -Install the Node Tuning Operator before applying the profile. -==== - -+ -[source,terminal] ----- -$ oc apply -f my-performance-profile.yaml ----- diff --git a/modules/cnf-running-the-performance-creator-profile.adoc b/modules/cnf-running-the-performance-creator-profile.adoc deleted file mode 100644 index 49989b853e12..000000000000 --- a/modules/cnf-running-the-performance-creator-profile.adoc +++ /dev/null @@ -1,164 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-792 (4.8) -// * scalability_and_performance/cnf-create-performance-profiles.adoc - -:_content-type: PROCEDURE -[id="running-the-performance-profile-profile-cluster-using-podman_{context}"] -= Running the Performance Profile Creator using podman - -As a cluster administrator, you can run `podman` and the Performance Profile Creator to create a performance profile. - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. -* A cluster installed on bare-metal hardware. -* A node with `podman` and OpenShift CLI (`oc`) installed. -* Access to the Node Tuning Operator image. - -.Procedure - -. Check the machine config pool: -+ -[source,terminal] ----- -$ oc get mcp ----- -.Example output -+ -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-acd1358917e9f98cbdb599aea622d78b True False False 3 3 3 0 22h -worker-cnf rendered-worker-cnf-1d871ac76e1951d32b2fe92369879826 False True False 2 1 1 0 22h ----- - -. Use Podman to authenticate to `registry.redhat.io`: -+ -[source,terminal] ----- -$ podman login registry.redhat.io ----- -+ -[source,bash] ----- -Username: -Password: ----- - -. 
Optional: Display help for the PPC tool: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run --rm --entrypoint performance-profile-creator registry.redhat.io/openshift4/ose-cluster-node-tuning-operator:v{product-version} -h ----- -+ -.Example output -+ -[source,terminal] ----- -A tool that automates creation of Performance Profiles - -Usage: - performance-profile-creator [flags] - -Flags: - --disable-ht Disable Hyperthreading - -h, --help help for performance-profile-creator - --info string Show cluster information; requires --must-gather-dir-path, ignore the other arguments. [Valid values: log, json] (default "log") - --mcp-name string MCP name corresponding to the target machines (required) - --must-gather-dir-path string Must gather directory path (default "must-gather") - --offlined-cpu-count int Number of offlined CPUs - --power-consumption-mode string The power consumption mode. [Valid values: default, low-latency, ultra-low-latency] (default "default") - --profile-name string Name of the performance profile to be created (default "performance") - --reserved-cpu-count int Number of reserved CPUs (required) - --rt-kernel Enable Real Time Kernel (required) - --split-reserved-cpus-across-numa Split the Reserved CPUs across NUMA nodes - --topology-manager-policy string Kubelet Topology Manager Policy of the performance profile to be created. [Valid values: single-numa-node, best-effort, restricted] (default "restricted") - --user-level-networking Run with User level Networking(DPDK) enabled ----- - -. Run the Performance Profile Creator tool in discovery mode: -+ -[NOTE] -==== -Discovery mode inspects your cluster using the output from `must-gather`. The output produced includes information on: - -* The NUMA cell partitioning with the allocated CPU ids -* Whether hyperthreading is enabled - -Using this information you can set appropriate values for some of the arguments supplied to the Performance Profile Creator tool. -==== -+ -[source,terminal,subs="attributes+"] ----- -$ podman run --entrypoint performance-profile-creator -v /must-gather:/must-gather:z registry.redhat.io/openshift4/ose-cluster-node-tuning-operator:v{product-version} --info log --must-gather-dir-path /must-gather ----- -+ -[NOTE] -==== -This command uses the performance profile creator as a new entry point to `podman`. It maps the `must-gather` data for the host into the container image and invokes the required user-supplied profile arguments to produce the `my-performance-profile.yaml` file. - -The `-v` option can be the path to either: - -* The `must-gather` output directory -* An existing directory containing the `must-gather` decompressed tarball - -The `info` option requires a value which specifies the output format. Possible values are log and JSON. The JSON format is reserved for debugging. -==== - -. Run `podman`: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run --entrypoint performance-profile-creator -v /must-gather:/must-gather:z registry.redhat.io/openshift4/ose-cluster-node-tuning-operator:v{product-version} --mcp-name=worker-cnf --reserved-cpu-count=4 --rt-kernel=true --split-reserved-cpus-across-numa=false --must-gather-dir-path /must-gather --power-consumption-mode=ultra-low-latency --offlined-cpu-count=6 > my-performance-profile.yaml ----- -+ -[NOTE] -==== -The Performance Profile Creator arguments are shown in the Performance Profile Creator arguments table. 
The following arguments are required: - -* `reserved-cpu-count` -* `mcp-name` -* `rt-kernel` - -The `mcp-name` argument in this example is set to `worker-cnf` based on the output of the command `oc get mcp`. For {sno} use `--mcp-name=master`. -==== - -. Review the created YAML file: -+ -[source,terminal] ----- -$ cat my-performance-profile.yaml ----- -.Example output -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: performance -spec: - cpu: - isolated: 2-39,48-79 - offlined: 42-47 - reserved: 0-1,40-41 - machineConfigPoolSelector: - machineconfiguration.openshift.io/role: worker-cnf - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" - numa: - topologyPolicy: restricted - realTimeKernel: - enabled: true - workloadHints: - highPowerConsumption: true - realTime: true ----- - -. Apply the generated profile: -+ -[source,terminal] ----- -$ oc apply -f my-performance-profile.yaml ----- diff --git a/modules/cnf-scheduling-numa-aware-workloads-overview-with-manual-performance-settings.adoc b/modules/cnf-scheduling-numa-aware-workloads-overview-with-manual-performance-settings.adoc deleted file mode 100644 index 78e74391fc4f..000000000000 --- a/modules/cnf-scheduling-numa-aware-workloads-overview-with-manual-performance-settings.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc -:_content-type: CONCEPT -[id="cnf-scheduling-numa-aware-workloads-with-manual-perofrmance-settings_{context}"] -= Scheduling NUMA-aware workloads with manual performance settings - -Clusters running latency-sensitive workloads typically feature performance profiles that help to minimize workload latency and optimize performance. However, you can schedule NUMA-aware workloads in a pristine cluster that does not feature a performance profile. The following workflow features a pristine cluster that you can manually configure for performance by using the `KubeletConfig` resource. This is not the typical environment for scheduling NUMA-aware workloads. diff --git a/modules/cnf-scheduling-numa-aware-workloads-overview.adoc b/modules/cnf-scheduling-numa-aware-workloads-overview.adoc deleted file mode 100644 index 970da3a69bb9..000000000000 --- a/modules/cnf-scheduling-numa-aware-workloads-overview.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc -:_content-type: CONCEPT -[id="cnf-scheduling-numa-aware-workloads-overview_{context}"] -= Scheduling NUMA-aware workloads - -Clusters running latency-sensitive workloads typically feature performance profiles that help to minimize workload latency and optimize performance. The NUMA-aware scheduler deploys workloads based on available node NUMA resources and with respect to any performance profile settings applied to the node. The combination of NUMA-aware deployments, and the performance profile of the workload, ensures that workloads are scheduled in a way that maximizes performance. 
\ No newline at end of file diff --git a/modules/cnf-scheduling-numa-aware-workloads-with-manual-performance-setttings.adoc b/modules/cnf-scheduling-numa-aware-workloads-with-manual-performance-setttings.adoc deleted file mode 100644 index 378b7b5136e3..000000000000 --- a/modules/cnf-scheduling-numa-aware-workloads-with-manual-performance-setttings.adoc +++ /dev/null @@ -1,200 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_content-type: PROCEDURE -[id="cnf-scheduling-numa-aware-workloads-with-manual-performance-setttings_{context}"] -= Scheduling workloads with the NUMA-aware scheduler with manual performance settings - -You can schedule workloads with the NUMA-aware scheduler using `Deployment` CRs that specify the minimum required resources to process the workload. - -The following example deployment uses NUMA-aware scheduling for a sample workload. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -* Install the NUMA Resources Operator and deploy the NUMA-aware secondary scheduler. - -.Procedure - -. Get the name of the NUMA-aware scheduler that is deployed in the cluster by running the following command: -+ -[source,terminal] ----- -$ oc get numaresourcesschedulers.nodetopology.openshift.io numaresourcesscheduler -o json | jq '.status.schedulerName' ----- -+ -.Example output -[source,terminal] ----- -topo-aware-scheduler ----- - -. Create a `Deployment` CR that uses scheduler named `topo-aware-scheduler`, for example: - -.. Save the following YAML in the `nro-deployment.yaml` file: -+ -[source,yaml] ----- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: numa-deployment-1 - namespace: <1> -spec: - replicas: 1 - selector: - matchLabels: - app: test - template: - metadata: - labels: - app: test - spec: - schedulerName: topo-aware-scheduler <2> - containers: - - name: ctnr - image: quay.io/openshifttest/hello-openshift:openshift - imagePullPolicy: IfNotPresent - resources: - limits: - memory: "100Mi" - cpu: "10" - requests: - memory: "100Mi" - cpu: "10" - - name: ctnr2 - image: gcr.io/google_containers/pause-amd64:3.0 - imagePullPolicy: IfNotPresent - resources: - limits: - memory: "100Mi" - cpu: "8" - requests: - memory: "100Mi" - cpu: "8" ----- -<1> Replace with the namespace for your deployment. -<2> `schedulerName` must match the name of the NUMA-aware scheduler that is deployed in your cluster, for example `topo-aware-scheduler`. - -.. Create the `Deployment` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-deployment.yaml ----- - -.Verification - -. Verify that the deployment was successful: -+ -[source,terminal] ----- -$ oc get pods -n openshift-numaresources ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -numa-deployment-1-56954b7b46-pfgw8 2/2 Running 0 129m -numaresources-controller-manager-7575848485-bns4s 1/1 Running 0 15h -numaresourcesoperator-worker-dvj4n 2/2 Running 0 18h -numaresourcesoperator-worker-lcg4t 2/2 Running 0 16h -secondary-scheduler-56994cf6cf-7qf4q 1/1 Running 0 18h ----- - -. 
Verify that the `topo-aware-scheduler` is scheduling the deployed pod by running the following command: -+ -[source,terminal] ----- -$ oc describe pod numa-deployment-1-56954b7b46-pfgw8 -n openshift-numaresources ----- -+ -.Example output -[source,terminal] ----- -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Scheduled 130m topo-aware-scheduler Successfully assigned openshift-numaresources/numa-deployment-1-56954b7b46-pfgw8 to compute-0.example.com ----- -+ -[NOTE] -==== -Deployments that request more resources than is available for scheduling will fail with a `MinimumReplicasUnavailable` error. The deployment succeeds when the required resources become available. Pods remain in the `Pending` state until the required resources are available. -==== - -. Verify that the expected allocated resources are listed for the node. - -.. Identify the node that is running the deployment pod by running the following command, replacing with the namespace you specified in the `Deployment` CR: -+ -[source,terminal] ----- -$ oc get pods -n -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -numa-deployment-1-65684f8fcc-bw4bw 0/2 Running 0 82m 10.128.2.50 worker-0 ----- -+ -.. Run the following command, replacing with the name of that node that is running the deployment pod: -+ -[source,terminal] ----- -$ oc describe noderesourcetopologies.topology.node.k8s.io ----- -+ -.Example output -[source,terminal] ----- -... - -Zones: - Costs: - Name: node-0 - Value: 10 - Name: node-1 - Value: 21 - Name: node-0 - Resources: - Allocatable: 39 - Available: 21 <1> - Capacity: 40 - Name: cpu - Allocatable: 6442450944 - Available: 6442450944 - Capacity: 6442450944 - Name: hugepages-1Gi - Allocatable: 134217728 - Available: 134217728 - Capacity: 134217728 - Name: hugepages-2Mi - Allocatable: 262415904768 - Available: 262206189568 - Capacity: 270146007040 - Name: memory - Type: Node ----- -<1> The `Available` capacity is reduced because of the resources that have been allocated to the guaranteed pod. -+ -Resources consumed by guaranteed pods are subtracted from the available node resources listed under `noderesourcetopologies.topology.node.k8s.io`. - -. Resource allocations for pods with a `Best-effort` or `Burstable` quality of service (`qosClass`) are not reflected in the NUMA node resources under `noderesourcetopologies.topology.node.k8s.io`. If a pod's consumed resources are not reflected in the node resource calculation, verify that the pod has `qosClass` of `Guaranteed` and the CPU request is an integer value, not a decimal value. 
You can verify the that the pod has a `qosClass` of `Guaranteed` by running the following command: -+ -[source,terminal] ----- -$ oc get pod -n -o jsonpath="{ .status.qosClass }" ----- -+ -.Example output -[source,terminal] ----- -Guaranteed ----- diff --git a/modules/cnf-scheduling-numa-aware-workloads.adoc b/modules/cnf-scheduling-numa-aware-workloads.adoc deleted file mode 100644 index 6e4e7c00ba6c..000000000000 --- a/modules/cnf-scheduling-numa-aware-workloads.adoc +++ /dev/null @@ -1,201 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_content-type: PROCEDURE -[id="cnf-scheduling-numa-aware-workloads_{context}"] -= Scheduling workloads with the NUMA-aware scheduler - -You can schedule workloads with the NUMA-aware scheduler using `Deployment` CRs that specify the minimum required resources to process the workload. - -The following example deployment uses NUMA-aware scheduling for a sample workload. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -* Install the NUMA Resources Operator and deploy the NUMA-aware secondary scheduler. - -.Procedure - -. Get the name of the NUMA-aware scheduler that is deployed in the cluster by running the following command: -+ -[source,terminal] ----- -$ oc get numaresourcesschedulers.nodetopology.openshift.io numaresourcesscheduler -o json | jq '.status.schedulerName' ----- -+ -.Example output -[source,terminal] ----- -topo-aware-scheduler ----- - -. Create a `Deployment` CR that uses scheduler named `topo-aware-scheduler`, for example: - -.. Save the following YAML in the `nro-deployment.yaml` file: -+ -[source,yaml] ----- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: numa-deployment-1 - namespace: openshift-numaresources -spec: - replicas: 1 - selector: - matchLabels: - app: test - template: - metadata: - labels: - app: test - spec: - schedulerName: topo-aware-scheduler <1> - containers: - - name: ctnr - image: quay.io/openshifttest/hello-openshift:openshift - imagePullPolicy: IfNotPresent - resources: - limits: - memory: "100Mi" - cpu: "10" - requests: - memory: "100Mi" - cpu: "10" - - name: ctnr2 - image: gcr.io/google_containers/pause-amd64:3.0 - imagePullPolicy: IfNotPresent - command: ["/bin/sh", "-c"] - args: [ "while true; do sleep 1h; done;" ] - resources: - limits: - memory: "100Mi" - cpu: "8" - requests: - memory: "100Mi" - cpu: "8" ----- -<1> `schedulerName` must match the name of the NUMA-aware scheduler that is deployed in your cluster, for example `topo-aware-scheduler`. - -.. Create the `Deployment` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-deployment.yaml ----- - -.Verification - -. Verify that the deployment was successful: -+ -[source,terminal] ----- -$ oc get pods -n openshift-numaresources ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -numa-deployment-1-56954b7b46-pfgw8 2/2 Running 0 129m -numaresources-controller-manager-7575848485-bns4s 1/1 Running 0 15h -numaresourcesoperator-worker-dvj4n 2/2 Running 0 18h -numaresourcesoperator-worker-lcg4t 2/2 Running 0 16h -secondary-scheduler-56994cf6cf-7qf4q 1/1 Running 0 18h ----- - -. 
Verify that the `topo-aware-scheduler` is scheduling the deployed pod by running the following command: -+ -[source,terminal] ----- -$ oc describe pod numa-deployment-1-56954b7b46-pfgw8 -n openshift-numaresources ----- -+ -.Example output -[source,terminal] ----- -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Scheduled 130m topo-aware-scheduler Successfully assigned openshift-numaresources/numa-deployment-1-56954b7b46-pfgw8 to compute-0.example.com ----- -+ -[NOTE] -==== -Deployments that request more resources than is available for scheduling will fail with a `MinimumReplicasUnavailable` error. The deployment succeeds when the required resources become available. Pods remain in the `Pending` state until the required resources are available. -==== - -. Verify that the expected allocated resources are listed for the node. - -.. Identify the node that is running the deployment pod by running the following command, replacing with the namespace you specified in the `Deployment` CR: -+ -[source,terminal] ----- -$ oc get pods -n -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -numa-deployment-1-65684f8fcc-bw4bw 0/2 Running 0 82m 10.128.2.50 worker-0 ----- -+ -.. Run the following command, replacing with the name of that node that is running the deployment pod. -+ -[source,terminal] ----- -$ oc describe noderesourcetopologies.topology.node.k8s.io ----- -+ -.Example output -[source,terminal] ----- -... - -Zones: - Costs: - Name: node-0 - Value: 10 - Name: node-1 - Value: 21 - Name: node-0 - Resources: - Allocatable: 39 - Available: 21 <1> - Capacity: 40 - Name: cpu - Allocatable: 6442450944 - Available: 6442450944 - Capacity: 6442450944 - Name: hugepages-1Gi - Allocatable: 134217728 - Available: 134217728 - Capacity: 134217728 - Name: hugepages-2Mi - Allocatable: 262415904768 - Available: 262206189568 - Capacity: 270146007040 - Name: memory - Type: Node ----- -<1> The `Available` capacity is reduced because of the resources that have been allocated to the guaranteed pod. -+ -Resources consumed by guaranteed pods are subtracted from the available node resources listed under `noderesourcetopologies.topology.node.k8s.io`. - -. Resource allocations for pods with a `Best-effort` or `Burstable` quality of service (`qosClass`) are not reflected in the NUMA node resources under `noderesourcetopologies.topology.node.k8s.io`. If a pod's consumed resources are not reflected in the node resource calculation, verify that the pod has `qosClass` of `Guaranteed` and the CPU request is an integer value, not a decimal value. 
You can verify the that the pod has a `qosClass` of `Guaranteed` by running the following command: -+ -[source,terminal] ----- -$ oc get pod -n -o jsonpath="{ .status.qosClass }" ----- -+ -.Example output -[source,terminal] ----- -Guaranteed ----- diff --git a/modules/cnf-topology-aware-lifecycle-manager-about-cgu-crs.adoc b/modules/cnf-topology-aware-lifecycle-manager-about-cgu-crs.adoc deleted file mode 100644 index c553c600912a..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-about-cgu-crs.adoc +++ /dev/null @@ -1,478 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-2600 (CNF-2133) (4.10), Story TELCODOCS-285 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: CONCEPT -[id="talo-about-cgu-crs_{context}"] -= About the ClusterGroupUpgrade CR - -The {cgu-operator-first} builds the remediation plan from the `ClusterGroupUpgrade` CR for a group of clusters. You can define the following specifications in a `ClusterGroupUpgrade` CR: - -* Clusters in the group -* Blocking `ClusterGroupUpgrade` CRs -* Applicable list of managed policies -* Number of concurrent updates -* Applicable canary updates -* Actions to perform before and after the update -* Update timing - -You can control the start time of an update using the `enable` field in the `ClusterGroupUpgrade` CR. -For example, if you have a scheduled maintenance window of four hours, you can prepare a `ClusterGroupUpgrade` CR with the `enable` field set to `false`. - -You can set the timeout by configuring the `spec.remediationStrategy.timeout` setting as follows: -[source,yaml] ----- -spec - remediationStrategy: - maxConcurrency: 1 - timeout: 240 ----- - -You can use the `batchTimeoutAction` to determine what happens if an update fails for a cluster. -You can specify `continue` to skip the failing cluster and continue to upgrade other clusters, or `abort` to stop policy remediation for all clusters. -Once the timeout elapses, {cgu-operator} removes all `enforce` policies to ensure that no further updates are made to clusters. - -To apply the changes, you set the `enabled` field to `true`. - -For more information see the "Applying update policies to managed clusters" section. - -As {cgu-operator} works through remediation of the policies to the specified clusters, the `ClusterGroupUpgrade` CR can report true or false statuses for a number of conditions. - -[NOTE] -==== -After {cgu-operator} completes a cluster update, the cluster does not update again under the control of the same `ClusterGroupUpgrade` CR. You must create a new `ClusterGroupUpgrade` CR in the following cases: - -* When you need to update the cluster again -* When the cluster changes to non-compliant with the `inform` policy after being updated -==== - -[id="selecting_clusters_{context}"] -== Selecting clusters - -{cgu-operator} builds a remediation plan and selects clusters based on the following fields: - -* The `clusterLabelSelector` field specifies the labels of the clusters that you want to update. This consists of a list of the standard label selectors from `k8s.io/apimachinery/pkg/apis/meta/v1`. Each selector in the list uses either label value pairs or label expressions. Matches from each selector are added to the final list of clusters along with the matches from the `clusterSelector` field and the `cluster` field. -* The `clusters` field specifies a list of clusters to update. -* The `canaries` field specifies the clusters for canary updates. 
-* The `maxConcurrency` field specifies the number of clusters to update in a batch. -* The `actions` field specifies `beforeEnable` actions that {cgu-operator} takes as it begins the update process, and `afterCompletion` actions that {cgu-operator} takes as it completes policy remediation for each cluster. - -You can use the `clusters`, `clusterLabelSelector`, and `clusterSelector` fields together to create a combined list of clusters. - -The remediation plan starts with the clusters listed in the `canaries` field. Each canary cluster forms a single-cluster batch. - -.Sample `ClusterGroupUpgrade` CR with the enabled `field` set to `false` - -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - creationTimestamp: '2022-11-18T16:27:15Z' - finalizers: - - ran.openshift.io/cleanup-finalizer - generation: 1 - name: talm-cgu - namespace: talm-namespace - resourceVersion: '40451823' - uid: cca245a5-4bca-45fa-89c0-aa6af81a596c -Spec: - actions: - afterCompletion: <1> - addClusterLabels: - upgrade-done: "" - deleteClusterLabels: - upgrade-running: "" - deleteObjects: true - beforeEnable: <2> - addClusterLabels: - upgrade-running: "" - backup: false - clusters: <3> - - spoke1 - enable: false <4> - managedPolicies: <5> - - talm-policy - preCaching: false - remediationStrategy: <6> - canaries: <7> - - spoke1 - maxConcurrency: 2 <8> - timeout: 240 - clusterLabelSelectors: <9> - - matchExpressions: - - key: label1 - operator: In - values: - - value1a - - value1b - batchTimeoutAction: <10> -status: <11> - computedMaxConcurrency: 2 - conditions: - - lastTransitionTime: '2022-11-18T16:27:15Z' - message: All selected clusters are valid - reason: ClusterSelectionCompleted - status: 'True' - type: ClustersSelected <12> - - lastTransitionTime: '2022-11-18T16:27:15Z' - message: Completed validation - reason: ValidationCompleted - status: 'True' - type: Validated <13> - - lastTransitionTime: '2022-11-18T16:37:16Z' - message: Not enabled - reason: NotEnabled - status: 'False' - type: Progressing - managedPoliciesForUpgrade: - - name: talm-policy - namespace: talm-namespace - managedPoliciesNs: - talm-policy: talm-namespace - remediationPlan: - - - spoke1 - - - spoke2 - - spoke3 - status: ----- -<1> Specifies the action that {cgu-operator} takes when it completes policy remediation for each cluster. -<2> Specifies the action that {cgu-operator} takes as it begins the update process. -<3> Defines the list of clusters to update. -<4> The `enable` field is set to `false`. -<5> Lists the user-defined set of policies to remediate. -<6> Defines the specifics of the cluster updates. -<7> Defines the clusters for canary updates. -<8> Defines the maximum number of concurrent updates in a batch. The number of remediation batches is the number of canary clusters, plus the number of clusters, except the canary clusters, divided by the `maxConcurrency` value. The clusters that are already compliant with all the managed policies are excluded from the remediation plan. -<9> Displays the parameters for selecting clusters. -<10> Controls what happens if a batch times out. Possible values are `abort` or `continue`. If unspecified, the default is `continue`. -<11> Displays information about the status of the updates. -<12> The `ClustersSelected` condition shows that all selected clusters are valid. -<13> The `Validated` condition shows that all selected clusters have been validated. - -[NOTE] -==== -Any failures during the update of a canary cluster stops the update process. 
-==== - -When the remediation plan is successfully created, you can you set the `enable` field to `true` and {cgu-operator} starts to update the non-compliant clusters with the specified managed policies. - -[NOTE] -==== -You can only make changes to the `spec` fields if the `enable` field of the `ClusterGroupUpgrade` CR is set to `false`. -==== - -[id="validating_{context}"] -== Validating - -{cgu-operator} checks that all specified managed policies are available and correct, and uses the `Validated` condition to report the status and reasons as follows: - -* `true` -+ -Validation is completed. -* `false` -+ -Policies are missing or invalid, or an invalid platform image has been specified. - -[id="precaching_{context}"] -== Pre-caching - -Clusters might have limited bandwidth to access the container image registry, which can cause a timeout before the updates are completed. On {sno} clusters, you can use pre-caching to avoid this. The container image pre-caching starts when you create a `ClusterGroupUpgrade` CR with the `preCaching` field set to `true`. -{cgu-operator} compares the available disk space with the estimated {product-title} image size to ensure that there is enough space. If a cluster has insufficient space, {cgu-operator} cancels pre-caching for that cluster and does not remediate policies on it. - -{cgu-operator} uses the `PrecacheSpecValid` condition to report status information as follows: - -* `true` -+ -The pre-caching spec is valid and consistent. -* `false` -+ -The pre-caching spec is incomplete. - -{cgu-operator} uses the `PrecachingSucceeded` condition to report status information as follows: - -* `true` -+ -TALM has concluded the pre-caching process. If pre-caching fails for any cluster, the update fails for that cluster but proceeds for all other clusters. A message informs you if pre-caching has failed for any clusters. -* `false` -+ -Pre-caching is still in progress for one or more clusters or has failed for all clusters. - -For more information see the "Using the container image pre-cache feature" section. - -[id="creating_backup_{context}"] -== Creating a backup - -For {sno}, {cgu-operator} can create a backup of a deployment before an update. If the update fails, you can recover the previous version and restore a cluster to a working state without requiring a reprovision of applications. To use the backup feature you first create a `ClusterGroupUpgrade` CR with the `backup` field set to `true`. To ensure that the contents of the backup are up to date, the backup is not taken until you set the `enable` field in the `ClusterGroupUpgrade` CR to `true`. - -{cgu-operator} uses the `BackupSucceeded` condition to report the status and reasons as follows: - -* `true` -+ -Backup is completed for all clusters or the backup run has completed but failed for one or more clusters. If backup fails for any cluster, the update fails for that cluster but proceeds for all other clusters. -* `false` -+ -Backup is still in progress for one or more clusters or has failed for all clusters. - -For more information, see the "Creating a backup of cluster resources before upgrade" section. - -[id="updating_clusters_{context}"] -== Updating clusters -{cgu-operator} enforces the policies following the remediation plan. -Enforcing the policies for subsequent batches starts immediately after all the clusters of the current batch are compliant with all the managed policies. If the batch times out, {cgu-operator} moves on to the next batch. 
The timeout value of a batch is the `spec.timeout` field divided by the number of batches in the remediation plan. - -{cgu-operator} uses the `Progressing` condition to report the status and reasons as follows: - -* `true` -+ -{cgu-operator} is remediating non-compliant policies. -* `false` -+ -The update is not in progress. Possible reasons for this are: -+ -** All clusters are compliant with all the managed policies. -** The update has timed out as policy remediation took too long. -** Blocking CRs are missing from the system or have not yet completed. -** The `ClusterGroupUpgrade` CR is not enabled. -** Backup is still in progress. - -[NOTE] -==== -The managed policies apply in the order that they are listed in the `managedPolicies` field in the `ClusterGroupUpgrade` CR. One managed policy is applied to the specified clusters at a time. When a cluster complies with the current policy, the next managed policy is applied to it. -==== - -.Sample `ClusterGroupUpgrade` CR in the `Progressing` state - -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - creationTimestamp: '2022-11-18T16:27:15Z' - finalizers: - - ran.openshift.io/cleanup-finalizer - generation: 1 - name: talm-cgu - namespace: talm-namespace - resourceVersion: '40451823' - uid: cca245a5-4bca-45fa-89c0-aa6af81a596c -Spec: - actions: - afterCompletion: - deleteObjects: true - beforeEnable: {} - backup: false - clusters: - - spoke1 - enable: true - managedPolicies: - - talm-policy - preCaching: true - remediationStrategy: - canaries: - - spoke1 - maxConcurrency: 2 - timeout: 240 - clusterLabelSelectors: - - matchExpressions: - - key: label1 - operator: In - values: - - value1a - - value1b - batchTimeoutAction: -status: - clusters: - - name: spoke1 - state: complete - computedMaxConcurrency: 2 - conditions: - - lastTransitionTime: '2022-11-18T16:27:15Z' - message: All selected clusters are valid - reason: ClusterSelectionCompleted - status: 'True' - type: ClustersSelected - - lastTransitionTime: '2022-11-18T16:27:15Z' - message: Completed validation - reason: ValidationCompleted - status: 'True' - type: Validated - - lastTransitionTime: '2022-11-18T16:37:16Z' - message: Remediating non-compliant policies - reason: InProgress - status: 'True' - type: Progressing <1> - managedPoliciesForUpgrade: - - name: talm-policy - namespace: talm-namespace - managedPoliciesNs: - talm-policy: talm-namespace - remediationPlan: - - - spoke1 - - - spoke2 - - spoke3 - status: - currentBatch: 2 - currentBatchRemediationProgress: - spoke2: - state: Completed - spoke3: - policyIndex: 0 - state: InProgress - currentBatchStartedAt: '2022-11-18T16:27:16Z' - startedAt: '2022-11-18T16:27:15Z' ----- -<1> The `Progressing` fields show that {cgu-operator} is in the process of remediating policies. - -[id="update_status_{context}"] -== Update status - -{cgu-operator} uses the `Succeeded` condition to report the status and reasons as follows: - -* `true` -+ -All clusters are compliant with the specified managed policies. -* `false` -+ -Policy remediation failed as there were no clusters available for remediation, or because policy remediation took too long for one of the following reasons: -+ -** The current batch contains canary updates and the cluster in the batch does not comply with all the managed policies within the batch timeout. -** Clusters did not comply with the managed policies within the `timeout` value specified in the `remediationStrategy` field. 
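To see which of these outcomes applies to a running update, you can read the `Succeeded` and `Progressing` conditions directly from the CR status. The following command is a minimal sketch that assumes a `ClusterGroupUpgrade` CR named `talm-cgu` in the `talm-namespace` namespace, matching the samples in this section; substitute the name and namespace of your own CR:

[source,terminal]
----
$ oc get cgu talm-cgu -n talm-namespace -o json | \
  jq '.status.conditions[] | select(.type == "Succeeded" or .type == "Progressing") | {type, status, reason, message}'
----

The `reason` and `message` fields correspond to the outcomes described above, for example `TimedOut` with the message `Policy remediation took too long`.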
- - -.Sample `ClusterGroupUpgrade` CR in the `Succeeded` state - -[source,yaml] ----- - apiVersion: ran.openshift.io/v1alpha1 - kind: ClusterGroupUpgrade - metadata: - name: cgu-upgrade-complete - namespace: default - spec: - clusters: - - spoke1 - - spoke4 - enable: true - managedPolicies: - - policy1-common-cluster-version-policy - - policy2-common-pao-sub-policy - remediationStrategy: - maxConcurrency: 1 - timeout: 240 - status: <3> - clusters: - - name: spoke1 - state: complete - - name: spoke4 - state: complete - conditions: - - message: All selected clusters are valid - reason: ClusterSelectionCompleted - status: "True" - type: ClustersSelected - - message: Completed validation - reason: ValidationCompleted - status: "True" - type: Validated - - message: All clusters are compliant with all the managed policies - reason: Completed - status: "False" - type: Progressing <1> - - message: All clusters are compliant with all the managed policies - reason: Completed - status: "True" - type: Succeeded <2> - managedPoliciesForUpgrade: - - name: policy1-common-cluster-version-policy - namespace: default - - name: policy2-common-pao-sub-policy - namespace: default - remediationPlan: - - - spoke1 - - - spoke4 - status: - completedAt: '2022-11-18T16:27:16Z' - startedAt: '2022-11-18T16:27:15Z' - ----- -<1> In the `Progressing` fields, the status is `false` as the update has completed; clusters are compliant with all the managed policies. -<2> The `Succeeded` fields show that the validations completed successfully. -<3> The `status` field includes a list of clusters and their respective statuses. The status of a cluster can be `complete` or `timedout`. - -.Sample `ClusterGroupUpgrade` CR in the `timedout` state - -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - creationTimestamp: '2022-11-18T16:27:15Z' - finalizers: - - ran.openshift.io/cleanup-finalizer - generation: 1 - name: talm-cgu - namespace: talm-namespace - resourceVersion: '40451823' - uid: cca245a5-4bca-45fa-89c0-aa6af81a596c -spec: - actions: - afterCompletion: - deleteObjects: true - beforeEnable: {} - backup: false - clusters: - - spoke1 - - spoke2 - enable: true - managedPolicies: - - talm-policy - preCaching: false - remediationStrategy: - maxConcurrency: 2 - timeout: 240 -status: - clusters: - - name: spoke1 - state: complete - - currentPolicy: <1> - name: talm-policy - status: NonCompliant - name: spoke2 - state: timedout - computedMaxConcurrency: 2 - conditions: - - lastTransitionTime: '2022-11-18T16:27:15Z' - message: All selected clusters are valid - reason: ClusterSelectionCompleted - status: 'True' - type: ClustersSelected - - lastTransitionTime: '2022-11-18T16:27:15Z' - message: Completed validation - reason: ValidationCompleted - status: 'True' - type: Validated - - lastTransitionTime: '2022-11-18T16:37:16Z' - message: Policy remediation took too long - reason: TimedOut - status: 'False' - type: Progressing - - lastTransitionTime: '2022-11-18T16:37:16Z' - message: Policy remediation took too long - reason: TimedOut - status: 'False' - type: Succeeded <2> - managedPoliciesForUpgrade: - - name: talm-policy - namespace: talm-namespace - managedPoliciesNs: - talm-policy: talm-namespace - remediationPlan: - - - spoke1 - - spoke2 - status: - startedAt: '2022-11-18T16:27:15Z' - completedAt: '2022-11-18T20:27:15Z' ----- -<1> If a cluster’s state is `timedout`, the `currentPolicy` field shows the name of the policy and the policy status. 
-<2> The status for `succeeded` is `false` and the message indicates that policy remediation took too long. diff --git a/modules/cnf-topology-aware-lifecycle-manager-apply-policies.adoc b/modules/cnf-topology-aware-lifecycle-manager-apply-policies.adoc deleted file mode 100644 index 3e49e4543f40..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-apply-policies.adoc +++ /dev/null @@ -1,381 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-2600 (CNF-2133) (4.10), Story TELCODOCS-285 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: PROCEDURE -[id="talo-apply-policies_{context}"] -= Applying update policies to managed clusters - -You can update your managed clusters by applying your policies. - -.Prerequisites - -* Install the {cgu-operator-first}. -* Provision one or more managed clusters. -* Log in as a user with `cluster-admin` privileges. -* Create {rh-rhacm} policies in the hub cluster. - -.Procedure - -. Save the contents of the `ClusterGroupUpgrade` CR in the `cgu-1.yaml` file. -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-1 - namespace: default -spec: - managedPolicies: <1> - - policy1-common-cluster-version-policy - - policy2-common-nto-sub-policy - - policy3-common-ptp-sub-policy - - policy4-common-sriov-sub-policy - enable: false - clusters: <2> - - spoke1 - - spoke2 - - spoke5 - - spoke6 - remediationStrategy: - maxConcurrency: 2 <3> - timeout: 240 <4> - batchTimeoutAction: <5> ----- -<1> The name of the policies to apply. -<2> The list of clusters to update. -<3> The `maxConcurrency` field signifies the number of clusters updated at the same time. -<4> The update timeout in minutes. -<5> Controls what happens if a batch times out. Possible values are `abort` or `continue`. If unspecified, the default is `continue`. - -. Create the `ClusterGroupUpgrade` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f cgu-1.yaml ----- - -.. Check if the `ClusterGroupUpgrade` CR was created in the hub cluster by running the following command: -+ -[source,terminal] ----- -$ oc get cgu --all-namespaces ----- -+ -.Example output -+ -[source,terminal] ----- -NAMESPACE NAME AGE STATE DETAILS -default cgu-1 8m55 NotEnabled Not Enabled ----- - -.. 
Check the status of the update by running the following command: -+ -[source,terminal] ----- -$ oc get cgu -n default cgu-1 -ojsonpath='{.status}' | jq ----- -+ -.Example output -+ -[source,json] ----- -{ - "computedMaxConcurrency": 2, - "conditions": [ - { - "lastTransitionTime": "2022-02-25T15:34:07Z", - "message": "Not enabled", <1> - "reason": "NotEnabled", - "status": "False", - "type": "Progressing" - } - ], - "copiedPolicies": [ - "cgu-policy1-common-cluster-version-policy", - "cgu-policy2-common-nto-sub-policy", - "cgu-policy3-common-ptp-sub-policy", - "cgu-policy4-common-sriov-sub-policy" - ], - "managedPoliciesContent": { - "policy1-common-cluster-version-policy": "null", - "policy2-common-nto-sub-policy": "[{\"kind\":\"Subscription\",\"name\":\"node-tuning-operator\",\"namespace\":\"openshift-cluster-node-tuning-operator\"}]", - "policy3-common-ptp-sub-policy": "[{\"kind\":\"Subscription\",\"name\":\"ptp-operator-subscription\",\"namespace\":\"openshift-ptp\"}]", - "policy4-common-sriov-sub-policy": "[{\"kind\":\"Subscription\",\"name\":\"sriov-network-operator-subscription\",\"namespace\":\"openshift-sriov-network-operator\"}]" - }, - "managedPoliciesForUpgrade": [ - { - "name": "policy1-common-cluster-version-policy", - "namespace": "default" - }, - { - "name": "policy2-common-nto-sub-policy", - "namespace": "default" - }, - { - "name": "policy3-common-ptp-sub-policy", - "namespace": "default" - }, - { - "name": "policy4-common-sriov-sub-policy", - "namespace": "default" - } - ], - "managedPoliciesNs": { - "policy1-common-cluster-version-policy": "default", - "policy2-common-nto-sub-policy": "default", - "policy3-common-ptp-sub-policy": "default", - "policy4-common-sriov-sub-policy": "default" - }, - "placementBindings": [ - "cgu-policy1-common-cluster-version-policy", - "cgu-policy2-common-nto-sub-policy", - "cgu-policy3-common-ptp-sub-policy", - "cgu-policy4-common-sriov-sub-policy" - ], - "placementRules": [ - "cgu-policy1-common-cluster-version-policy", - "cgu-policy2-common-nto-sub-policy", - "cgu-policy3-common-ptp-sub-policy", - "cgu-policy4-common-sriov-sub-policy" - ], - "precaching": { - "spec": {} - }, - "remediationPlan": [ - [ - "spoke1", - "spoke2" - ], - [ - "spoke5", - "spoke6" - ] - ], - "status": {} -} ----- -<1> The `spec.enable` field in the `ClusterGroupUpgrade` CR is set to `false`. - -.. Check the status of the policies by running the following command: -+ -[source,terminal] ----- -$ oc get policies -A ----- -+ -.Example output -[source,terminal] ----- -NAMESPACE NAME REMEDIATION ACTION COMPLIANCE STATE AGE -default cgu-policy1-common-cluster-version-policy enforce 17m <1> -default cgu-policy2-common-nto-sub-policy enforce 17m -default cgu-policy3-common-ptp-sub-policy enforce 17m -default cgu-policy4-common-sriov-sub-policy enforce 17m -default policy1-common-cluster-version-policy inform NonCompliant 15h -default policy2-common-nto-sub-policy inform NonCompliant 15h -default policy3-common-ptp-sub-policy inform NonCompliant 18m -default policy4-common-sriov-sub-policy inform NonCompliant 18m ----- -<1> The `spec.remediationAction` field of policies currently applied on the clusters is set to `enforce`. The managed policies in `inform` mode from the `ClusterGroupUpgrade` CR remain in `inform` mode during the update. - -. 
Change the value of the `spec.enable` field to `true` by running the following command: -+ -[source,terminal] ----- -$ oc --namespace=default patch clustergroupupgrade.ran.openshift.io/cgu-1 \ ---patch '{"spec":{"enable":true}}' --type=merge ----- - -.Verification - -. Check the status of the update again by running the following command: -+ -[source,terminal] ----- -$ oc get cgu -n default cgu-1 -ojsonpath='{.status}' | jq ----- -+ -.Example output -+ -[source,json] ----- -{ - "computedMaxConcurrency": 2, - "conditions": [ <1> - { - "lastTransitionTime": "2022-02-25T15:33:07Z", - "message": "All selected clusters are valid", - "reason": "ClusterSelectionCompleted", - "status": "True", - "type": "ClustersSelected", - "lastTransitionTime": "2022-02-25T15:33:07Z", - "message": "Completed validation", - "reason": "ValidationCompleted", - "status": "True", - "type": "Validated", - "lastTransitionTime": "2022-02-25T15:34:07Z", - "message": "Remediating non-compliant policies", - "reason": "InProgress", - "status": "True", - "type": "Progressing" - } - ], - "copiedPolicies": [ - "cgu-policy1-common-cluster-version-policy", - "cgu-policy2-common-nto-sub-policy", - "cgu-policy3-common-ptp-sub-policy", - "cgu-policy4-common-sriov-sub-policy" - ], - "managedPoliciesContent": { - "policy1-common-cluster-version-policy": "null", - "policy2-common-nto-sub-policy": "[{\"kind\":\"Subscription\",\"name\":\"node-tuning-operator\",\"namespace\":\"openshift-cluster-node-tuning-operator\"}]", - "policy3-common-ptp-sub-policy": "[{\"kind\":\"Subscription\",\"name\":\"ptp-operator-subscription\",\"namespace\":\"openshift-ptp\"}]", - "policy4-common-sriov-sub-policy": "[{\"kind\":\"Subscription\",\"name\":\"sriov-network-operator-subscription\",\"namespace\":\"openshift-sriov-network-operator\"}]" - }, - "managedPoliciesForUpgrade": [ - { - "name": "policy1-common-cluster-version-policy", - "namespace": "default" - }, - { - "name": "policy2-common-nto-sub-policy", - "namespace": "default" - }, - { - "name": "policy3-common-ptp-sub-policy", - "namespace": "default" - }, - { - "name": "policy4-common-sriov-sub-policy", - "namespace": "default" - } - ], - "managedPoliciesNs": { - "policy1-common-cluster-version-policy": "default", - "policy2-common-nto-sub-policy": "default", - "policy3-common-ptp-sub-policy": "default", - "policy4-common-sriov-sub-policy": "default" - }, - "placementBindings": [ - "cgu-policy1-common-cluster-version-policy", - "cgu-policy2-common-nto-sub-policy", - "cgu-policy3-common-ptp-sub-policy", - "cgu-policy4-common-sriov-sub-policy" - ], - "placementRules": [ - "cgu-policy1-common-cluster-version-policy", - "cgu-policy2-common-nto-sub-policy", - "cgu-policy3-common-ptp-sub-policy", - "cgu-policy4-common-sriov-sub-policy" - ], - "precaching": { - "spec": {} - }, - "remediationPlan": [ - [ - "spoke1", - "spoke2" - ], - [ - "spoke5", - "spoke6" - ] - ], - "status": { - "currentBatch": 1, - "currentBatchStartedAt": "2022-02-25T15:54:16Z", - "remediationPlanForBatch": { - "spoke1": 0, - "spoke2": 1 - }, - "startedAt": "2022-02-25T15:54:16Z" - } -} ----- -<1> Reflects the update progress of the current batch. Run this command again to receive updated information about the progress. - -. If the policies include Operator subscriptions, you can check the installation progress directly on the single-node cluster. - -.. 
Export the `KUBECONFIG` file of the single-node cluster you want to check the installation progress for by running the following command: -+ -[source,terminal] ----- -$ export KUBECONFIG= ----- - -.. Check all the subscriptions present on the single-node cluster and look for the one in the policy you are trying to install through the `ClusterGroupUpgrade` CR by running the following command: -+ -[source,terminal] ----- -$ oc get subs -A | grep -i ----- -+ -.Example output for `cluster-logging` policy -+ -[source,terminal] ----- -NAMESPACE NAME PACKAGE SOURCE CHANNEL -openshift-logging cluster-logging cluster-logging redhat-operators stable ----- - -. If one of the managed policies includes a `ClusterVersion` CR, check the status of platform updates in the current batch by running the following command against the spoke cluster: -+ -[source,terminal] ----- -$ oc get clusterversion ----- -+ -.Example output -+ -[source,terminal] ----- -NAME VERSION AVAILABLE PROGRESSING SINCE STATUS -version 4.9.5 True True 43s Working towards 4.9.7: 71 of 735 done (9% complete) ----- - -. Check the Operator subscription by running the following command: -+ -[source,terminal] ----- -$ oc get subs -n -ojsonpath="{.status}" ----- - -. Check the install plans present on the single-node cluster that is associated with the desired subscription by running the following command: -+ -[source,terminal] ----- -$ oc get installplan -n ----- -+ -.Example output for `cluster-logging` Operator -+ -[source,terminal] ----- -NAMESPACE NAME CSV APPROVAL APPROVED -openshift-logging install-6khtw cluster-logging.5.3.3-4 Manual true <1> ----- -<1> The install plans have their `Approval` field set to `Manual` and their `Approved` field changes from `false` to `true` after {cgu-operator} approves the install plan. -+ -[NOTE] -==== -When {cgu-operator} is remediating a policy containing a subscription, it automatically approves any install plans attached to that subscription. -Where multiple install plans are needed to get the operator to the latest known version, {cgu-operator} might approve multiple install plans, upgrading through one or more intermediate versions to get to the final version. -==== - -. Check if the cluster service version for the Operator of the policy that the `ClusterGroupUpgrade` is installing reached the `Succeeded` phase by running the following command: -+ -[source,terminal] ----- -$ oc get csv -n ----- -+ -.Example output for OpenShift Logging Operator -+ -[source,terminal] ----- -NAME DISPLAY VERSION REPLACES PHASE -cluster-logging.5.4.2 Red Hat OpenShift Logging 5.4.2 Succeeded ----- diff --git a/modules/cnf-topology-aware-lifecycle-manager-autocreate-cgu-cr-ztp.adoc b/modules/cnf-topology-aware-lifecycle-manager-autocreate-cgu-cr-ztp.adoc deleted file mode 100644 index 9507019846cc..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-autocreate-cgu-cr-ztp.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ztp_far_edge/ztp-talm-updating-managed-policies.adoc - -:_content-type: PROCEDURE -[id="talo-precache-autocreated-cgu-for-ztp_{context}"] -= About the auto-created ClusterGroupUpgrade CR for {ztp} - -{cgu-operator} has a controller called `ManagedClusterForCGU` that monitors the `Ready` state of the `ManagedCluster` CRs on the hub cluster and creates the `ClusterGroupUpgrade` CRs for {ztp-first}. 
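While {ztp} provisioning is in progress, you can list the automatically created CRs on the hub cluster. This is a quick sketch that assumes the `ztp-install` namespace described in the next paragraph; each auto-created CR is named after its managed cluster, as in the sample that follows:

[source,terminal]
----
$ oc get cgu -n ztp-install
----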
- -For any managed cluster in the `Ready` state without a `ztp-done` label applied, the `ManagedClusterForCGU` controller automatically creates a `ClusterGroupUpgrade` CR in the `ztp-install` namespace with its associated {rh-rhacm} policies that are created during the {ztp} process. {cgu-operator} then remediates the set of configuration policies that are listed in the auto-created `ClusterGroupUpgrade` CR to push the configuration CRs to the managed cluster. - -If there are no policies for the managed cluster at the time when the cluster becomes `Ready`, a `ClusterGroupUpgrade` CR with no policies is created. Upon completion of the `ClusterGroupUpgrade` the managed cluster is labeled as `ztp-done`. If there are policies that you want to apply for that managed cluster, manually create a `ClusterGroupUpgrade` as a day-2 operation. - -.Example of an auto-created `ClusterGroupUpgrade` CR for {ztp} - -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - generation: 1 - name: spoke1 - namespace: ztp-install - ownerReferences: - - apiVersion: cluster.open-cluster-management.io/v1 - blockOwnerDeletion: true - controller: true - kind: ManagedCluster - name: spoke1 - uid: 98fdb9b2-51ee-4ee7-8f57-a84f7f35b9d5 - resourceVersion: "46666836" - uid: b8be9cd2-764f-4a62-87d6-6b767852c7da -spec: - actions: - afterCompletion: - addClusterLabels: - ztp-done: "" <1> - deleteClusterLabels: - ztp-running: "" - deleteObjects: true - beforeEnable: - addClusterLabels: - ztp-running: "" <2> - clusters: - - spoke1 - enable: true - managedPolicies: - - common-spoke1-config-policy - - common-spoke1-subscriptions-policy - - group-spoke1-config-policy - - spoke1-config-policy - - group-spoke1-validator-du-policy - preCaching: false - remediationStrategy: - maxConcurrency: 1 - timeout: 240 ----- -<1> Applied to the managed cluster when {cgu-operator} completes the cluster configuration. -<2> Applied to the managed cluster when {cgu-operator} starts deploying the configuration policies. diff --git a/modules/cnf-topology-aware-lifecycle-manager-backup-concept.adoc b/modules/cnf-topology-aware-lifecycle-manager-backup-concept.adoc deleted file mode 100644 index d0abba993491..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-backup-concept.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-3901 (CNF-2133) (4.11), Story TELCODOCS-339 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: CONCEPT -[id="talo-backup-feature-concept_{context}"] -= Creating a backup of cluster resources before upgrade - -For {sno}, the {cgu-operator-first} can create a backup of a deployment before an upgrade. If the upgrade fails, you can recover the previous version and restore a cluster to a working state without requiring a reprovision of applications. - -To use the backup feature you first create a `ClusterGroupUpgrade` CR with the `backup` field set to `true`. To ensure that the contents of the backup are up to date, the backup is not taken until you set the `enable` field in the `ClusterGroupUpgrade` CR to `true`. - -{cgu-operator} uses the `BackupSucceeded` condition to report the status and reasons as follows: - -* `true` -+ -Backup is completed for all clusters or the backup run has completed but failed for one or more clusters. If backup fails for any cluster, the update does not proceed for that cluster. 
-* `false` -+ -Backup is still in progress for one or more clusters or has failed for all clusters. The backup process running in the spoke clusters can have the following statuses: -+ -** `PreparingToStart` -+ -The first reconciliation pass is in progress. The {cgu-operator} deletes any spoke backup namespace and hub view resources that have been created in a failed upgrade attempt. -** `Starting` -+ -The backup prerequisites and backup job are being created. -** `Active` -+ -The backup is in progress. -** `Succeeded` -+ -The backup succeeded. -** `BackupTimeout` -+ -Artifact backup is partially done. -** `UnrecoverableError` -+ -The backup has ended with a non-zero exit code. - -[NOTE] -==== -If the backup of a cluster fails and enters the `BackupTimeout` or `UnrecoverableError` state, the cluster update does not proceed for that cluster. Updates to other clusters are not affected and continue. -==== diff --git a/modules/cnf-topology-aware-lifecycle-manager-backup-feature.adoc b/modules/cnf-topology-aware-lifecycle-manager-backup-feature.adoc deleted file mode 100644 index f1f6baae6f82..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-backup-feature.adoc +++ /dev/null @@ -1,125 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-3901 (CNF-2133) (4.11), Story TELCODOCS-339 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: PROCEDURE -[id="talo-backup-start_and_update_{context}"] -= Creating a ClusterGroupUpgrade CR with backup - -You can create a backup of a deployment before an upgrade on {sno} clusters. If the upgrade fails you can use the `upgrade-recovery.sh` script generated by {cgu-operator-first} to return the system to its preupgrade state. -The backup consists of the following items: - -Cluster backup:: A snapshot of `etcd` and static pod manifests. -Content backup:: Backups of folders, for example, `/etc`, `/usr/local`, `/var/lib/kubelet`. -Changed files backup:: Any files managed by `machine-config` that have been changed. -Deployment:: A pinned `ostree` deployment. -Images (Optional):: Any container images that are in use. - - -.Prerequisites - -* Install the {cgu-operator-first}. -* Provision one or more managed clusters. -* Log in as a user with `cluster-admin` privileges. -* Install {rh-rhacm-first}. - -[NOTE] -==== -It is highly recommended that you create a recovery partition. -The following is an example `SiteConfig` custom resource (CR) for a recovery partition of 50 GB: - -[source,yaml] ----- -nodes: - - hostName: "snonode.sno-worker-0.e2e.bos.redhat.com" - role: "master" - rootDeviceHints: - hctl: "0:2:0:0" - deviceName: /dev/sda -........ -........ - #Disk /dev/sda: 893.3 GiB, 959119884288 bytes, 1873281024 sectors - diskPartition: - - device: /dev/sda - partitions: - - mount_point: /var/recovery - size: 51200 - start: 800000 ----- -==== - -.Procedure - -. Save the contents of the `ClusterGroupUpgrade` CR with the `backup` and `enable` fields set to `true` in the `clustergroupupgrades-group-du.yaml` file: -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: du-upgrade-4918 - namespace: ztp-group-du-sno -spec: - preCaching: true - backup: true - clusters: - - cnfdb1 - - cnfdb2 - enable: true - managedPolicies: - - du-upgrade-platform-upgrade - remediationStrategy: - maxConcurrency: 2 - timeout: 240 ----- - -. 
To start the update, apply the `ClusterGroupUpgrade` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f clustergroupupgrades-group-du.yaml ----- - -.Verification - -* Check the status of the upgrade in the hub cluster by running the following command: -+ -[source,terminal] ----- -$ oc get cgu -n ztp-group-du-sno du-upgrade-4918 -o jsonpath='{.status}' ----- -+ -.Example output -+ -[source,json] ----- -{ - "backup": { - "clusters": [ - "cnfdb2", - "cnfdb1" - ], - "status": { - "cnfdb1": "Succeeded", - "cnfdb2": "Failed" <1> - } -}, -"computedMaxConcurrency": 1, -"conditions": [ - { - "lastTransitionTime": "2022-04-05T10:37:19Z", - "message": "Backup failed for 1 cluster", <2> - "reason": "PartiallyDone", <3> - "status": "True", <4> - "type": "Succeeded" - } -], -"precaching": { - "spec": {} -}, -"status": {} ----- -<1> Backup has failed for one cluster. -<2> The message confirms that the backup failed for one cluster. -<3> The backup was partially successful. -<4> The backup process has finished. diff --git a/modules/cnf-topology-aware-lifecycle-manager-backup-recovery.adoc b/modules/cnf-topology-aware-lifecycle-manager-backup-recovery.adoc deleted file mode 100644 index 0c5ee05c1881..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-backup-recovery.adoc +++ /dev/null @@ -1,135 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-3901 (CNF-2133) (4.11), Story TELCODOCS-339 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: PROCEDURE -[id="talo-backup-recovery_{context}"] -= Recovering a cluster after a failed upgrade - -If an upgrade of a cluster fails, you can manually log in to the cluster and use the backup to return the cluster to its preupgrade state. There are two stages: - -Rollback:: If the attempted upgrade included a change to the platform OS deployment, you must roll back to the previous version before running the recovery script. - -[IMPORTANT] -==== -A rollback is only applicable to upgrades from TALM and single-node OpenShift. This process does not apply to rollbacks from any other upgrade type. -==== - -Recovery:: The recovery shuts down containers and uses files from the backup partition to relaunch containers and restore clusters. - -.Prerequisites - -* Install the {cgu-operator-first}. -* Provision one or more managed clusters. -* Install {rh-rhacm-first}. -* Log in as a user with `cluster-admin` privileges. -* Run an upgrade that is configured for backup. - -.Procedure - -. Delete the previously created `ClusterGroupUpgrade` custom resource (CR) by running the following command: -+ -[source,terminal] ----- -$ oc delete cgu/du-upgrade-4918 -n ztp-group-du-sno ----- - -. Log in to the cluster that you want to recover. - -. Check the status of the platform OS deployment by running the following command: -+ -[source,terminal] ----- -$ ostree admin status ----- -.Example outputs -+ -[source,terminal] ----- -[root@lab-test-spoke2-node-0 core]# ostree admin status -* rhcos c038a8f08458bbed83a77ece033ad3c55597e3f64edad66ea12fda18cbdceaf9.0 - Version: 49.84.202202230006-0 - Pinned: yes <1> - origin refspec: c038a8f08458bbed83a77ece033ad3c55597e3f64edad66ea12fda18cbdceaf9 ----- -<1> The current deployment is pinned. A platform OS deployment rollback is not necessary. 
-+ -[source,terminal] ----- -[root@lab-test-spoke2-node-0 core]# ostree admin status -* rhcos f750ff26f2d5550930ccbe17af61af47daafc8018cd9944f2a3a6269af26b0fa.0 - Version: 410.84.202204050541-0 - origin refspec: f750ff26f2d5550930ccbe17af61af47daafc8018cd9944f2a3a6269af26b0fa -rhcos ad8f159f9dc4ea7e773fd9604c9a16be0fe9b266ae800ac8470f63abc39b52ca.0 (rollback) <1> - Version: 410.84.202203290245-0 - Pinned: yes <2> - origin refspec: ad8f159f9dc4ea7e773fd9604c9a16be0fe9b266ae800ac8470f63abc39b52ca ----- -<1> This platform OS deployment is marked for rollback. -<2> The previous deployment is pinned and can be rolled back. - -. To trigger a rollback of the platform OS deployment, run the following command: -+ -[source,terminal] ----- -$ rpm-ostree rollback -r ----- - -. The first phase of the recovery shuts down containers and restores files from the backup partition to the targeted directories. To begin the recovery, run the following command: -+ -[source,terminal] ----- -$ /var/recovery/upgrade-recovery.sh ----- -+ - -. When prompted, reboot the cluster by running the following command: -+ -[source,terminal] ----- -$ systemctl reboot ----- -. After the reboot, restart the recovery by running the following command: -+ -[source,terminal] ----- -$ /var/recovery/upgrade-recovery.sh --resume ----- - -[NOTE] -==== -If the recovery utility fails, you can retry with the `--restart` option: -[source,terminal] ----- -$ /var/recovery/upgrade-recovery.sh --restart ----- -==== - -.Verification -* To check the status of the recovery run the following command: -+ -[source,terminal] ----- -$ oc get clusterversion,nodes,clusteroperator ----- -+ -.Example output -[source,terminal] ----- -NAME VERSION AVAILABLE PROGRESSING SINCE STATUS -clusterversion.config.openshift.io/version 4.9.23 True False 86d Cluster version is 4.9.23 <1> - - -NAME STATUS ROLES AGE VERSION -node/lab-test-spoke1-node-0 Ready master,worker 86d v1.22.3+b93fd35 <2> - -NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE -clusteroperator.config.openshift.io/authentication 4.9.23 True False False 2d7h <3> -clusteroperator.config.openshift.io/baremetal 4.9.23 True False False 86d - - -.............. ----- -<1> The cluster version is available and has the correct version. -<2> The node status is `Ready`. -<3> The `ClusterOperator` object's availability is `True`. diff --git a/modules/cnf-topology-aware-lifecycle-manager-installation-cli.adoc b/modules/cnf-topology-aware-lifecycle-manager-installation-cli.adoc deleted file mode 100644 index 8980d8a04b27..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-installation-cli.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-2600 (CNF-2133) (4.10), Story TELCODOCS-285 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: PROCEDURE -[id="installing-topology-aware-lifecycle-manager-using-cli_{context}"] -= Installing the {cgu-operator-full} by using the CLI - -You can use the OpenShift CLI (`oc`) to install the {cgu-operator-first}. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Install the latest version of the {rh-rhacm} Operator. -* Set up a hub cluster with disconnected registry. -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a `Subscription` CR: -.. 
Define the `Subscription` CR and save the YAML file, for example, `talm-subscription.yaml`:
-+
-[source,yaml]
-----
-apiVersion: operators.coreos.com/v1alpha1
-kind: Subscription
-metadata:
-  name: openshift-topology-aware-lifecycle-manager-subscription
-  namespace: openshift-operators
-spec:
-  channel: "stable"
-  name: topology-aware-lifecycle-manager
-  source: redhat-operators
-  sourceNamespace: openshift-marketplace
-----
-
-.. Create the `Subscription` CR by running the following command:
-+
-[source,terminal]
-----
-$ oc create -f talm-subscription.yaml
-----
-
-.Verification
-
-. Verify that the installation succeeded by inspecting the CSV resource:
-+
-[source,terminal]
-----
-$ oc get csv -n openshift-operators
-----
-+
-.Example output
-[source,terminal,subs="attributes+"]
-----
-NAME                                                   DISPLAY                            VERSION               REPLACES   PHASE
-topology-aware-lifecycle-manager.{product-version}.x   Topology Aware Lifecycle Manager   {product-version}.x              Succeeded
-----
-
-. Verify that the {cgu-operator} is up and running:
-+
-[source,terminal]
-----
-$ oc get deploy -n openshift-operators
-----
-+
-.Example output
-[source,terminal]
-----
-NAMESPACE             NAME                                         READY   UP-TO-DATE   AVAILABLE   AGE
-openshift-operators   cluster-group-upgrades-controller-manager    1/1     1            1           14s
-----
diff --git a/modules/cnf-topology-aware-lifecycle-manager-installation-web-console.adoc b/modules/cnf-topology-aware-lifecycle-manager-installation-web-console.adoc
deleted file mode 100644
index 88ea0d4d24e3..000000000000
--- a/modules/cnf-topology-aware-lifecycle-manager-installation-web-console.adoc
+++ /dev/null
@@ -1,36 +0,0 @@
-// Module included in the following assemblies:
-// Epic CNF-2600 (CNF-2133) (4.10), Story TELCODOCS-285
-// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc
-
-:_content-type: PROCEDURE
-[id="installing-topology-aware-lifecycle-manager-using-web-console_{context}"]
-= Installing the {cgu-operator-full} by using the web console
-
-You can use the {product-title} web console to install the {cgu-operator-full}.
-
-.Prerequisites
-
-// Based on polarion test cases
-
-* Install the latest version of the {rh-rhacm} Operator.
-* Set up a hub cluster with a disconnected registry.
-* Log in as a user with `cluster-admin` privileges.
-
-.Procedure
-
-. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*.
-. Search for the *{cgu-operator-full}* from the list of available Operators, and then click *Install*.
-. Keep the default selection of *Installation mode* ("All namespaces on the cluster (default)") and *Installed Namespace* ("openshift-operators") to ensure that the Operator is installed properly.
-. Click *Install*.
-
-.Verification
-
-To confirm that the installation is successful:
-
-. Navigate to the *Operators* -> *Installed Operators* page.
-. Check that the Operator is installed in the `All Namespaces` namespace and its status is `Succeeded`.
-
-If the Operator is not installed successfully:
-
-. Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures.
-. Navigate to the *Workloads* -> *Pods* page and check the logs in any containers in the `cluster-group-upgrades-controller-manager` pod that are reporting issues.
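For that last check, a minimal sketch, assuming the default `openshift-operators` namespace and the `manager` container name that appears in the troubleshooting examples later in these modules, is to read the controller logs directly from the deployment:

[source,terminal]
----
$ oc logs -n openshift-operators deployment/cluster-group-upgrades-controller-manager -c manager --tail=50
----

Reconciler errors in this output usually name the `ClusterGroupUpgrade` CR or the managed cluster that is causing the problem.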
diff --git a/modules/cnf-topology-aware-lifecycle-manager-operator-and-platform-update.adoc b/modules/cnf-topology-aware-lifecycle-manager-operator-and-platform-update.adoc deleted file mode 100644 index c3bd2729b31b..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-operator-and-platform-update.adoc +++ /dev/null @@ -1,136 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ztp_far_edge/ztp-talm-updating-managed-policies.adoc - -:_content-type: PROCEDURE -[id="talo-operator-and-platform-update_{context}"] -= Performing a platform and an Operator update together - -You can perform a platform and an Operator update at the same time. - -.Prerequisites - -* Install the {cgu-operator-first}. -* Update {ztp-first} to the latest version. -* Provision one or more managed clusters with {ztp}. -* Log in as a user with `cluster-admin` privileges. -* Create {rh-rhacm} policies in the hub cluster. - -.Procedure - -. Create the `PolicyGenTemplate` CR for the updates by following the steps described in the "Performing a platform update" and "Performing an Operator update" sections. - -. Apply the prep work for the platform and the Operator update. - -.. Save the content of the `ClusterGroupUpgrade` CR with the policies for platform update preparation work, catalog source updates, and target clusters to the `cgu-platform-operator-upgrade-prep.yml` file, for example: -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-platform-operator-upgrade-prep - namespace: default -spec: - managedPolicies: - - du-upgrade-platform-upgrade-prep - - du-upgrade-operator-catsrc-policy - clusterSelector: - - group-du-sno - remediationStrategy: - maxConcurrency: 10 - enable: true ----- - -.. Apply the `cgu-platform-operator-upgrade-prep.yml` file to the hub cluster by running the following command: -+ -[source,terminal] ----- -$ oc apply -f cgu-platform-operator-upgrade-prep.yml ----- - -.. Monitor the process. Upon completion, ensure that the policy is compliant by running the following command: -+ -[source,terminal] ----- -$ oc get policies --all-namespaces ----- - -. Create the `ClusterGroupUpdate` CR for the platform and the Operator update with the `spec.enable` field set to `false`. -.. Save the contents of the platform and Operator update `ClusterGroupUpdate` CR with the policies and the target clusters to the `cgu-platform-operator-upgrade.yml` file, as shown in the following example: -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-du-upgrade - namespace: default -spec: - managedPolicies: - - du-upgrade-platform-upgrade <1> - - du-upgrade-operator-catsrc-policy <2> - - common-subscriptions-policy <3> - preCaching: true - clusterSelector: - - group-du-sno - remediationStrategy: - maxConcurrency: 1 - enable: false ----- -<1> This is the platform update policy. -<2> This is the policy containing the catalog source information for the Operators to be updated. It is needed for the pre-caching feature to determine which Operator images to download to the managed cluster. -<3> This is the policy to update the Operators. - -.. Apply the `cgu-platform-operator-upgrade.yml` file to the hub cluster by running the following command: -+ -[source,terminal] ----- -$ oc apply -f cgu-platform-operator-upgrade.yml ----- - -. Optional: Pre-cache the images for the platform and the Operator update. -.. 
Enable pre-caching in the `ClusterGroupUpgrade` CR by running the following command: -+ -[source,terminal] ----- -$ oc --namespace=default patch clustergroupupgrade.ran.openshift.io/cgu-du-upgrade \ ---patch '{"spec":{"preCaching": true}}' --type=merge ----- - -.. Monitor the update process and wait for the pre-caching to complete. Check the status of pre-caching by running the following command on the managed cluster: -+ -[source,terminal] ----- -$ oc get jobs,pods -n openshift-talm-pre-cache ----- - -.. Check if the pre-caching is completed before starting the update by running the following command: -+ -[source,terminal] ----- -$ oc get cgu cgu-du-upgrade -ojsonpath='{.status.conditions}' ----- - -. Start the platform and Operator update. -.. Enable the `cgu-du-upgrade` `ClusterGroupUpgrade` CR to start the platform and the Operator update by running the following command: -+ -[source,terminal] ----- -$ oc --namespace=default patch clustergroupupgrade.ran.openshift.io/cgu-du-upgrade \ ---patch '{"spec":{"enable":true, "preCaching": false}}' --type=merge ----- - -.. Monitor the process. Upon completion, ensure that the policy is compliant by running the following command: -+ -[source,terminal] ----- -$ oc get policies --all-namespaces ----- -+ -[NOTE] -==== -The CRs for the platform and Operator updates can be created from the beginning by configuring the setting to `spec.enable: true`. In this case, the update starts immediately after pre-caching completes and there is no need to manually enable the CR. - -Both pre-caching and the update create extra resources, such as policies, placement bindings, placement rules, managed cluster actions, and managed cluster view, to help complete the procedures. Setting the `afterCompletion.deleteObjects` field to `true` deletes all these resources after the updates complete. -==== diff --git a/modules/cnf-topology-aware-lifecycle-manager-operator-update.adoc b/modules/cnf-topology-aware-lifecycle-manager-operator-update.adoc deleted file mode 100644 index 8cf75b7e0ba8..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-operator-update.adoc +++ /dev/null @@ -1,263 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ztp_far_edge/ztp-talm-updating-managed-policies.adoc - -:_content-type: PROCEDURE -[id="talo-operator-update_{context}"] -= Performing an Operator update - -You can perform an Operator update with the {cgu-operator}. - -.Prerequisites - -* Install the {cgu-operator-first}. -* Update {ztp-first} to the latest version. -* Provision one or more managed clusters with {ztp}. -* Mirror the desired index image, bundle images, and all Operator images referenced in the bundle images. -* Log in as a user with `cluster-admin` privileges. -* Create {rh-rhacm} policies in the hub cluster. - -.Procedure - -. Update the `PolicyGenTemplate` CR for the Operator update. -.. 
Update the `du-upgrade` `PolicyGenTemplate` CR with the following additional contents in the `du-upgrade.yaml` file: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: ran.openshift.io/v1 -kind: PolicyGenTemplate -metadata: - name: "du-upgrade" - namespace: "ztp-group-du-sno" -spec: - bindingRules: - group-du-sno: "" - mcp: "master" - remediationAction: inform - sourceFiles: - - fileName: DefaultCatsrc.yaml - remediationAction: inform - policyName: "operator-catsrc-policy" - metadata: - name: redhat-operators - spec: - displayName: Red Hat Operators Catalog - image: registry.example.com:5000/olm/redhat-operators:v{product-version} <1> - updateStrategy: <2> - registryPoll: - interval: 1h ----- -<1> The index image URL contains the desired Operator images. If the index images are always pushed to the same image name and tag, this change is not needed. -<2> Set how frequently the Operator Lifecycle Manager (OLM) polls the index image for new Operator versions with the `registryPoll.interval` field. This change is not needed if a new index image tag is always pushed for y-stream and z-stream Operator updates. The `registryPoll.interval` field can be set to a shorter interval to expedite the update, however shorter intervals increase computational load. To counteract this, you can restore `registryPoll.interval` to the default value once the update is complete. - - -.. This update generates one policy, `du-upgrade-operator-catsrc-policy`, to update the `redhat-operators` catalog source with the new index images that contain the desired Operators images. -+ -[NOTE] -==== -If you want to use the image pre-caching for Operators and there are Operators from a different catalog source other than `redhat-operators`, you must perform the following tasks: - -* Prepare a separate catalog source policy with the new index image or registry poll interval update for the different catalog source. -* Prepare a separate subscription policy for the desired Operators that are from the different catalog source. -==== -+ -For example, the desired SRIOV-FEC Operator is available in the `certified-operators` catalog source. To update the catalog source and the Operator subscription, add the following contents to generate two policies, `du-upgrade-fec-catsrc-policy` and `du-upgrade-subscriptions-fec-policy`: -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1 -kind: PolicyGenTemplate -metadata: - name: "du-upgrade" - namespace: "ztp-group-du-sno" -spec: - bindingRules: - group-du-sno: "" - mcp: "master" - remediationAction: inform - sourceFiles: - … - - fileName: DefaultCatsrc.yaml - remediationAction: inform - policyName: "fec-catsrc-policy" - metadata: - name: certified-operators - spec: - displayName: Intel SRIOV-FEC Operator - image: registry.example.com:5000/olm/far-edge-sriov-fec:v4.10 - updateStrategy: - registryPoll: - interval: 10m - - fileName: AcceleratorsSubscription.yaml - policyName: "subscriptions-fec-policy" - spec: - channel: "stable" - source: certified-operators ----- - -.. Remove the specified subscriptions channels in the common `PolicyGenTemplate` CR, if they exist. The default subscriptions channels from the {ztp} image are used for the update. -+ -[NOTE] -==== -The default channel for the Operators applied through {ztp} {product-version} is `stable`, except for the `performance-addon-operator`. As of {product-title} 4.11, the `performance-addon-operator` functionality was moved to the `node-tuning-operator`. For the 4.10 release, the default channel for PAO is `v4.10`. 
You can also specify the default channels in the common `PolicyGenTemplate` CR. -==== - -.. Push the `PolicyGenTemplate` CRs updates to the {ztp} Git repository. -+ -ArgoCD pulls the changes from the Git repository and generates the policies on the hub cluster. - -.. Check the created policies by running the following command: -+ -[source,terminal] ----- -$ oc get policies -A | grep -E "catsrc-policy|subscription" ----- - -. Apply the required catalog source updates before starting the Operator update. - -.. Save the content of the `ClusterGroupUpgrade` CR named `operator-upgrade-prep` with the catalog source policies and the target managed clusters to the `cgu-operator-upgrade-prep.yml` file: -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-operator-upgrade-prep - namespace: default -spec: - clusters: - - spoke1 - enable: true - managedPolicies: - - du-upgrade-operator-catsrc-policy - remediationStrategy: - maxConcurrency: 1 ----- - -.. Apply the policy to the hub cluster by running the following command: -+ -[source,terminal] ----- -$ oc apply -f cgu-operator-upgrade-prep.yml ----- - -.. Monitor the update process. Upon completion, ensure that the policy is compliant by running the following command: -+ -[source,terminal] ----- -$ oc get policies -A | grep -E "catsrc-policy" ----- - -. Create the `ClusterGroupUpgrade` CR for the Operator update with the `spec.enable` field set to `false`. -.. Save the content of the Operator update `ClusterGroupUpgrade` CR with the `du-upgrade-operator-catsrc-policy` policy and the subscription policies created from the common `PolicyGenTemplate` and the target clusters to the `cgu-operator-upgrade.yml` file, as shown in the following example: -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-operator-upgrade - namespace: default -spec: - managedPolicies: - - du-upgrade-operator-catsrc-policy <1> - - common-subscriptions-policy <2> - preCaching: false - clusters: - - spoke1 - remediationStrategy: - maxConcurrency: 1 - enable: false ----- -<1> The policy is needed by the image pre-caching feature to retrieve the operator images from the catalog source. -<2> The policy contains Operator subscriptions. If you have followed the structure and content of the reference `PolicyGenTemplates`, all Operator subscriptions are grouped into the `common-subscriptions-policy` policy. -+ -[NOTE] -==== -One `ClusterGroupUpgrade` CR can only pre-cache the images of the desired Operators defined in the subscription policy from one catalog source included in the `ClusterGroupUpgrade` CR. If the desired Operators are from different catalog sources, such as in the example of the SRIOV-FEC Operator, another `ClusterGroupUpgrade` CR must be created with `du-upgrade-fec-catsrc-policy` and `du-upgrade-subscriptions-fec-policy` policies for the SRIOV-FEC Operator images pre-caching and update. -==== - -.. Apply the `ClusterGroupUpgrade` CR to the hub cluster by running the following command: -+ -[source,terminal] ----- -$ oc apply -f cgu-operator-upgrade.yml ----- - -. Optional: Pre-cache the images for the Operator update. - -.. 
Before starting image pre-caching, verify the subscription policy is `NonCompliant` at this point by running the following command: -+ -[source,terminal] ----- -$ oc get policy common-subscriptions-policy -n ----- -+ -.Example output -+ -[source,terminal] ----- -NAME REMEDIATION ACTION COMPLIANCE STATE AGE -common-subscriptions-policy inform NonCompliant 27d ----- - -.. Enable pre-caching in the `ClusterGroupUpgrade` CR by running the following command: -+ -[source,terminal] ----- -$ oc --namespace=default patch clustergroupupgrade.ran.openshift.io/cgu-operator-upgrade \ ---patch '{"spec":{"preCaching": true}}' --type=merge ----- - -.. Monitor the process and wait for the pre-caching to complete. Check the status of pre-caching by running the following command on the managed cluster: -+ -[source,terminal] ----- -$ oc get cgu cgu-operator-upgrade -o jsonpath='{.status.precaching.status}' ----- - -.. Check if the pre-caching is completed before starting the update by running the following command: -+ -[source,terminal] ----- -$ oc get cgu -n default cgu-operator-upgrade -ojsonpath='{.status.conditions}' | jq ----- -+ -.Example output -+ -[source,json] ----- -[ - { - "lastTransitionTime": "2022-03-08T20:49:08.000Z", - "message": "The ClusterGroupUpgrade CR is not enabled", - "reason": "UpgradeNotStarted", - "status": "False", - "type": "Ready" - }, - { - "lastTransitionTime": "2022-03-08T20:55:30.000Z", - "message": "Precaching is completed", - "reason": "PrecachingCompleted", - "status": "True", - "type": "PrecachingDone" - } -] ----- - -. Start the Operator update. - -.. Enable the `cgu-operator-upgrade` `ClusterGroupUpgrade` CR and disable pre-caching to start the Operator update by running the following command: -+ -[source,terminal] ----- -$ oc --namespace=default patch clustergroupupgrade.ran.openshift.io/cgu-operator-upgrade \ ---patch '{"spec":{"enable":true, "preCaching": false}}' --type=merge ----- - -.. Monitor the process. Upon completion, ensure that the policy is compliant by running the following command: -+ -[source,terminal] ----- -$ oc get policies --all-namespaces ----- diff --git a/modules/cnf-topology-aware-lifecycle-manager-pao-update.adoc b/modules/cnf-topology-aware-lifecycle-manager-pao-update.adoc deleted file mode 100644 index 796c2e4c5c0f..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-pao-update.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ztp_far_edge/ztp-talm-updating-managed-policies.adoc - -:_content-type: PROCEDURE -[id="talm-pao-update_{context}"] -= Removing Performance Addon Operator subscriptions from deployed clusters - -In earlier versions of {product-title}, the Performance Addon Operator provided automatic, low latency performance tuning for applications. In {product-title} 4.11 or later, these functions are part of the Node Tuning Operator. - -Do not install the Performance Addon Operator on clusters running {product-title} 4.11 or later. If you upgrade to {product-title} 4.11 or later, the Node Tuning Operator automatically removes the Performance Addon Operator. - -[NOTE] -==== -You need to remove any policies that create Performance Addon Operator subscriptions to prevent a re-installation of the Operator. -==== - -The reference DU profile includes the Performance Addon Operator in the `PolicyGenTemplate` CR `common-ranGen.yaml`. To remove the subscription from deployed managed clusters, you must update `common-ranGen.yaml`. 
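As a quick check, and assuming the subscription object follows the usual `performance-addon-operator` naming, you can confirm from a managed cluster whether a Performance Addon Operator subscription is still present before and after the change:

[source,terminal]
----
$ oc get subscriptions.operators.coreos.com -A | grep -i performance-addon
----

No output after the policies are remediated indicates that the subscription has been removed.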
- -[NOTE] -==== -If you install Performance Addon Operator 4.10.3-5 or later on {product-title} 4.11 or later, the Performance Addon Operator detects the cluster version and automatically hibernates to avoid interfering with the Node Tuning Operator functions. However, to ensure best performance, remove the Performance Addon Operator from your {product-title} 4.11 clusters. -==== - -.Prerequisites - -* Create a Git repository where you manage your custom site configuration data. The repository must be accessible from the hub cluster and be defined as a source repository for ArgoCD. - -* Update to {product-title} 4.11 or later. - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Change the `complianceType` to `mustnothave` for the Performance Addon Operator namespace, Operator group, and subscription in the `common-ranGen.yaml` file. -+ -[source,yaml] ----- - - fileName: PaoSubscriptionNS.yaml - policyName: "subscriptions-policy" - complianceType: mustnothave - - fileName: PaoSubscriptionOperGroup.yaml - policyName: "subscriptions-policy" - complianceType: mustnothave - - fileName: PaoSubscription.yaml - policyName: "subscriptions-policy" - complianceType: mustnothave ----- - -. Merge the changes with your custom site repository and wait for the ArgoCD application to synchronize the change to the hub cluster. The status of the `common-subscriptions-policy` policy changes to `Non-Compliant`. - -. Apply the change to your target clusters by using the {cgu-operator-full}. For more information about rolling out configuration changes, see the "Additional resources" section. - -. Monitor the process. When the status of the `common-subscriptions-policy` policy for a target cluster is `Compliant`, the Performance Addon Operator has been removed from the cluster. Get the status of the `common-subscriptions-policy` by running the following command: -+ -[source,terminal] ----- -$ oc get policy -n ztp-common common-subscriptions-policy ----- - -. Delete the Performance Addon Operator namespace, Operator group and subscription CRs from `.spec.sourceFiles` in the `common-ranGen.yaml` file. - -. Merge the changes with your custom site repository and wait for the ArgoCD application to synchronize the change to the hub cluster. The policy remains compliant. diff --git a/modules/cnf-topology-aware-lifecycle-manager-platform-update.adoc b/modules/cnf-topology-aware-lifecycle-manager-platform-update.adoc deleted file mode 100644 index e25a1d94db4f..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-platform-update.adoc +++ /dev/null @@ -1,196 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ztp_far_edge/ztp-talm-updating-managed-policies.adoc - -:_content-type: PROCEDURE -[id="talo-platform-update_{context}"] -= Performing a platform update - -You can perform a platform update with the {cgu-operator}. - -.Prerequisites - -* Install the {cgu-operator-first}. -* Update {ztp-first} to the latest version. -* Provision one or more managed clusters with {ztp}. -* Mirror the desired image repository. -* Log in as a user with `cluster-admin` privileges. -* Create {rh-rhacm} policies in the hub cluster. - -.Procedure - -. Create a `PolicyGenTemplate` CR for the platform update: -.. Save the following contents of the `PolicyGenTemplate` CR in the `du-upgrade.yaml` file. 
-+ -.Example of `PolicyGenTemplate` for platform update -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: ran.openshift.io/v1 -kind: PolicyGenTemplate -metadata: - name: "du-upgrade" - namespace: "ztp-group-du-sno" -spec: - bindingRules: - group-du-sno: "" - mcp: "master" - remediationAction: inform - sourceFiles: - - fileName: ImageSignature.yaml <1> - policyName: "platform-upgrade-prep" - binaryData: - ${DIGEST_ALGO}-${DIGEST_ENCODED}: ${SIGNATURE_BASE64} <2> - - fileName: DisconnectedICSP.yaml - policyName: "platform-upgrade-prep" - metadata: - name: disconnected-internal-icsp-for-ocp - spec: - repositoryDigestMirrors: <3> - - mirrors: - - quay-intern.example.com/ocp4/openshift-release-dev - source: quay.io/openshift-release-dev/ocp-release - - mirrors: - - quay-intern.example.com/ocp4/openshift-release-dev - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev - - fileName: ClusterVersion.yaml <4> - policyName: "platform-upgrade-prep" - metadata: - name: version - annotations: - ran.openshift.io/ztp-deploy-wave: "1" - spec: - channel: "stable-{product-version}" - upstream: http://upgrade.example.com/images/upgrade-graph_stable-{product-version} - - fileName: ClusterVersion.yaml <5> - policyName: "platform-upgrade" - metadata: - name: version - spec: - channel: "stable-{product-version}" - upstream: http://upgrade.example.com/images/upgrade-graph_stable-{product-version} - desiredUpdate: - version: {product-version}.4 - status: - history: - - version: {product-version}.4 - state: "Completed" ----- -<1> The `ConfigMap` CR contains the signature of the desired release image to update to. -<2> Shows the image signature of the desired {product-title} release. Get the signature from the `checksum-${OCP_RELASE_NUMBER}.yaml` file you saved when following the procedures in the "Setting up the environment" section. -<3> Shows the mirror repository that contains the desired {product-title} image. Get the mirrors from the `imageContentSources.yaml` file that you saved when following the procedures in the "Setting up the environment" section. -<4> Shows the `ClusterVersion` CR to update upstream. -<5> Shows the `ClusterVersion` CR to trigger the update. The `channel`, `upstream`, and `desiredVersion` fields are all required for image pre-caching. -+ -The `PolicyGenTemplate` CR generates two policies: - -* The `du-upgrade-platform-upgrade-prep` policy does the preparation work for the platform update. It creates the `ConfigMap` CR for the desired release image signature, creates the image content source of the mirrored release image repository, and updates the cluster version with the desired update channel and the update graph reachable by the managed cluster in the disconnected environment. - -* The `du-upgrade-platform-upgrade` policy is used to perform platform upgrade. - -.. Add the `du-upgrade.yaml` file contents to the `kustomization.yaml` file located in the {ztp} Git repository for the `PolicyGenTemplate` CRs and push the changes to the Git repository. -+ -ArgoCD pulls the changes from the Git repository and generates the policies on the hub cluster. - -.. Check the created policies by running the following command: -+ -[source,terminal] ----- -$ oc get policies -A | grep platform-upgrade ----- - -. Apply the required update resources before starting the platform update with the {cgu-operator}. - -.. 
Save the content of the `platform-upgrade-prep` `ClusterUpgradeGroup` CR with the `du-upgrade-platform-upgrade-prep` policy and the target managed clusters to the `cgu-platform-upgrade-prep.yml` file, as shown in the following example: -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-platform-upgrade-prep - namespace: default -spec: - managedPolicies: - - du-upgrade-platform-upgrade-prep - clusters: - - spoke1 - remediationStrategy: - maxConcurrency: 1 - enable: true ----- - -.. Apply the policy to the hub cluster by running the following command: -+ -[source,terminal] ----- -$ oc apply -f cgu-platform-upgrade-prep.yml ----- - -.. Monitor the update process. Upon completion, ensure that the policy is compliant by running the following command: -+ -[source,terminal] ----- -$ oc get policies --all-namespaces ----- - -. Create the `ClusterGroupUpdate` CR for the platform update with the `spec.enable` field set to `false`. - -.. Save the content of the platform update `ClusterGroupUpdate` CR with the `du-upgrade-platform-upgrade` policy and the target clusters to the `cgu-platform-upgrade.yml` file, as shown in the following example: -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-platform-upgrade - namespace: default -spec: - managedPolicies: - - du-upgrade-platform-upgrade - preCaching: false - clusters: - - spoke1 - remediationStrategy: - maxConcurrency: 1 - enable: false ----- - -.. Apply the `ClusterGroupUpdate` CR to the hub cluster by running the following command: -+ -[source,terminal] ----- -$ oc apply -f cgu-platform-upgrade.yml ----- - -. Optional: Pre-cache the images for the platform update. -.. Enable pre-caching in the `ClusterGroupUpdate` CR by running the following command: -+ -[source,terminal] ----- -$ oc --namespace=default patch clustergroupupgrade.ran.openshift.io/cgu-platform-upgrade \ ---patch '{"spec":{"preCaching": true}}' --type=merge ----- - -.. Monitor the update process and wait for the pre-caching to complete. Check the status of pre-caching by running the following command on the hub cluster: -+ -[source,terminal] ----- -$ oc get cgu cgu-platform-upgrade -o jsonpath='{.status.precaching.status}' ----- - -. Start the platform update: -.. Enable the `cgu-platform-upgrade` policy and disable pre-caching by running the following command: -+ -[source,terminal] ----- -$ oc --namespace=default patch clustergroupupgrade.ran.openshift.io/cgu-platform-upgrade \ ---patch '{"spec":{"enable":true, "preCaching": false}}' --type=merge ----- - -.. Monitor the process. Upon completion, ensure that the policy is compliant by running the following command: -+ -[source,terminal] ----- -$ oc get policies --all-namespaces ----- diff --git a/modules/cnf-topology-aware-lifecycle-manager-policies-concept.adoc b/modules/cnf-topology-aware-lifecycle-manager-policies-concept.adoc deleted file mode 100644 index f707f204b538..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-policies-concept.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-2600 (CNF-2133) (4.10), Story TELCODOCS-285 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: CONCEPT -[id="talo-policies-concept_{context}"] -= Update policies on managed clusters - -The {cgu-operator-first} remediates a set of `inform` policies for the clusters specified in the `ClusterGroupUpgrade` CR. 
{cgu-operator} remediates `inform` policies by making `enforce` copies of the managed {rh-rhacm} policies. Each copied policy has its own corresponding {rh-rhacm} placement rule and {rh-rhacm} placement binding. - -One by one, {cgu-operator} adds each cluster from the current batch to the placement rule that corresponds with the applicable managed policy. If a cluster is already compliant with a policy, {cgu-operator} skips applying that policy on the compliant cluster. {cgu-operator} then moves on to applying the next policy to the non-compliant cluster. After {cgu-operator} completes the updates in a batch, all clusters are removed from the placement rules associated with the copied policies. Then, the update of the next batch starts. - -If a spoke cluster does not report any compliant state to {rh-rhacm}, the managed policies on the hub cluster can be missing status information that {cgu-operator} needs. {cgu-operator} handles these cases in the following ways: - -* If a policy's `status.compliant` field is missing, {cgu-operator} ignores the policy and adds a log entry. Then, {cgu-operator} continues looking at the policy's `status.status` field. -* If a policy's `status.status` is missing, {cgu-operator} produces an error. -* If a cluster's compliance status is missing in the policy's `status.status` field, {cgu-operator} considers that cluster to be non-compliant with that policy. - -The `ClusterGroupUpgrade` CR's `batchTimeoutAction` determines what happens if an upgrade fails for a cluster. You can specify `continue` to skip the failing cluster and continue to upgrade other clusters, or specify `abort` to stop the policy remediation for all clusters. Once the timeout elapses, {cgu-operator} removes all enforce policies to ensure that no further updates are made to clusters. - -For more information about {rh-rhacm} policies, see link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/{rh-rhacm-version}/html-single/governance/index#policy-overview[Policy overview]. diff --git a/modules/cnf-topology-aware-lifecycle-manager-precache-concept.adoc b/modules/cnf-topology-aware-lifecycle-manager-precache-concept.adoc deleted file mode 100644 index 7a5800a3c622..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-precache-concept.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-2600 (CNF-2133) (4.10), Story TELCODOCS-285 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: CONCEPT -[id="talo-precache-feature-concept_{context}"] -= Using the container image pre-cache feature - -{sno-caps} clusters might have limited bandwidth to access the container image registry, which can cause a timeout before the updates are completed. - -[NOTE] -==== -The time of the update is not set by {cgu-operator}. You can apply the `ClusterGroupUpgrade` CR at the beginning of the update by manual application or by external automation. -==== - -The container image pre-caching starts when the `preCaching` field is set to `true` in the `ClusterGroupUpgrade` CR. - -{cgu-operator} uses the `PrecacheSpecValid` condition to report status information as follows: - -* `true` -+ -The pre-caching spec is valid and consistent. -* `false` -+ -The pre-caching spec is incomplete. - -{cgu-operator} uses the `PrecachingSucceeded` condition to report status information as follows: - -* `true` -+ -{cgu-operator} has concluded the pre-caching process. 
If pre-caching fails for any cluster, the update fails for that cluster but proceeds for all other clusters. A message informs you if pre-caching has failed for any clusters. -* `false` -+ -Pre-caching is still in progress for one or more clusters or has failed for all clusters. - -After a successful pre-caching process, you can start remediating policies. The remediation actions start when the `enable` field is set to `true`. If there is a pre-caching failure on a cluster, the upgrade fails for that cluster. The upgrade process continues for all other clusters that have a successful pre-cache. - -The pre-caching process can be in the following statuses: - -* `NotStarted` -+ -This is the initial state all clusters are automatically assigned to on the first reconciliation pass of the `ClusterGroupUpgrade` CR. In this state, {cgu-operator} deletes any pre-caching namespace and hub view resources of spoke clusters that remain from previous incomplete updates. {cgu-operator} then creates a new `ManagedClusterView` resource for the spoke pre-caching namespace to verify its deletion in the `PrecachePreparing` state. -* `PreparingToStart` -+ -Cleaning up any remaining resources from previous incomplete updates is in progress. -* `Starting` -+ -Pre-caching job prerequisites and the job are created. -* `Active` -+ -The job is in "Active" state. -* `Succeeded` -+ -The pre-cache job succeeded. -* `PrecacheTimeout` -+ -The artifact pre-caching is partially done. -* `UnrecoverableError` -+ -The job ends with a non-zero exit code. diff --git a/modules/cnf-topology-aware-lifecycle-manager-precache-feature.adoc b/modules/cnf-topology-aware-lifecycle-manager-precache-feature.adoc deleted file mode 100644 index 60c01cb5bd7d..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-precache-feature.adoc +++ /dev/null @@ -1,160 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-2600 (CNF-2133) (4.10), Story TELCODOCS-285 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: PROCEDURE -[id="talo-precache-start_and_update_{context}"] -= Creating a ClusterGroupUpgrade CR with pre-caching - -For {sno}, the pre-cache feature allows the required container images to be present on the spoke cluster before the update starts. - -[NOTE] -==== -For pre-caching, {cgu-operator} uses the `spec.remediationStrategy.timeout` value from the `ClusterGroupUpgrade` CR. You must set a `timeout` value that allows sufficient time for the pre-caching job to complete. When you enable the `ClusterGroupUpgrade` CR after pre-caching has completed, you can change the `timeout` value to a duration that is appropriate for the update. -==== - -.Prerequisites - -* Install the {cgu-operator-first}. -* Provision one or more managed clusters. -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Save the contents of the `ClusterGroupUpgrade` CR with the `preCaching` field set to `true` in the `clustergroupupgrades-group-du.yaml` file: -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: du-upgrade-4918 - namespace: ztp-group-du-sno -spec: - preCaching: true <1> - clusters: - - cnfdb1 - - cnfdb2 - enable: false - managedPolicies: - - du-upgrade-platform-upgrade - remediationStrategy: - maxConcurrency: 2 - timeout: 240 ----- -<1> The `preCaching` field is set to `true`, which enables {cgu-operator} to pull the container images before starting the update. - -. 
When you want to start pre-caching, apply the `ClusterGroupUpgrade` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f clustergroupupgrades-group-du.yaml ----- - -.Verification - -. Check if the `ClusterGroupUpgrade` CR exists in the hub cluster by running the following command: -+ -[source,terminal] ----- -$ oc get cgu -A ----- -+ -.Example output -+ -[source,terminal] ----- -NAMESPACE NAME AGE STATE DETAILS -ztp-group-du-sno du-upgrade-4918 10s InProgress Precaching is required and not done <1> ----- -<1> The CR is created. - -. Check the status of the pre-caching task by running the following command: -+ -[source,terminal] ----- -$ oc get cgu -n ztp-group-du-sno du-upgrade-4918 -o jsonpath='{.status}' ----- -+ -.Example output -+ -[source,json] ----- -{ - "conditions": [ - { - "lastTransitionTime": "2022-01-27T19:07:24Z", - "message": "Precaching is required and not done", - "reason": "InProgress", - "status": "False", - "type": "PrecachingSucceeded" - }, - { - "lastTransitionTime": "2022-01-27T19:07:34Z", - "message": "Pre-caching spec is valid and consistent", - "reason": "PrecacheSpecIsWellFormed", - "status": "True", - "type": "PrecacheSpecValid" - } - ], - "precaching": { - "clusters": [ - "cnfdb1" <1> - "cnfdb2" - ], - "spec": { - "platformImage": "image.example.io"}, - "status": { - "cnfdb1": "Active" - "cnfdb2": "Succeeded"} - } -} ----- -<1> Displays the list of identified clusters. - -. Check the status of the pre-caching job by running the following command on the spoke cluster: -+ -[source,terminal] ----- -$ oc get jobs,pods -n openshift-talo-pre-cache ----- -+ -.Example output -+ -[source,terminal] ----- -NAME COMPLETIONS DURATION AGE -job.batch/pre-cache 0/1 3m10s 3m10s - -NAME READY STATUS RESTARTS AGE -pod/pre-cache--1-9bmlr 1/1 Running 0 3m10s ----- - - . Check the status of the `ClusterGroupUpgrade` CR by running the following command: -+ -[source,terminal] ----- -$ oc get cgu -n ztp-group-du-sno du-upgrade-4918 -o jsonpath='{.status}' ----- -+ -.Example output -+ -[source,json] ----- -"conditions": [ - { - "lastTransitionTime": "2022-01-27T19:30:41Z", - "message": "The ClusterGroupUpgrade CR has all clusters compliant with all the managed policies", - "reason": "UpgradeCompleted", - "status": "True", - "type": "Ready" - }, - { - "lastTransitionTime": "2022-01-27T19:28:57Z", - "message": "Precaching is completed", - "reason": "PrecachingCompleted", - "status": "True", - "type": "PrecachingSucceeded" <1> - } ----- -<1> The pre-cache tasks are done. diff --git a/modules/cnf-topology-aware-lifecycle-manager-precache-image-filter.adoc b/modules/cnf-topology-aware-lifecycle-manager-precache-image-filter.adoc deleted file mode 100644 index 3e2201430bee..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-precache-image-filter.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-6848 (4.13), Story TELCODOCS-949 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: CONCEPT -[id="talo-precache-feature-image-filter_{context}"] -= Using the container image pre-cache filter - -The pre-cache feature typically downloads more images than a cluster needs for an update. You can control which pre-cache images are downloaded to a cluster. This decreases download time, and saves bandwidth and storage. 
- -You can see a list of all images to be downloaded using the following command: - -[source,terminal] ----- -$ oc adm release info ----- - -The following `ConfigMap` example shows how you can exclude images using the `excludePrecachePatterns` field. - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-group-upgrade-overrides -data: - excludePrecachePatterns: | - azure <1> - aws - vsphere - alibaba ----- -<1> {cgu-operator} excludes all images with names that include any of the patterns listed here. \ No newline at end of file diff --git a/modules/cnf-topology-aware-lifecycle-manager-preparing-for-updates.adoc b/modules/cnf-topology-aware-lifecycle-manager-preparing-for-updates.adoc deleted file mode 100644 index 6434f48dd6f5..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-preparing-for-updates.adoc +++ /dev/null @@ -1,104 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ztp_far_edge/ztp-talm-updating-managed-policies.adoc - -:_content-type: PROCEDURE -[id="talo-platform-prepare-end-to-end_{context}"] -= Updating clusters in a disconnected environment - -You can upgrade managed clusters and Operators for managed clusters that you have deployed using {ztp-first} and {cgu-operator-first}. - -[id="talo-platform-prepare-for-update-env-setup_{context}"] -== Setting up the environment - -{cgu-operator} can perform both platform and Operator updates. - -You must mirror both the platform image and Operator images that you want to update to in your mirror registry before you can use {cgu-operator} to update your disconnected clusters. Complete the following steps to mirror the images: - -* For platform updates, you must perform the following steps: -+ -. Mirror the desired {product-title} image repository. Ensure that the desired platform image is mirrored by following the "Mirroring the {product-title} image repository" procedure linked in the Additional Resources. Save the contents of the `imageContentSources` section in the `imageContentSources.yaml` file: -+ -.Example output -[source,yaml] ----- -imageContentSources: - - mirrors: - - mirror-ocp-registry.ibmcloud.io.cpak:5000/openshift-release-dev/openshift4 - source: quay.io/openshift-release-dev/ocp-release - - mirrors: - - mirror-ocp-registry.ibmcloud.io.cpak:5000/openshift-release-dev/openshift4 - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev ----- - -. Save the image signature of the desired platform image that was mirrored. You must add the image signature to the `PolicyGenTemplate` CR for platform updates. To get the image signature, perform the following steps: - -.. Specify the desired {product-title} tag by running the following command: -+ -[source,terminal] ----- -$ OCP_RELEASE_NUMBER= ----- - -.. Specify the architecture of the cluster by running the following command: -+ -[source,terminal] ----- -$ ARCHITECTURE= <1> ----- -<1> Specify the architecture of the cluster, such as `x86_64`, `aarch64`, `s390x`, or `ppc64le`. - - -.. Get the release image digest from Quay by running the following command -+ -[source,terminal] ----- -$ DIGEST="$(oc adm release info quay.io/openshift-release-dev/ocp-release:${OCP_RELEASE_NUMBER}-${ARCHITECTURE} | sed -n 's/Pull From: .*@//p')" ----- - -.. Set the digest algorithm by running the following command: -+ -[source,terminal] ----- -$ DIGEST_ALGO="${DIGEST%%:*}" ----- - -.. 
Set the digest signature by running the following command: -+ -[source,terminal] ----- -$ DIGEST_ENCODED="${DIGEST#*:}" ----- - -.. Get the image signature from the link:https://mirror.openshift.com/pub/openshift-v4/signatures/openshift/release/[mirror.openshift.com] website by running the following command: -+ -[source,terminal] ----- -$ SIGNATURE_BASE64=$(curl -s "https://mirror.openshift.com/pub/openshift-v4/signatures/openshift/release/${DIGEST_ALGO}=${DIGEST_ENCODED}/signature-1" | base64 -w0 && echo) ----- - -.. Save the image signature to the `checksum-.yaml` file by running the following commands: -+ -[source,terminal] ----- -$ cat >checksum-${OCP_RELEASE_NUMBER}.yaml <> -** <> -** <> -** <> - -To ensure that the `ClusterGroupUpgrade` configuration is functional, you can do the following: - -. Create the `ClusterGroupUpgrade` CR with the `spec.enable` field set to `false`. - -. Wait for the status to be updated and go through the troubleshooting questions. - -. If everything looks as expected, set the `spec.enable` field to `true` in the `ClusterGroupUpgrade` CR. - -[WARNING] -==== -After you set the `spec.enable` field to `true` in the `ClusterUpgradeGroup` CR, the update procedure starts and you cannot edit the CR's `spec` fields anymore. -==== - -[id="talo-troubleshooting-modify-cgu_{context}"] -== Cannot modify the ClusterUpgradeGroup CR - -Issue:: You cannot edit the `ClusterUpgradeGroup` CR after enabling the update. - -Resolution:: Restart the procedure by performing the following steps: -+ -. Remove the old `ClusterGroupUpgrade` CR by running the following command: -+ -[source,terminal] ----- -$ oc delete cgu -n ----- -+ -. Check and fix the existing issues with the managed clusters and policies. -.. Ensure that all the clusters are managed clusters and available. -.. Ensure that all the policies exist and have the `spec.remediationAction` field set to `inform`. -+ -. Create a new `ClusterGroupUpgrade` CR with the correct configurations. -+ -[source,terminal] ----- -$ oc apply -f ----- - -[id="talo-troubleshooting-managed-policies_{context}"] -== Managed policies - -[discrete] -== Checking managed policies on the system - -Issue:: You want to check if you have the correct managed policies on the system. - -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get cgu lab-upgrade -ojsonpath='{.spec.managedPolicies}' ----- -+ -.Example output -+ -[source,json] ----- -["group-du-sno-validator-du-validator-policy", "policy2-common-nto-sub-policy", "policy3-common-ptp-sub-policy"] ----- - -[discrete] -== Checking remediationAction mode - -Issue:: You want to check if the `remediationAction` field is set to `inform` in the `spec` of the managed policies. - -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get policies --all-namespaces ----- -+ -.Example output -+ -[source,terminal] ----- -NAMESPACE NAME REMEDIATION ACTION COMPLIANCE STATE AGE -default policy1-common-cluster-version-policy inform NonCompliant 5d21h -default policy2-common-nto-sub-policy inform Compliant 5d21h -default policy3-common-ptp-sub-policy inform NonCompliant 5d21h -default policy4-common-sriov-sub-policy inform NonCompliant 5d21h ----- - -[discrete] -== Checking policy compliance state - -Issue:: You want to check the compliance state of policies. 
- -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get policies --all-namespaces ----- -+ -.Example output -+ -[source,terminal] ----- -NAMESPACE NAME REMEDIATION ACTION COMPLIANCE STATE AGE -default policy1-common-cluster-version-policy inform NonCompliant 5d21h -default policy2-common-nto-sub-policy inform Compliant 5d21h -default policy3-common-ptp-sub-policy inform NonCompliant 5d21h -default policy4-common-sriov-sub-policy inform NonCompliant 5d21h ----- - -[id="talo-troubleshooting-clusters_{context}"] -== Clusters - -[discrete] -=== Checking if managed clusters are present - -Issue:: You want to check if the clusters in the `ClusterGroupUpgrade` CR are managed clusters. - -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get managedclusters ----- -+ -.Example output -+ -[source,terminal] ----- -NAME HUB ACCEPTED MANAGED CLUSTER URLS JOINED AVAILABLE AGE -local-cluster true https://api.hub.example.com:6443 True Unknown 13d -spoke1 true https://api.spoke1.example.com:6443 True True 13d -spoke3 true https://api.spoke3.example.com:6443 True True 27h ----- - -. Alternatively, check the {cgu-operator} manager logs: - -.. Get the name of the {cgu-operator} manager by running the following command: -+ -[source,terminal] ----- -$ oc get pod -n openshift-operators ----- -+ -.Example output -+ -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cluster-group-upgrades-controller-manager-75bcc7484d-8k8xp 2/2 Running 0 45m ----- - -.. Check the {cgu-operator} manager logs by running the following command: -+ -[source,terminal] ----- -$ oc logs -n openshift-operators \ -cluster-group-upgrades-controller-manager-75bcc7484d-8k8xp -c manager ----- -+ -.Example output -+ -[source,terminal] ----- -ERROR controller-runtime.manager.controller.clustergroupupgrade Reconciler error {"reconciler group": "ran.openshift.io", "reconciler kind": "ClusterGroupUpgrade", "name": "lab-upgrade", "namespace": "default", "error": "Cluster spoke5555 is not a ManagedCluster"} <1> -sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem ----- -<1> The error message shows that the cluster is not a managed cluster. - -[discrete] -=== Checking if managed clusters are available - -Issue:: You want to check if the managed clusters specified in the `ClusterGroupUpgrade` CR are available. - -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get managedclusters ----- -+ -.Example output -+ -[source,terminal] ----- -NAME HUB ACCEPTED MANAGED CLUSTER URLS JOINED AVAILABLE AGE -local-cluster true https://api.hub.testlab.com:6443 True Unknown 13d -spoke1 true https://api.spoke1.testlab.com:6443 True True 13d <1> -spoke3 true https://api.spoke3.testlab.com:6443 True True 27h <1> ----- -<1> The value of the `AVAILABLE` field is `True` for the managed clusters. - -[discrete] -=== Checking clusterLabelSelector - -Issue:: You want to check if the `clusterLabelSelector` field specified in the `ClusterGroupUpgrade` CR matches at least one of the managed clusters. - -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get managedcluster --selector=upgrade=true <1> ----- -<1> The label for the clusters you want to update is `upgrade:true`. 
-+ -.Example output -+ -[source,terminal] ----- -NAME HUB ACCEPTED MANAGED CLUSTER URLS JOINED AVAILABLE AGE -spoke1 true https://api.spoke1.testlab.com:6443 True True 13d -spoke3 true https://api.spoke3.testlab.com:6443 True True 27h ----- - -[discrete] -=== Checking if canary clusters are present - -Issue:: You want to check if the canary clusters are present in the list of clusters. -+ -.Example `ClusterGroupUpgrade` CR -[source,yaml] ----- -spec: - remediationStrategy: - canaries: - - spoke3 - maxConcurrency: 2 - timeout: 240 - clusterLabelSelectors: - - matchLabels: - upgrade: true ----- - -Resolution:: Run the following commands: -+ -[source,terminal] ----- -$ oc get cgu lab-upgrade -ojsonpath='{.spec.clusters}' ----- -+ -.Example output -+ -[source,json] ----- -["spoke1", "spoke3"] ----- - -. Check if the canary clusters are present in the list of clusters that match `clusterLabelSelector` labels by running the following command: -+ -[source,terminal] ----- -$ oc get managedcluster --selector=upgrade=true ----- -+ -.Example output -+ -[source,terminal] ----- -NAME HUB ACCEPTED MANAGED CLUSTER URLS JOINED AVAILABLE AGE -spoke1 true https://api.spoke1.testlab.com:6443 True True 13d -spoke3 true https://api.spoke3.testlab.com:6443 True True 27h ----- - -[NOTE] -==== -A cluster can be present in `spec.clusters` and also be matched by the `spec.clusterLabelSelector` label. -==== - -[discrete] -=== Checking the pre-caching status on spoke clusters - -. Check the status of pre-caching by running the following command on the spoke cluster: -+ -[source,terminal] ----- -$ oc get jobs,pods -n openshift-talo-pre-cache ----- - -[id="talo-troubleshooting-remediation-strategy_{context}"] -== Remediation Strategy - -[discrete] -=== Checking if remediationStrategy is present in the ClusterGroupUpgrade CR - -Issue:: You want to check if the `remediationStrategy` is present in the `ClusterGroupUpgrade` CR. - -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get cgu lab-upgrade -ojsonpath='{.spec.remediationStrategy}' ----- -+ -.Example output -+ -[source,json] ----- -{"maxConcurrency":2, "timeout":240} ----- - -[discrete] -=== Checking if maxConcurrency is specified in the ClusterGroupUpgrade CR - -Issue:: You want to check if the `maxConcurrency` is specified in the `ClusterGroupUpgrade` CR. - -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get cgu lab-upgrade -ojsonpath='{.spec.remediationStrategy.maxConcurrency}' ----- -+ -.Example output -+ -[source,terminal] ----- -2 ----- - -[id="talo-troubleshooting-remediation-talo_{context}"] -== {cgu-operator-full} - -[discrete] -=== Checking condition message and status in the ClusterGroupUpgrade CR - -Issue:: You want to check the value of the `status.conditions` field in the `ClusterGroupUpgrade` CR. - -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get cgu lab-upgrade -ojsonpath='{.status.conditions}' ----- -+ -.Example output -+ -[source,json] ----- -{"lastTransitionTime":"2022-02-17T22:25:28Z", "message":"Missing managed policies:[policyList]", "reason":"NotAllManagedPoliciesExist", "status":"False", "type":"Validated"} ----- - -[discrete] -=== Checking corresponding copied policies - -Issue:: You want to check if every policy from `status.managedPoliciesForUpgrade` has a corresponding policy in `status.copiedPolicies`. 
- -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get cgu lab-upgrade -oyaml ----- -+ -.Example output -+ -[source,yaml] ----- -status: - … - copiedPolicies: - - lab-upgrade-policy3-common-ptp-sub-policy - managedPoliciesForUpgrade: - - name: policy3-common-ptp-sub-policy - namespace: default ----- - -[discrete] -=== Checking if status.remediationPlan was computed - -Issue:: You want to check if `status.remediationPlan` is computed. - -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get cgu lab-upgrade -ojsonpath='{.status.remediationPlan}' ----- -+ -.Example output -+ -[source,json] ----- -[["spoke2", "spoke3"]] ----- - -[discrete] -=== Errors in the {cgu-operator} manager container - -Issue:: You want to check the logs of the manager container of {cgu-operator}. - -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc logs -n openshift-operators \ -cluster-group-upgrades-controller-manager-75bcc7484d-8k8xp -c manager ----- -+ -.Example output -+ -[source,terminal] ----- -ERROR controller-runtime.manager.controller.clustergroupupgrade Reconciler error {"reconciler group": "ran.openshift.io", "reconciler kind": "ClusterGroupUpgrade", "name": "lab-upgrade", "namespace": "default", "error": "Cluster spoke5555 is not a ManagedCluster"} <1> -sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem ----- -<1> Displays the error. - -[discrete] -=== Clusters are not compliant to some policies after a `ClusterGroupUpgrade` CR has completed - -Issue:: The policy compliance status that {cgu-operator} uses to decide if remediation is needed has not yet fully updated for all clusters. -This may be because: -* The CGU was run too soon after a policy was created or updated. -* The remediation of a policy affects the compliance of subsequent policies in the `ClusterGroupUpgrade` CR. - -Resolution:: Create and apply a new `ClusterGroupUpdate` CR with the same specification. - -[discrete] -[id="talo-troubleshooting-auto-create-policies_{context}"] -=== Auto-created `ClusterGroupUpgrade` CR in the {ztp} workflow has no managed policies - -Issue:: If there are no policies for the managed cluster when the cluster becomes `Ready`, a `ClusterGroupUpgrade` CR with no policies is auto-created. -Upon completion of the `ClusterGroupUpgrade` CR, the managed cluster is labeled as `ztp-done`. -If the `PolicyGenTemplate` CRs were not pushed to the Git repository within the required time after `SiteConfig` resources were pushed, this might result in no policies being available for the target cluster when the cluster became `Ready`. - -Resolution:: Verify that the policies you want to apply are available on the hub cluster, then create a `ClusterGroupUpgrade` CR with the required policies. - -You can either manually create the `ClusterGroupUpgrade` CR or trigger auto-creation again. To trigger auto-creation of the `ClusterGroupUpgrade` CR, remove the `ztp-done` label from the cluster and delete the empty `ClusterGroupUpgrade` CR that was previously created in the `zip-install` namespace. - -[discrete] -[id="talo-troubleshooting-pre-cache-failed_{context}"] -=== Pre-caching has failed - -Issue:: Pre-caching might fail for one of the following reasons: -* There is not enough free space on the node. -* For a disconnected environment, the pre-cache image has not been properly mirrored. -* There was an issue when creating the pod. - -Resolution:: -. 
To check if pre-caching has failed due to insufficient space, check the log of the pre-caching pod in the node. -.. Find the name of the pod using the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-talo-pre-cache ----- -+ -.. Check the logs to see if the error is related to insufficient space using the following command: -+ -[source,terminal] ----- -$ oc logs -n openshift-talo-pre-cache ----- -+ -. If there is no log, check the pod status using the following command: -+ -[source,terminal] ----- -$ oc describe pod -n openshift-talo-pre-cache ----- -+ -. If the pod does not exist, check the job status to see why it could not create a pod using the following command: -+ -[source,terminal] ----- -$ oc describe job -n openshift-talo-pre-cache pre-cache ----- diff --git a/modules/cnf-troubleshooting-common-ptp-operator-issues.adoc b/modules/cnf-troubleshooting-common-ptp-operator-issues.adoc deleted file mode 100644 index 204a38430681..000000000000 --- a/modules/cnf-troubleshooting-common-ptp-operator-issues.adoc +++ /dev/null @@ -1,143 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="cnf-troubleshooting-common-ptp-operator-issues_{context}"] -= Troubleshooting common PTP Operator issues - -Troubleshoot common problems with the PTP Operator by performing the following steps. - -.Prerequisites - -* Install the {product-title} CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. -* Install the PTP Operator on a bare-metal cluster with hosts that support PTP. - -.Procedure - -. Check the Operator and operands are successfully deployed in the cluster for the configured nodes. -+ -[source,terminal] ----- -$ oc get pods -n openshift-ptp -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE -linuxptp-daemon-lmvgn 3/3 Running 0 4d17h 10.1.196.24 compute-0.example.com -linuxptp-daemon-qhfg7 3/3 Running 0 4d17h 10.1.196.25 compute-1.example.com -ptp-operator-6b8dcbf7f4-zndk7 1/1 Running 0 5d7h 10.129.0.61 control-plane-1.example.com ----- -+ -[NOTE] -==== -When the PTP fast event bus is enabled, the number of ready `linuxptp-daemon` pods is `3/3`. If the PTP fast event bus is not enabled, `2/2` is displayed. -==== - -. Check that supported hardware is found in the cluster. -+ -[source,terminal] ----- -$ oc -n openshift-ptp get nodeptpdevices.ptp.openshift.io ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -control-plane-0.example.com 10d -control-plane-1.example.com 10d -compute-0.example.com 10d -compute-1.example.com 10d -compute-2.example.com 10d ----- - -. Check the available PTP network interfaces for a node: -+ -[source,terminal] ----- -$ oc -n openshift-ptp get nodeptpdevices.ptp.openshift.io -o yaml ----- -+ -where: -+ -:: Specifies the node you want to query, for example, `compute-0.example.com`. -+ -.Example output -[source,yaml] ----- -apiVersion: ptp.openshift.io/v1 -kind: NodePtpDevice -metadata: - creationTimestamp: "2021-09-14T16:52:33Z" - generation: 1 - name: compute-0.example.com - namespace: openshift-ptp - resourceVersion: "177400" - uid: 30413db0-4d8d-46da-9bef-737bacd548fd -spec: {} -status: - devices: - - name: eno1 - - name: eno2 - - name: eno3 - - name: eno4 - - name: enp5s0f0 - - name: enp5s0f1 ----- - -. Check that the PTP interface is successfully synchronized to the primary clock by accessing the `linuxptp-daemon` pod for the corresponding node. - -.. 
Get the name of the `linuxptp-daemon` pod and corresponding node you want to troubleshoot by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-ptp -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE -linuxptp-daemon-lmvgn 3/3 Running 0 4d17h 10.1.196.24 compute-0.example.com -linuxptp-daemon-qhfg7 3/3 Running 0 4d17h 10.1.196.25 compute-1.example.com -ptp-operator-6b8dcbf7f4-zndk7 1/1 Running 0 5d7h 10.129.0.61 control-plane-1.example.com ----- - -.. Remote shell into the required `linuxptp-daemon` container: -+ -[source,terminal] ----- -$ oc rsh -n openshift-ptp -c linuxptp-daemon-container ----- -+ -where: -+ -:: is the container you want to diagnose, for example `linuxptp-daemon-lmvgn`. - -.. In the remote shell connection to the `linuxptp-daemon` container, use the PTP Management Client (`pmc`) tool to diagnose the network interface. Run the following `pmc` command to check the sync status of the PTP device, for example `ptp4l`. -+ -[source,terminal] ----- -# pmc -u -f /var/run/ptp4l.0.config -b 0 'GET PORT_DATA_SET' ----- -+ -.Example output when the node is successfully synced to the primary clock -[source,terminal] ----- -sending: GET PORT_DATA_SET - 40a6b7.fffe.166ef0-1 seq 0 RESPONSE MANAGEMENT PORT_DATA_SET - portIdentity 40a6b7.fffe.166ef0-1 - portState SLAVE - logMinDelayReqInterval -4 - peerMeanPathDelay 0 - logAnnounceInterval -3 - announceReceiptTimeout 3 - logSyncInterval -4 - delayMechanism 1 - logMinPdelayReqInterval -4 - versionNumber 2 ----- diff --git a/modules/cnf-troubleshooting-missing-rte-config-maps.adoc b/modules/cnf-troubleshooting-missing-rte-config-maps.adoc deleted file mode 100644 index 3421bf9bb70c..000000000000 --- a/modules/cnf-troubleshooting-missing-rte-config-maps.adoc +++ /dev/null @@ -1,116 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_module-type: PROCEDURE -[id="cnf-troubleshooting-missing-rte-config-maps_{context}"] -= Correcting a missing resource topology exporter config map - -If you install the NUMA Resources Operator in a cluster with misconfigured cluster settings, in some circumstances, the Operator is shown as active but the logs of the resource topology exporter (RTE) daemon set pods show that the configuration for the RTE is missing, for example: - -[source,text] ----- -Info: couldn't find configuration in "/etc/resource-topology-exporter/config.yaml" ----- - -This log message indicates that the `kubeletconfig` with the required configuration was not properly applied in the cluster, resulting in a missing RTE `configmap`. For example, the following cluster is missing a `numaresourcesoperator-worker` `configmap` custom resource (CR): - -[source,terminal] ----- -$ oc get configmap ----- - -.Example output -[source,terminal] ----- -NAME DATA AGE -0e2a6bd3.openshift-kni.io 0 6d21h -kube-root-ca.crt 1 6d21h -openshift-service-ca.crt 1 6d21h -topo-aware-scheduler-config 1 6d18h ----- - -In a correctly configured cluster, `oc get configmap` also returns a `numaresourcesoperator-worker` `configmap` CR. - -.Prerequisites - -* Install the {product-title} CLI (`oc`). - -* Log in as a user with cluster-admin privileges. - -* Install the NUMA Resources Operator and deploy the NUMA-aware secondary scheduler. - -.Procedure - -. 
Compare the values for `spec.machineConfigPoolSelector.matchLabels` in `kubeletconfig` and -`metadata.labels` in the `MachineConfigPool` (`mcp`) worker CR using the following commands: - -.. Check the `kubeletconfig` labels by running the following command: -+ -[source,terminal] ----- -$ oc get kubeletconfig -o yaml ----- -+ -.Example output -[source,yaml] ----- -machineConfigPoolSelector: - matchLabels: - cnf-worker-tuning: enabled ----- - -.. Check the `mcp` labels by running the following command: -+ -[source,terminal] ----- -$ oc get mcp worker -o yaml ----- -+ -.Example output -[source,yaml] ----- -labels: - machineconfiguration.openshift.io/mco-built-in: "" - pools.operator.machineconfiguration.openshift.io/worker: "" ----- -+ -The `cnf-worker-tuning: enabled` label is not present in the `MachineConfigPool` object. - -. Edit the `MachineConfigPool` CR to include the missing label, for example: -+ -[source,terminal] ----- -$ oc edit mcp worker -o yaml ----- -+ -.Example output -[source,yaml] ----- -labels: - machineconfiguration.openshift.io/mco-built-in: "" - pools.operator.machineconfiguration.openshift.io/worker: "" - cnf-worker-tuning: enabled ----- - -. Apply the label changes and wait for the cluster to apply the updated configuration. Run the following command: - -.Verification - -* Check that the missing `numaresourcesoperator-worker` `configmap` CR is applied: -+ -[source,terminal] ----- -$ oc get configmap ----- -+ -.Example output -[source,terminal] ----- -NAME DATA AGE -0e2a6bd3.openshift-kni.io 0 6d21h -kube-root-ca.crt 1 6d21h -numaresourcesoperator-worker 1 5m -openshift-service-ca.crt 1 6d21h -topo-aware-scheduler-config 1 6d18h ----- diff --git a/modules/cnf-troubleshooting-numa-aware-workloads.adoc b/modules/cnf-troubleshooting-numa-aware-workloads.adoc deleted file mode 100644 index 9a753b1f34ec..000000000000 --- a/modules/cnf-troubleshooting-numa-aware-workloads.adoc +++ /dev/null @@ -1,208 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_content-type: PROCEDURE -[id="cnf-troubleshooting-numa-aware-workloads_{context}"] -= Troubleshooting NUMA-aware scheduling - -To troubleshoot common problems with NUMA-aware pod scheduling, perform the following steps. - -.Prerequisites - -* Install the {product-title} CLI (`oc`). - -* Log in as a user with cluster-admin privileges. - -* Install the NUMA Resources Operator and deploy the NUMA-aware secondary scheduler. - -.Procedure - -. Verify that the `noderesourcetopologies` CRD is deployed in the cluster by running the following command: -+ -[source,terminal] ----- -$ oc get crd | grep noderesourcetopologies ----- -+ -.Example output -[source,terminal] ----- -NAME CREATED AT -noderesourcetopologies.topology.node.k8s.io 2022-01-18T08:28:06Z ----- - -. Check that the NUMA-aware scheduler name matches the name specified in your NUMA-aware workloads by running the following command: -+ -[source,terminal] ----- -$ oc get numaresourcesschedulers.nodetopology.openshift.io numaresourcesscheduler -o json | jq '.status.schedulerName' ----- -+ -.Example output -[source,terminal] ----- -topo-aware-scheduler ----- - -. Verify that NUMA-aware scheduable nodes have the `noderesourcetopologies` CR applied to them. 
Run the following command: -+ -[source,terminal] ----- -$ oc get noderesourcetopologies.topology.node.k8s.io ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -compute-0.example.com 17h -compute-1.example.com 17h ----- -+ -[NOTE] -==== -The number of nodes should equal the number of worker nodes that are configured by the machine config pool (`mcp`) worker definition. -==== - -. Verify the NUMA zone granularity for all scheduable nodes by running the following command: -+ -[source,terminal] ----- -$ oc get noderesourcetopologies.topology.node.k8s.io -o yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: v1 -items: -- apiVersion: topology.node.k8s.io/v1 - kind: NodeResourceTopology - metadata: - annotations: - k8stopoawareschedwg/rte-update: periodic - creationTimestamp: "2022-06-16T08:55:38Z" - generation: 63760 - name: worker-0 - resourceVersion: "8450223" - uid: 8b77be46-08c0-4074-927b-d49361471590 - topologyPolicies: - - SingleNUMANodeContainerLevel - zones: - - costs: - - name: node-0 - value: 10 - - name: node-1 - value: 21 - name: node-0 - resources: - - allocatable: "38" - available: "38" - capacity: "40" - name: cpu - - allocatable: "134217728" - available: "134217728" - capacity: "134217728" - name: hugepages-2Mi - - allocatable: "262352048128" - available: "262352048128" - capacity: "270107316224" - name: memory - - allocatable: "6442450944" - available: "6442450944" - capacity: "6442450944" - name: hugepages-1Gi - type: Node - - costs: - - name: node-0 - value: 21 - - name: node-1 - value: 10 - name: node-1 - resources: - - allocatable: "268435456" - available: "268435456" - capacity: "268435456" - name: hugepages-2Mi - - allocatable: "269231067136" - available: "269231067136" - capacity: "270573244416" - name: memory - - allocatable: "40" - available: "40" - capacity: "40" - name: cpu - - allocatable: "1073741824" - available: "1073741824" - capacity: "1073741824" - name: hugepages-1Gi - type: Node -- apiVersion: topology.node.k8s.io/v1 - kind: NodeResourceTopology - metadata: - annotations: - k8stopoawareschedwg/rte-update: periodic - creationTimestamp: "2022-06-16T08:55:37Z" - generation: 62061 - name: worker-1 - resourceVersion: "8450129" - uid: e8659390-6f8d-4e67-9a51-1ea34bba1cc3 - topologyPolicies: - - SingleNUMANodeContainerLevel - zones: <1> - - costs: - - name: node-0 - value: 10 - - name: node-1 - value: 21 - name: node-0 - resources: <2> - - allocatable: "38" - available: "38" - capacity: "40" - name: cpu - - allocatable: "6442450944" - available: "6442450944" - capacity: "6442450944" - name: hugepages-1Gi - - allocatable: "134217728" - available: "134217728" - capacity: "134217728" - name: hugepages-2Mi - - allocatable: "262391033856" - available: "262391033856" - capacity: "270146301952" - name: memory - type: Node - - costs: - - name: node-0 - value: 21 - - name: node-1 - value: 10 - name: node-1 - resources: - - allocatable: "40" - available: "40" - capacity: "40" - name: cpu - - allocatable: "1073741824" - available: "1073741824" - capacity: "1073741824" - name: hugepages-1Gi - - allocatable: "268435456" - available: "268435456" - capacity: "268435456" - name: hugepages-2Mi - - allocatable: "269192085504" - available: "269192085504" - capacity: "270534262784" - name: memory - type: Node -kind: List -metadata: - resourceVersion: "" - selfLink: "" ----- -<1> Each stanza under `zones` describes the resources for a single NUMA zone. -<2> `resources` describes the current state of the NUMA zone resources. 
Check that resources listed under `items.zones.resources.available` correspond to the exclusive NUMA zone resources allocated to each guaranteed pod. diff --git a/modules/cnf-troubleshooting-resource-topo-exporter.adoc b/modules/cnf-troubleshooting-resource-topo-exporter.adoc deleted file mode 100644 index c25952cd3cb0..000000000000 --- a/modules/cnf-troubleshooting-resource-topo-exporter.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_module-type: PROCEDURE -[id="cnf-troubleshooting-resource-topo-exporter_{context}"] -= Troubleshooting the resource topology exporter - -Troubleshoot `noderesourcetopologies` objects where unexpected results are occurring by inspecting the corresponding `resource-topology-exporter` logs. - -[NOTE] -==== -It is recommended that NUMA resource topology exporter instances in the cluster are named for nodes they refer to. For example, a worker node with the name `worker` should have a corresponding `noderesourcetopologies` object called `worker`. -==== - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Get the daemonsets managed by the NUMA Resources Operator. Each daemonset has a corresponding `nodeGroup` in the `NUMAResourcesOperator` CR. Run the following command: -+ -[source,terminal] ----- -$ oc get numaresourcesoperators.nodetopology.openshift.io numaresourcesoperator -o jsonpath="{.status.daemonsets[0]}" ----- -+ -.Example output -[source,json] ----- -{"name":"numaresourcesoperator-worker","namespace":"openshift-numaresources"} ----- - -. Get the label for the daemonset of interest using the value for `name` from the previous step: -+ -[source,terminal] ----- -$ oc get ds -n openshift-numaresources numaresourcesoperator-worker -o jsonpath="{.spec.selector.matchLabels}" ----- -+ -.Example output -[source,json] ----- -{"name":"resource-topology"} ----- - -. Get the pods using the `resource-topology` label by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-numaresources -l name=resource-topology -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE -numaresourcesoperator-worker-5wm2k 2/2 Running 0 2d1h 10.135.0.64 compute-0.example.com -numaresourcesoperator-worker-pb75c 2/2 Running 0 2d1h 10.132.2.33 compute-1.example.com ----- - -. Examine the logs of the `resource-topology-exporter` container running on the worker pod that corresponds to the node you are troubleshooting. 
Run the following command: -+ -[source,terminal] ----- -$ oc logs -n openshift-numaresources -c resource-topology-exporter numaresourcesoperator-worker-pb75c ----- -+ -.Example output -[source,terminal] ----- -I0221 13:38:18.334140 1 main.go:206] using sysinfo: -reservedCpus: 0,1 -reservedMemory: - "0": 1178599424 -I0221 13:38:18.334370 1 main.go:67] === System information === -I0221 13:38:18.334381 1 sysinfo.go:231] cpus: reserved "0-1" -I0221 13:38:18.334493 1 sysinfo.go:237] cpus: online "0-103" -I0221 13:38:18.546750 1 main.go:72] -cpus: allocatable "2-103" -hugepages-1Gi: - numa cell 0 -> 6 - numa cell 1 -> 1 -hugepages-2Mi: - numa cell 0 -> 64 - numa cell 1 -> 128 -memory: - numa cell 0 -> 45758Mi - numa cell 1 -> 48372Mi ----- diff --git a/modules/cnf-tuning-nodes-for-low-latency-via-performanceprofile.adoc b/modules/cnf-tuning-nodes-for-low-latency-via-performanceprofile.adoc deleted file mode 100644 index fdbf20e72290..000000000000 --- a/modules/cnf-tuning-nodes-for-low-latency-via-performanceprofile.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-78 (4.4) -// Epic CNF-422 (4.5) -// scalability_and_performance/cnf-low-latency-tuning.adoc - -[id="cnf-tuning-nodes-for-low-latency-via-performanceprofile_{context}"] -= Tuning nodes for low latency with the performance profile - -The performance profile lets you control latency tuning aspects of nodes that belong to a certain machine config pool. After you specify your settings, the `PerformanceProfile` object is compiled into multiple objects that perform the actual node level tuning: - -* A `MachineConfig` file that manipulates the nodes. -* A `KubeletConfig` file that configures the Topology Manager, the CPU Manager, and the {product-title} nodes. -* The Tuned profile that configures the Node Tuning Operator. - -You can use a performance profile to specify whether to update the kernel to kernel-rt, to allocate huge pages, and to partition the CPUs for performing housekeeping duties or running workloads. - -[NOTE] -==== -You can manually create the `PerformanceProfile` object or use the Performance Profile Creator (PPC) to generate a performance profile. See the additional resources below for more information on the PPC. -==== - -.Sample performance profile -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: performance -spec: - cpu: - isolated: "4-15" <1> - reserved: "0-3" <2> - hugepages: - defaultHugepagesSize: "1G" - pages: - - size: "1G" - count: 16 - node: 0 - realTimeKernel: - enabled: true <3> - numa: <4> - topologyPolicy: "best-effort" - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" <5> ----- -<1> Use this field to isolate specific CPUs to use with application containers for workloads. Set an even number of isolated CPUs to enable the pods to run without errors when hyperthreading is enabled. -<2> Use this field to reserve specific CPUs to use with infra containers for housekeeping. -<3> Use this field to install the real-time kernel on the node. Valid values are `true` or `false`. Setting the `true` value installs the real-time kernel. -<4> Use this field to configure the topology manager policy. Valid values are `none` (default), `best-effort`, `restricted`, and `single-numa-node`. For more information, see link:https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies[Topology Manager Policies]. 
-<5> Use this field to specify a node selector to apply the performance profile to specific nodes. diff --git a/modules/cnf-understanding-low-latency.adoc b/modules/cnf-understanding-low-latency.adoc deleted file mode 100644 index ccedad105e70..000000000000 --- a/modules/cnf-understanding-low-latency.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-78 (4.4) -// * scalability_and_performance/cnf-low-latency-tuning.adoc - -:_content-type: CONCEPT -[id="cnf-understanding-low-latency_{context}"] -= Understanding low latency - -The emergence of Edge computing in the area of Telco / 5G plays a key role in reducing latency and congestion problems and improving application performance. - -Simply put, latency determines how fast data (packets) moves from the sender to receiver and returns to the sender after processing by the receiver. Maintaining a network architecture with the lowest possible delay of latency speeds is key for meeting the network performance requirements of 5G. Compared to 4G technology, with an average latency of 50 ms, 5G is targeted to reach latency numbers of 1 ms or less. This reduction in latency boosts wireless throughput by a factor of 10. - -Many of the deployed applications in the Telco space require low latency that can only tolerate zero packet loss. Tuning for zero packet loss helps mitigate the inherent issues that degrade network performance. For more information, see link:https://www.redhat.com/en/blog/tuning-zero-packet-loss-red-hat-openstack-platform-part-1[Tuning for Zero Packet Loss in {rh-openstack-first}]. - -The Edge computing initiative also comes in to play for reducing latency rates. Think of it as being on the edge of the cloud and closer to the user. This greatly reduces the distance between the user and distant data centers, resulting in reduced application response times and performance latency. - -Administrators must be able to manage their many Edge sites and local services in a centralized way so that all of the deployments can run at the lowest possible management cost. They also need an easy way to deploy and configure certain nodes of their cluster for real-time low latency and high-performance purposes. Low latency nodes are useful for applications such as Cloud-native Network Functions (CNF) and Data Plane Development Kit (DPDK). - -{product-title} currently provides mechanisms to tune software on an {product-title} cluster for real-time running and low latency (around <20 microseconds reaction time). This includes tuning the kernel and {product-title} set values, installing a kernel, and reconfiguring the machine. But this method requires setting up four different Operators and performing many configurations that, when done manually, is complex and could be prone to mistakes. - -{product-title} uses the Node Tuning Operator to implement automatic tuning to achieve low latency performance for {product-title} applications. The cluster administrator uses this performance profile configuration that makes it easier to make these changes in a more reliable way. The administrator can specify whether to update the kernel to kernel-rt, reserve CPUs for cluster and operating system housekeeping duties, including pod infra containers, and isolate CPUs for application containers to run the workloads. - -[NOTE] -==== -Currently, disabling CPU load balancing is not supported by cgroup v2. As a result, you might not get the desired behavior from performance profiles if you have cgroup v2 enabled. 
Enabling cgroup v2 is not recommended if you are using performace profiles. -==== - -{product-title} also supports workload hints for the Node Tuning Operator that can tune the `PerformanceProfile` to meet the demands of different industry environments. Workload hints are available for `highPowerConsumption` (very low latency at the cost of increased power consumption) and `realTime` (priority given to optimum latency). A combination of `true/false` settings for these hints can be used to deal with application-specific workload profiles and requirements. - -Workload hints simplify the fine-tuning of performance to industry sector settings. Instead of a “one size fits all” approach, workload hints can cater to usage patterns such as placing priority on: - -* Low latency -* Real-time capability -* Efficient use of power - -In an ideal world, all of those would be prioritized: in real life, some come at the expense of others. The Node Tuning Operator is now aware of the workload expectations and better able to meet the demands of the workload. The cluster admin can now specify into which use case that workload falls. The Node Tuning Operator uses the `PerformanceProfile` to fine tune the performance settings for the workload. - -The environment in which an application is operating influences its behavior. For a typical data center with no strict latency requirements, only minimal default tuning is needed that enables CPU partitioning for some high performance workload pods. For data centers and workloads where latency is a higher priority, measures are still taken to optimize power consumption. The most complicated cases are clusters close to latency-sensitive equipment such as manufacturing machinery and software-defined radios. This last class of deployment is often referred to as Far edge. For Far edge deployments, ultra-low latency is the ultimate priority, and is achieved at the expense of power management. - diff --git a/modules/cnf-understanding-workload-hints.adoc b/modules/cnf-understanding-workload-hints.adoc deleted file mode 100644 index 99479338da11..000000000000 --- a/modules/cnf-understanding-workload-hints.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/cnf-low-latency-tuning.adoc - -:_content-type: CONCEPT -[id="cnf-understanding-workload-hints_{context}"] -= Understanding workload hints - -The following table describes how combinations of power consumption and real-time settings impact on latency. -[NOTE] -==== -The following workload hints can be configured manually. You can also work with workload hints using the Performance Profile Creator. For more information about the performance profile, see the "Creating a performance profile" section. -==== - -[cols="1,1,1,1",options="header"] -|=== - | Performance Profile creator setting| Hint | Environment | Description - - | Default - a|[source,terminal] ----- -workloadHints: -highPowerConsumption: false -realTime: false ----- - | High throughput cluster without latency requirements - | Performance achieved through CPU partitioning only. - - - - | Low-latency - a|[source,terminal] ----- -workloadHints: -highPowerConsumption: false -realTime: true ----- - | Regional datacenters - | Both energy savings and low-latency are desirable: compromise between power management, latency and throughput. 
- - - | Ultra-low-latency - a|[source,terminal] ----- -workloadHints: -highPowerConsumption: true -realTime: true ----- - | Far edge clusters, latency critical workloads - | Optimized for absolute minimal latency and maximum determinism at the cost of increased power consumption. - - | Per-pod power management - a|[source,terminal] ----- -workloadHints: -realTime: true -highPowerConsumption: false -perPodPowerManagement: true ----- - | Critical and non-critical workloads - | Allows for power management per pod. - -|=== \ No newline at end of file diff --git a/modules/cnf-use-device-interrupt-processing-for-isolated-cpus.adoc b/modules/cnf-use-device-interrupt-processing-for-isolated-cpus.adoc deleted file mode 100644 index f09231fed01d..000000000000 --- a/modules/cnf-use-device-interrupt-processing-for-isolated-cpus.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// CNF-802 Infrastructure-provided interrupt processing for guaranteed pod CPUs -// Module included in the following assemblies: -// -// *cnf-low-latency-tuning.adoc - -[id="use-device-interrupt-processing-for-isolated-cpus_{context}"] -= Upgrading the performance profile to use device interrupt processing - -When you upgrade the Node Tuning Operator performance profile custom resource definition (CRD) from v1 or v1alpha1 to v2, `globallyDisableIrqLoadBalancing` is set to `true` on existing profiles. - -[NOTE] -==== -`globallyDisableIrqLoadBalancing` toggles whether IRQ load balancing will be disabled for the Isolated CPU set. When the option is set to `true` it disables IRQ load balancing for the Isolated CPU set. Setting the option to `false` allows the IRQs to be balanced across all CPUs. -==== - -[id="nto_supported_api_versions_{context}"] -== Supported API Versions - -The Node Tuning Operator supports `v2`, `v1`, and `v1alpha1` for the performance profile `apiVersion` field. The v1 and v1alpha1 APIs are identical. The v2 API includes an optional boolean field `globallyDisableIrqLoadBalancing` with a default value of `false`. - -[id="upgrading_nto_api_from_v1alpha1_to_v1_{context}"] -=== Upgrading Node Tuning Operator API from v1alpha1 to v1 - -When upgrading Node Tuning Operator API version from v1alpha1 to v1, the v1alpha1 performance profiles are converted on-the-fly using a "None" Conversion strategy and served to the Node Tuning Operator with API version v1. - -[id="upgrading_nto_api_from_v1alpha1_to_v1_or_v2_{context}"] -=== Upgrading Node Tuning Operator API from v1alpha1 or v1 to v2 - -When upgrading from an older Node Tuning Operator API version, the existing v1 and v1alpha1 performance profiles are converted using a conversion webhook that injects the `globallyDisableIrqLoadBalancing` field with a value of `true`. diff --git a/modules/cnf-verifying-queue-status.adoc b/modules/cnf-verifying-queue-status.adoc deleted file mode 100644 index 06a90dc73ffe..000000000000 --- a/modules/cnf-verifying-queue-status.adoc +++ /dev/null @@ -1,225 +0,0 @@ -// Module included in the following assemblies: -//CNF-1483 (4.8) -// * scalability_and_performance/cnf-low-latency-tuning.adoc - -[id="verifying-queue-status_{context}"] -= Verifying the queue status - -In this section, a number of examples illustrate different performance profiles and how to verify the changes are applied. - -.Example 1 - -In this example, the net queue count is set to the reserved CPU count (2) for _all_ supported devices. 
- -The relevant section from the performance profile is: - -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -metadata: - name: performance -spec: - kind: PerformanceProfile - spec: - cpu: - reserved: 0-1 #total = 2 - isolated: 2-8 - net: - userLevelNetworking: true -# ... ----- - -* Display the status of the queues associated with a device using the following command: -+ -[NOTE] -==== -Run this command on the node where the performance profile was applied. -==== -+ -[source,terminal] ----- -$ ethtool -l ----- - -* Verify the queue status before the profile is applied: -+ -[source,terminal] ----- -$ ethtool -l ens4 ----- -+ -.Example output -[source,terminal] ----- -Channel parameters for ens4: -Pre-set maximums: -RX: 0 -TX: 0 -Other: 0 -Combined: 4 -Current hardware settings: -RX: 0 -TX: 0 -Other: 0 -Combined: 4 ----- - -* Verify the queue status after the profile is applied: -+ -[source,terminal] ----- -$ ethtool -l ens4 ----- -+ -.Example output -[source,terminal] ----- -Channel parameters for ens4: -Pre-set maximums: -RX: 0 -TX: 0 -Other: 0 -Combined: 4 -Current hardware settings: -RX: 0 -TX: 0 -Other: 0 -Combined: 2 <1> ----- - -<1> The combined channel shows that the total count of reserved CPUs for _all_ supported devices is 2. This matches what is configured in the performance profile. - -.Example 2 - -In this example, the net queue count is set to the reserved CPU count (2) for _all_ supported network devices with a specific `vendorID`. - -The relevant section from the performance profile is: - -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -metadata: - name: performance -spec: - kind: PerformanceProfile - spec: - cpu: - reserved: 0-1 #total = 2 - isolated: 2-8 - net: - userLevelNetworking: true - devices: - - vendorID = 0x1af4 -# ... ----- - -* Display the status of the queues associated with a device using the following command: -+ -[NOTE] -==== -Run this command on the node where the performance profile was applied. -==== -+ -[source,terminal] ----- -$ ethtool -l ----- - -* Verify the queue status after the profile is applied: -+ -[source,terminal] ----- -$ ethtool -l ens4 ----- -+ -.Example output -[source,terminal] ----- -Channel parameters for ens4: -Pre-set maximums: -RX: 0 -TX: 0 -Other: 0 -Combined: 4 -Current hardware settings: -RX: 0 -TX: 0 -Other: 0 -Combined: 2 <1> ----- - -<1> The total count of reserved CPUs for all supported devices with `vendorID=0x1af4` is 2. -For example, if there is another network device `ens2` with `vendorID=0x1af4` it will also have total net queues of 2. This matches what is configured in the performance profile. - -.Example 3 - -In this example, the net queue count is set to the reserved CPU count (2) for _all_ supported network devices that match any of the defined device identifiers. - -The command `udevadm info` provides a detailed report on a device. In this example the devices are: - -[source,terminal] ----- -# udevadm info -p /sys/class/net/ens4 -... -E: ID_MODEL_ID=0x1000 -E: ID_VENDOR_ID=0x1af4 -E: INTERFACE=ens4 -... ----- - -[source,terminal] ----- -# udevadm info -p /sys/class/net/eth0 -... -E: ID_MODEL_ID=0x1002 -E: ID_VENDOR_ID=0x1001 -E: INTERFACE=eth0 -... 
----- - -* Set the net queues to 2 for a device with `interfaceName` equal to `eth0` and any devices that have a `vendorID=0x1af4` with the following performance profile: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -metadata: - name: performance -spec: - kind: PerformanceProfile - spec: - cpu: - reserved: 0-1 #total = 2 - isolated: 2-8 - net: - userLevelNetworking: true - devices: - - interfaceName = eth0 - - vendorID = 0x1af4 -... ----- - -* Verify the queue status after the profile is applied: -+ -[source,terminal] ----- -$ ethtool -l ens4 ----- -+ -.Example output -[source,terminal] ----- -Channel parameters for ens4: -Pre-set maximums: -RX: 0 -TX: 0 -Other: 0 -Combined: 4 -Current hardware settings: -RX: 0 -TX: 0 -Other: 0 -Combined: 2 <1> ----- -+ -<1> The total count of reserved CPUs for all supported devices with `vendorID=0x1af4` is set to 2. -For example, if there is another network device `ens2` with `vendorID=0x1af4`, it will also have the total net queues set to 2. Similarly, a device with `interfaceName` equal to `eth0` will have total net queues set to 2. diff --git a/modules/codeready-workspaces.adoc b/modules/codeready-workspaces.adoc deleted file mode 100644 index 6c64b30d0670..000000000000 --- a/modules/codeready-workspaces.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * adding_service_cluster/available-services.adoc -// * adding_service_cluster/rosa-available-services.adoc - -[id="codeready-workspaces_{context}"] -= {openshift-dev-spaces-productname} - -The {openshift-dev-spaces-productname} service is available as an add-on to your {product-title} cluster. {openshift-dev-spaces-productname} is a developer tool that makes cloud-native development practical for teams, using Kubernetes and containers to provide any member of the development or IT team with a consistent, preconfigured development environment. Developers can create code, build, and test in containers running on {product-title}. - -[NOTE] -==== -When using this service with {product-title}, {openshift-dev-spaces-productname} can be deployed to any namespace except `openshift-workspaces`. -==== - -[role="_additional-resources"] -.Additional resources -* link:https://access.redhat.com/documentation/en-us/red_hat_openshift_dev_spaces/[{openshift-dev-spaces-productname}] documentation diff --git a/modules/collecting-docker-logs-windows.adoc b/modules/collecting-docker-logs-windows.adoc deleted file mode 100644 index 1f9b42554d59..000000000000 --- a/modules/collecting-docker-logs-windows.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-windows-container-workload-issues.adoc - -:_content-type: PROCEDURE -[id="collecting-docker-logs-windows_{context}"] -= Collecting Docker logs for Windows containers - -The Windows Docker service does not stream its logs to stdout, but instead, logs to the event log for Windows. You can view the Docker event logs to investigate issues you think might be caused by the Windows Docker service. - -.Prerequisites - -* You installed the Windows Machine Config Operator (WMCO) using Operator Lifecycle Manager (OLM). -* You have created a Windows compute machine set. - -.Procedure - -. SSH into the Windows node and enter PowerShell: -+ -[source,terminal] ----- -C:\> powershell ----- - -. 
View the Docker logs by running the following command: -+ -[source,terminal] ----- -C:\> Get-EventLog -LogName Application -Source Docker ----- diff --git a/modules/collecting-gitops-debugging-data.adoc b/modules/collecting-gitops-debugging-data.adoc deleted file mode 100644 index 827a869d211c..000000000000 --- a/modules/collecting-gitops-debugging-data.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assembly: -// -// * cicd/gitops/collecting-debugging-data-for-support.adoc - -:_content-type: PROCEDURE -[id="collecting-debugging-data-for-gitops_{context}"] -= Collecting debugging data for {gitops-title} - -Use the `oc adm must-gather` CLI command to collect the following details about the cluster that is associated with {gitops-title}: - -* The subscription and namespace of the {gitops-title} Operator. -* The namespaces where ArgoCD objects are available and the objects in those namespaces, such as `ArgoCD`, `Applications`, `ApplicationSets`, `AppProjects`, and `configmaps`. -* A list of the namespaces that are managed by the {gitops-title} Operator, and resources from those namespaces. -* All {gitops-shortname}-related custom resource objects and definitions. -* Operator and Argo CD logs. -* Warning and error-level events. - -.Prerequisites -* You have logged in to the {product-title} cluster as an administrator. -* You have installed the {product-title} CLI (`oc`). -* You have installed the {gitops-title} Operator. - -.Procedure - -. Navigate to the directory where you want to store the debugging information. -. Run the `oc adm must-gather` command with the {gitops-title} `must-gather` image: -+ -[source,terminal] ----- -$ oc adm must-gather --image=registry.redhat.io/openshift-gitops-1/gitops-must-gather-rhel8:v1.9.0 ----- -+ -The `must-gather` tool creates a new directory that starts with `./must-gather.local` in the current directory. For example, `./must-gather.local.4157245944708210399`. - -. Create a compressed file from the directory that was just created. For example, on a computer that uses a Linux operating system, run the following command: -+ -[source,terminal] ----- -$ tar -cvaf must-gather.tar.gz must-gather.local.4157245944708210399 ----- - -. Attach the compressed file to your support case on the link:https://access.redhat.com/[Red Hat Customer Portal]. \ No newline at end of file diff --git a/modules/collecting-kube-node-logs-windows.adoc b/modules/collecting-kube-node-logs-windows.adoc deleted file mode 100644 index 5dc1c3a331c2..000000000000 --- a/modules/collecting-kube-node-logs-windows.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-windows-container-workload-issues.adoc - -:_content-type: PROCEDURE -[id="collecting-kube-node-logs-windows_{context}"] -= Collecting Kubernetes node logs for Windows containers - -Windows container logging works differently from Linux container logging; the Kubernetes node logs for Windows workloads are streamed to the `C:\var\logs` directory by default. Therefore, you must gather the Windows node logs from that directory. - -.Prerequisites - -* You installed the Windows Machine Config Operator (WMCO) using Operator Lifecycle Manager (OLM). -* You have created a Windows compute machine set. - -.Procedure - -. 
To view the logs under all directories in `C:\var\logs`, run the following command: -+ -[source,terminal] ----- -$ oc adm node-logs -l kubernetes.io/os=windows --path= \ - /ip-10-0-138-252.us-east-2.compute.internal containers \ - /ip-10-0-138-252.us-east-2.compute.internal hybrid-overlay \ - /ip-10-0-138-252.us-east-2.compute.internal kube-proxy \ - /ip-10-0-138-252.us-east-2.compute.internal kubelet \ - /ip-10-0-138-252.us-east-2.compute.internal pods ----- - -. You can now list files in the directories using the same command and view the individual log files. For example, to view the kubelet logs, run the following command: -+ -[source,terminal] ----- -$ oc adm node-logs -l kubernetes.io/os=windows --path=/kubelet/kubelet.log ----- diff --git a/modules/collecting-windows-application-event-logs.adoc b/modules/collecting-windows-application-event-logs.adoc deleted file mode 100644 index 349032028392..000000000000 --- a/modules/collecting-windows-application-event-logs.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-windows-container-workload-issues.adoc - -:_content-type: PROCEDURE -[id="collecting-windows-application-event-logs_{context}"] -= Collecting Windows application event logs - -The `Get-WinEvent` shim on the kubelet `logs` endpoint can be used to collect application event logs from Windows machines. - -.Prerequisites - -* You installed the Windows Machine Config Operator (WMCO) using Operator Lifecycle Manager (OLM). -* You have created a Windows compute machine set. - -.Procedure - -* To view logs from all applications logging to the event logs on the Windows machine, run: -+ -[source,terminal] ----- -$ oc adm node-logs -l kubernetes.io/os=windows --path=journal ----- -+ -The same command is executed when collecting logs with `oc adm must-gather`. -+ -Other Windows application logs from the event log can also be collected by specifying the respective service with a `-u` flag. For example, you can run the following command to collect logs for the docker runtime service: -+ -[source,terminal] ----- -$ oc adm node-logs -l kubernetes.io/os=windows --path=journal -u docker ----- diff --git a/modules/compliance-anatomy.adoc b/modules/compliance-anatomy.adoc deleted file mode 100644 index c19f299fd4fa..000000000000 --- a/modules/compliance-anatomy.adoc +++ /dev/null @@ -1,346 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-troubleshooting.adoc - -[id="compliance-anatomy_{context}"] -= Anatomy of a scan - -The following sections outline the components and stages of Compliance Operator scans. - -[id="compliance-anatomy-compliance-sources_{context}"] -== Compliance sources -The compliance content is stored in `Profile` objects that are generated from a `ProfileBundle` object. The Compliance Operator creates a `ProfileBundle` object for the cluster and another for the cluster nodes. - -[source,terminal] ----- -$ oc get -n openshift-compliance profilebundle.compliance ----- - -[source,terminal] ----- -$ oc get -n openshift-compliance profile.compliance ----- - -The `ProfileBundle` objects are processed by deployments labeled with the `Bundle` name. 
To troubleshoot an issue with the `Bundle`, you can find the deployment and view logs of the pods in a deployment: - -[source,terminal] ----- -$ oc logs -n openshift-compliance -lprofile-bundle=ocp4 -c profileparser ----- - -[source,terminal] ----- -$ oc get -n openshift-compliance deployments,pods -lprofile-bundle=ocp4 ----- - -[source,terminal] ----- -$ oc logs -n openshift-compliance pods/ ----- - -[source,terminal] ----- -$ oc describe -n openshift-compliance pod/ -c profileparser ----- - -[id="compliance-anatomy-scan-setting-scan-binding-lifecycle_{context}"] -== The ScanSetting and ScanSettingBinding objects lifecycle and debugging -With valid compliance content sources, the high-level `ScanSetting` and `ScanSettingBinding` objects can be used to generate `ComplianceSuite` and `ComplianceScan` objects: - -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ScanSetting -metadata: - name: my-companys-constraints -debug: true -# For each role, a separate scan will be created pointing -# to a node-role specified in roles -roles: - - worker ---- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ScanSettingBinding -metadata: - name: my-companys-compliance-requirements -profiles: - # Node checks - - name: rhcos4-e8 - kind: Profile - apiGroup: compliance.openshift.io/v1alpha1 - # Cluster checks - - name: ocp4-e8 - kind: Profile - apiGroup: compliance.openshift.io/v1alpha1 -settingsRef: - name: my-companys-constraints - kind: ScanSetting - apiGroup: compliance.openshift.io/v1alpha1 ----- - -Both `ScanSetting` and `ScanSettingBinding` objects are handled by the same controller tagged with `logger=scansettingbindingctrl`. These objects have no status. Any issues are communicated in form of events: - -[source,terminal] ----- -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuiteCreated 9m52s scansettingbindingctrl ComplianceSuite openshift-compliance/my-companys-compliance-requirements created ----- - -Now a `ComplianceSuite` object is created. The flow continues to reconcile the newly created `ComplianceSuite`. - -[id="compliance-suite-lifecycle-debugging_{context}"] -== ComplianceSuite custom resource lifecycle and debugging -The `ComplianceSuite` CR is a wrapper around `ComplianceScan` CRs. The `ComplianceSuite` CR is handled by controller tagged with `logger=suitectrl`. -This controller handles creating scans from a suite, reconciling and aggregating individual Scan statuses into a single Suite status. If a suite is set to execute periodically, the `suitectrl` also handles creating a `CronJob` CR that re-runs the scans in the suite after the initial run is done: - -[source,terminal] ----- -$ oc get cronjobs ----- - -.Example output -[source,terminal] ----- -NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE - 0 1 * * * False 0 151m ----- - -For the most important issues, events are emitted. View them with `oc describe compliancesuites/`. The `Suite` objects also have a `Status` subresource that is updated when any of `Scan` objects that belong to this suite update their `Status` subresource. After all expected scans are created, control is passed to the scan controller. - -[id="compliance-scan-lifecycle-debugging_{context}"] -== ComplianceScan custom resource lifecycle and debugging -The `ComplianceScan` CRs are handled by the `scanctrl` controller. This is also where the actual scans happen and the scan results are created. 
Each scan goes through several phases: - -[id="compliance-scan-pending-phase_{context}"] -=== Pending phase -The scan is validated for correctness in this phase. If some parameters like storage size are invalid, the scan transitions to DONE with ERROR result, otherwise proceeds to the Launching phase. - -[id="compliance-scan-launching-phase_{context}"] -=== Launching phase -In this phase, several config maps that contain either environment for the scanner pods or directly the script that the scanner pods will be evaluating. List the config maps: - -[source,terminal] ----- -$ oc -n openshift-compliance get cm \ --l compliance.openshift.io/scan-name=rhcos4-e8-worker,complianceoperator.openshift.io/scan-script= ----- - -These config maps will be used by the scanner pods. If you ever needed to modify the scanner behavior, change the scanner debug level or print the raw results, modifying the config maps is the way to go. Afterwards, a persistent volume claim is created per scan to store the raw ARF results: - -[source,terminal] ----- -$ oc get pvc -n openshift-compliance -lcompliance.openshift.io/scan-name=rhcos4-e8-worker ----- - -The PVCs are mounted by a per-scan `ResultServer` deployment. A `ResultServer` is a simple HTTP server where the individual scanner pods upload the full ARF results to. Each server can run on a different node. The full ARF results might be very large and you cannot presume that it would be possible to create a volume that could be mounted from multiple nodes at the same time. After the scan is finished, the `ResultServer` deployment is scaled down. The PVC with the raw results can be mounted from another custom pod and the results can be fetched or inspected. The traffic between the scanner pods and the `ResultServer` is protected by mutual TLS protocols. - -Finally, the scanner pods are launched in this phase; one scanner pod for a `Platform` scan instance and one scanner pod per matching node for a `node` scan instance. The per-node pods are labeled with the node name. Each pod is always labeled with the `ComplianceScan` name: - -[source,terminal] ----- -$ oc get pods -lcompliance.openshift.io/scan-name=rhcos4-e8-worker,workload=scanner --show-labels ----- - -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE LABELS -rhcos4-e8-worker-ip-10-0-169-90.eu-north-1.compute.internal-pod 0/2 Completed 0 39m compliance.openshift.io/scan-name=rhcos4-e8-worker,targetNode=ip-10-0-169-90.eu-north-1.compute.internal,workload=scanner ----- -+ -The scan then proceeds to the Running phase. - -[id="compliance-scan-running-phase_{context}"] -=== Running phase -The running phase waits until the scanner pods finish. The following terms and processes are in use in the running phase: - -* *init container*: There is one init container called `content-container`. It runs the *contentImage* container and executes a single command that copies the *contentFile* to the `/content` directory shared with the other containers in this pod. - -* *scanner*: This container runs the scan. For node scans, the container mounts the node filesystem as `/host` and mounts the content delivered by the init container. The container also mounts the `entrypoint` `ConfigMap` created in the Launching phase and executes it. The default script in the entrypoint `ConfigMap` executes OpenSCAP and stores the result files in the `/results` directory shared between the pod's containers. Logs from this pod can be viewed to determine what the OpenSCAP scanner checked. 
More verbose output can be viewed with the `debug` flag. - -* *logcollector*: The logcollector container waits until the scanner container finishes. Then, it uploads the full ARF results to the `ResultServer` and separately uploads the XCCDF results along with scan result and OpenSCAP result code as a `ConfigMap.` These result config maps are labeled with the scan name (`compliance.openshift.io/scan-name=rhcos4-e8-worker`): -+ -[source,terminal] ----- -$ oc describe cm/rhcos4-e8-worker-ip-10-0-169-90.eu-north-1.compute.internal-pod ----- -+ -.Example output -[source,terminal] ----- - Name: rhcos4-e8-worker-ip-10-0-169-90.eu-north-1.compute.internal-pod - Namespace: openshift-compliance - Labels: compliance.openshift.io/scan-name-scan=rhcos4-e8-worker - complianceoperator.openshift.io/scan-result= - Annotations: compliance-remediations/processed: - compliance.openshift.io/scan-error-msg: - compliance.openshift.io/scan-result: NON-COMPLIANT - OpenSCAP-scan-result/node: ip-10-0-169-90.eu-north-1.compute.internal - - Data - ==== - exit-code: - ---- - 2 - results: - ---- - - ... ----- - -Scanner pods for `Platform` scans are similar, except: - -* There is one extra init container called `api-resource-collector` that reads the OpenSCAP content provided by the content-container init, container, figures out which API resources the content needs to examine and stores those API resources to a shared directory where the `scanner` container would read them from. - -* The `scanner` container does not need to mount the host file system. - -When the scanner pods are done, the scans move on to the Aggregating phase. - -[id="compliance-scan-aggregating-phase_{context}"] -=== Aggregating phase -In the aggregating phase, the scan controller spawns yet another pod called the aggregator pod. Its purpose it to take the result `ConfigMap` objects, read the results and for each check result create the corresponding Kubernetes object. If the check failure can be automatically remediated, a `ComplianceRemediation` object is created. To provide human-readable metadata for the checks and remediations, the aggregator pod also mounts the OpenSCAP content using an init container. - -When a config map is processed by an aggregator pod, it is labeled the `compliance-remediations/processed` label. The result of this phase are `ComplianceCheckResult` objects: - -[source,terminal] ----- -$ oc get compliancecheckresults -lcompliance.openshift.io/scan-name=rhcos4-e8-worker ----- - -.Example output -[source,terminal] ----- -NAME STATUS SEVERITY -rhcos4-e8-worker-accounts-no-uid-except-zero PASS high -rhcos4-e8-worker-audit-rules-dac-modification-chmod FAIL medium ----- -and `ComplianceRemediation` objects: - -[source,terminal] ----- -$ oc get complianceremediations -lcompliance.openshift.io/scan-name=rhcos4-e8-worker ----- - -.Example output -[source,terminal] ----- -NAME STATE -rhcos4-e8-worker-audit-rules-dac-modification-chmod NotApplied -rhcos4-e8-worker-audit-rules-dac-modification-chown NotApplied -rhcos4-e8-worker-audit-rules-execution-chcon NotApplied -rhcos4-e8-worker-audit-rules-execution-restorecon NotApplied -rhcos4-e8-worker-audit-rules-execution-semanage NotApplied -rhcos4-e8-worker-audit-rules-execution-setfiles NotApplied ----- - -After these CRs are created, the aggregator pod exits and the scan moves on to the Done phase. 
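
To quickly see which of the failing checks produced an automatic remediation, you can cross-reference the two object types. The following is a minimal sketch, assuming, as in the example output above, that an auto-created remediation shares the name of the `ComplianceCheckResult` it fixes and that the check result reports its state in its top-level `status` field:

[source,terminal]
----
# For every failing check in the scan, look up a remediation object with the same name
$ for check in $(oc -n openshift-compliance get compliancecheckresults \
    -l compliance.openshift.io/scan-name=rhcos4-e8-worker \
    -o jsonpath='{.items[?(@.status=="FAIL")].metadata.name}'); do
    oc -n openshift-compliance get complianceremediations "$check" --ignore-not-found
  done
----

Failing checks that print no matching `ComplianceRemediation` object typically must be remediated manually.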
- -[id="compliance-scan-done-phase_{context}"] -=== Done phase -In the final scan phase, the scan resources are cleaned up if needed and the `ResultServer` deployment is either scaled down (if the scan was one-time) or deleted if the scan is continuous; the next scan instance would then recreate the deployment again. - -It is also possible to trigger a re-run of a scan in the Done phase by annotating it: - -[source,terminal] ----- -$ oc -n openshift-compliance \ -annotate compliancescans/rhcos4-e8-worker compliance.openshift.io/rescan= ----- - -After the scan reaches the Done phase, nothing else happens on its own unless the remediations are set to be applied automatically with `autoApplyRemediations: true`. The {product-title} administrator would now review the remediations and apply them as needed. If the remediations are set to be applied automatically, the `ComplianceSuite` controller takes over in the Done phase, pauses the machine config pool to which the scan maps to and applies all the remediations in one go. If a remediation is applied, the `ComplianceRemediation` controller takes over. - -[id="compliance-remediation-lifecycle-debugging_{context}"] -== ComplianceRemediation controller lifecycle and debugging -The example scan has reported some findings. One of the remediations can be enabled by toggling its `apply` attribute to `true`: - -[source,terminal] ----- -$ oc patch complianceremediations/rhcos4-e8-worker-audit-rules-dac-modification-chmod --patch '{"spec":{"apply":true}}' --type=merge ----- - -The `ComplianceRemediation` controller (`logger=remediationctrl`) reconciles the modified object. The result of the reconciliation is change of status of the remediation object that is reconciled, but also a change of the rendered per-suite `MachineConfig` object that contains all the applied remediations. - -The `MachineConfig` object always begins with `75-` and is named after the scan and the suite: - -[source,terminal] ----- -$ oc get mc | grep 75- ----- - -.Example output -[source,terminal] ----- -75-rhcos4-e8-worker-my-companys-compliance-requirements 3.2.0 2m46s ----- - -The remediations the `mc` currently consists of are listed in the machine config's annotations: - -[source,terminal] ----- -$ oc describe mc/75-rhcos4-e8-worker-my-companys-compliance-requirements ----- - -.Example output -[source,terminal] ----- -Name: 75-rhcos4-e8-worker-my-companys-compliance-requirements -Labels: machineconfiguration.openshift.io/role=worker -Annotations: remediation/rhcos4-e8-worker-audit-rules-dac-modification-chmod: ----- - -The `ComplianceRemediation` controller's algorithm works like this: - -* All currently applied remediations are read into an initial remediation set. -* If the reconciled remediation is supposed to be applied, it is added to the set. -* A `MachineConfig` object is rendered from the set and annotated with names of remediations in the set. If the set is empty (the last remediation was unapplied), the rendered `MachineConfig` object is removed. -* If and only if the rendered machine config is different from the one already applied in the cluster, the applied MC is updated (or created, or deleted). -* Creating or modifying a `MachineConfig` object triggers a reboot of nodes that match the `machineconfiguration.openshift.io/role` label - see the Machine Config Operator documentation for more details. - -The remediation loop ends once the rendered machine config is updated, if needed, and the reconciled remediation object status is updated. 
In our case, applying the remediation would trigger a reboot. After the reboot, annotate the scan to re-run it: - -[source,terminal] ----- -$ oc -n openshift-compliance \ -annotate compliancescans/rhcos4-e8-worker compliance.openshift.io/rescan= ----- - -The scan will run and finish. Check for the remediation to pass: - -[source,terminal] ----- -$ oc -n openshift-compliance \ -get compliancecheckresults/rhcos4-e8-worker-audit-rules-dac-modification-chmod ----- - -.Example output -[source,terminal] ----- -NAME STATUS SEVERITY -rhcos4-e8-worker-audit-rules-dac-modification-chmod PASS medium ----- - -[id="compliance-operator-useful-labels_{context}"] -== Useful labels - -Each pod that is spawned by the Compliance Operator is labeled specifically with the scan it belongs to and the work it does. The scan identifier is labeled with the `compliance.openshift.io/scan-name` label. The workload identifier is labeled with the `workload` label. - -The Compliance Operator schedules the following workloads: - -* *scanner*: Performs the compliance scan. - -* *resultserver*: Stores the raw results for the compliance scan. - -* *aggregator*: Aggregates the results, detects inconsistencies and outputs result objects (checkresults and remediations). - -* *suitererunner*: Will tag a suite to be re-run (when a schedule is set). - -* *profileparser*: Parses a datastream and creates the appropriate profiles, rules and variables. - -When debugging and logs are required for a certain workload, run: - -[source,terminal] ----- -$ oc logs -l workload= -c ----- diff --git a/modules/compliance-apply-remediation-for-customized-mcp.adoc b/modules/compliance-apply-remediation-for-customized-mcp.adoc deleted file mode 100644 index 1483b24e8682..000000000000 --- a/modules/compliance-apply-remediation-for-customized-mcp.adoc +++ /dev/null @@ -1,74 +0,0 @@ -:_content-type: PROCEDURE -[id="compliance-operator-apply-remediation-for-customized-mcp"] -= Applying remediation when using customized machine config pools - -When you create a custom `MachineConfigPool`, add a label to the `MachineConfigPool` so that `machineConfigPoolSelector` present in the `KubeletConfig` can match the label with `MachineConfigPool`. - -[IMPORTANT] -==== -Do not set `protectKernelDefaults: false` in the `KubeletConfig` file, because the `MachineConfigPool` object might fail to unpause unexpectedly after the Compliance Operator finishes applying remediation. -==== - -.Procedure - -. List the nodes. -+ -[source,terminal] ----- -$ oc get nodes -n openshift-compliance ----- -+ -.Example output -+ -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-128-92.us-east-2.compute.internal Ready master 5h21m v1.27.3 -ip-10-0-158-32.us-east-2.compute.internal Ready worker 5h17m v1.27.3 -ip-10-0-166-81.us-east-2.compute.internal Ready worker 5h17m v1.27.3 -ip-10-0-171-170.us-east-2.compute.internal Ready master 5h21m v1.27.3 -ip-10-0-197-35.us-east-2.compute.internal Ready master 5h22m v1.27.3 ----- - -. Add a label to nodes. -+ -[source,terminal] ----- -$ oc -n openshift-compliance \ -label node ip-10-0-166-81.us-east-2.compute.internal \ -node-role.kubernetes.io/= ----- -+ -.Example output -+ -[source,terminal] ----- -node/ip-10-0-166-81.us-east-2.compute.internal labeled ----- - -. Create custom `MachineConfigPool` CR. 
-+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - name: - labels: - pools.operator.machineconfiguration.openshift.io/: '' <1> -spec: - machineConfigSelector: - matchExpressions: - - {key: machineconfiguration.openshift.io/role, operator: In, values: [worker,]} - nodeSelector: - matchLabels: - node-role.kubernetes.io/: "" ----- -<1> The `labels` field defines label name to add for Machine config pool(MCP). - -. Verify MCP created successfully. -+ -[source,terminal] ----- -$ oc get mcp -w ----- diff --git a/modules/compliance-apply-remediations-from-scans.adoc b/modules/compliance-apply-remediations-from-scans.adoc deleted file mode 100644 index 712384bcf369..000000000000 --- a/modules/compliance-apply-remediations-from-scans.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-advanced.adoc - -:_content-type: PROCEDURE -[id="installing-compliance-operator-cli_{context}"] -= Applying remediations generated by suite scans - -Although you can use the `autoApplyRemediations` boolean parameter in a `ComplianceSuite` object, you can alternatively annotate the object with `compliance.openshift.io/apply-remediations`. This allows the Operator to apply all of the created remediations. - -.Procedure - -* Apply the `compliance.openshift.io/apply-remediations` annotation by running: - -[source,terminal] ----- -$ oc -n openshift-compliance \ -annotate compliancesuites/workers-compliancesuite compliance.openshift.io/apply-remediations= ----- diff --git a/modules/compliance-applying-resource-requests-and-limits.adoc b/modules/compliance-applying-resource-requests-and-limits.adoc deleted file mode 100644 index 00f52953e297..000000000000 --- a/modules/compliance-applying-resource-requests-and-limits.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-scans.adoc - -:_content-type: CONCEPT -[id="compliance-applying-resource-requests-and-limits_{context}"] -= Applying resource requests and limits - -When the kubelet starts a container as part of a Pod, the kubelet passes that container's requests and limits for memory and CPU to the container runtime. In Linux, the container runtime configures the kernel cgroups that apply and enforce the limits you defined. - -The CPU limit defines how much CPU time the container can use. During each scheduling interval, the Linux kernel checks to see if this limit is exceeded. If so, the kernel waits before allowing the cgroup to resume execution. - -If several different containers (cgroups) want to run on a contended system, workloads with larger CPU requests are allocated more CPU time than workloads with small requests. The memory request is used during Pod scheduling. On a node that uses cgroups v2, the container runtime might use the memory request as a hint to set `memory.min` and `memory.low` values. - -If a container attempts to allocate more memory than this limit, the Linux kernel out-of-memory subsystem activates and intervenes by stopping one of the processes in the container that tried to allocate memory. The memory limit for the Pod or container can also apply to pages in memory-backed volumes, such as an emptyDir. - -The kubelet tracks `tmpfs` `emptyDir` volumes as container memory is used, rather than as local ephemeral storage. 
If a container exceeds its memory request and the node that it runs on becomes short of memory overall, the Pod's container might be evicted. - -[IMPORTANT] -==== -A container may not exceed its CPU limit for extended periods. Container run times do not stop Pods or containers for excessive CPU usage. To determine whether a container cannot be scheduled or is being killed due to resource limits, see _Troubleshooting the Compliance Operator_. -==== diff --git a/modules/compliance-applying.adoc b/modules/compliance-applying.adoc deleted file mode 100644 index d448f850ff83..000000000000 --- a/modules/compliance-applying.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -[id="compliance-applying_{context}"] -= Applying a remediation - -The boolean attribute `spec.apply` controls whether the remediation should be applied by the Compliance Operator. You can apply the remediation by setting the attribute to `true`: - -[source,terminal] ----- -$ oc -n openshift-compliance \ -patch complianceremediations/-sysctl-net-ipv4-conf-all-accept-redirects \ ---patch '{"spec":{"apply":true}}' --type=merge ----- - -After the Compliance Operator processes the applied remediation, the `status.ApplicationState` attribute would change to *Applied* or to *Error* if incorrect. When a machine config remediation is applied, that remediation along with all other applied remediations are rendered into a `MachineConfig` object named `75-$scan-name-$suite-name`. That `MachineConfig` object is subsequently rendered by the Machine Config Operator and finally applied to all the nodes in a machine config pool by an instance of the machine control daemon running on each node. - -Note that when the Machine Config Operator applies a new `MachineConfig` object to nodes in a pool, all the nodes belonging to the pool are rebooted. This might be inconvenient when applying multiple remediations, each of which re-renders the composite `75-$scan-name-$suite-name` `MachineConfig` object. To prevent applying the remediation immediately, you can pause the machine config pool by setting the `.spec.paused` attribute of a `MachineConfigPool` object to `true`. - -The Compliance Operator can apply remediations automatically. Set `autoApplyRemediations: true` in the `ScanSetting` top-level object. - -[WARNING] -==== -Applying remediations automatically should only be done with careful consideration. -==== diff --git a/modules/compliance-auto-update-remediations.adoc b/modules/compliance-auto-update-remediations.adoc deleted file mode 100644 index 90e920ed854a..000000000000 --- a/modules/compliance-auto-update-remediations.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-advanced.adoc - -:_content-type: PROCEDURE -[id="automatically-update-remediations_{context}"] -= Automatically update remediations - -In some cases, a scan with newer content might mark remediations as `OUTDATED`. As an administrator, you can apply the `compliance.openshift.io/remove-outdated` annotation to apply new remediations and remove the outdated ones. 
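Before you apply the annotation, you can review the remediations that currently belong to the suite. A quick check, assuming the same `workers-compliancesuite` suite that the procedure below uses:

[source,terminal]
----
$ oc -n openshift-compliance get complianceremediations \
-l compliance.openshift.io/suite=workers-compliancesuite
----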
- -.Procedure - -* Apply the `compliance.openshift.io/remove-outdated` annotation: - -[source,terminal] ----- -$ oc -n openshift-compliance \ -annotate compliancesuites/workers-compliancesuite compliance.openshift.io/remove-outdated= ----- - -Alternatively, set the `autoUpdateRemediations` flag in a `ScanSetting` or `ComplianceSuite` object to update the remediations automatically. diff --git a/modules/compliance-crd-advanced-compliance-scan.adoc b/modules/compliance-crd-advanced-compliance-scan.adoc deleted file mode 100644 index 8ae9e5acfd84..000000000000 --- a/modules/compliance-crd-advanced-compliance-scan.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="advance-compliance-scan-object_{context}"] -= Advanced ComplianceScan Object -The Compliance Operator includes options for advanced users who need to debug or integrate with existing tooling. It is recommended that you do not create a `ComplianceScan` object directly and instead manage it by using a `ComplianceSuite` object. - -.Example Advanced `ComplianceScan` object -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ComplianceScan -metadata: - name: -spec: - scanType: Node <1> - profile: xccdf_org.ssgproject.content_profile_moderate <2> - content: ssg-ocp4-ds.xml - contentImage: quay.io/complianceascode/ocp4:latest <3> - rule: "xccdf_org.ssgproject.content_rule_no_netrc_files" <4> - nodeSelector: <5> - node-role.kubernetes.io/worker: "" -status: - phase: DONE <6> - result: NON-COMPLIANT <7> ----- - -<1> Specify either `Node` or `Platform`. Node profiles scan the cluster nodes and platform profiles scan the Kubernetes platform. -<2> Specify the XCCDF identifier of the profile that you want to run. -<3> Specify the container image that encapsulates the profile files. -<4> Optional. Specify a single rule for the scan to run. The rule must be identified by its XCCDF ID and must belong to the specified profile. -+ -[NOTE] -==== -If you omit the `rule` parameter, the scan runs all the available rules of the specified profile. -==== -<5> If you are on {product-title} and want to generate a remediation, the `nodeSelector` label must match the `MachineConfigPool` label. -+ -[NOTE] -==== -If you do not specify the `nodeSelector` parameter or match the `MachineConfig` label, the scan still runs, but it does not create a remediation. -==== -<6> Indicates the current phase of the scan. -<7> Indicates the verdict of the scan. - -[IMPORTANT] -==== -If you delete a `ComplianceSuite` object, then all the associated scans get deleted. -==== - -When the scan is complete, it generates the results as custom resources of the `ComplianceCheckResult` object. However, the raw results are available in ARF format. These results are stored in a Persistent Volume (PV), which has a Persistent Volume Claim (PVC) associated with the name of the scan. -You can programmatically fetch the `ComplianceScans` events.
To generate events for the suite, run the following command: - -[source,terminal] ----- -oc get events --field-selector involvedObject.kind=ComplianceScan,involvedObject.name= ----- diff --git a/modules/compliance-crd-compliance-check-result.adoc b/modules/compliance-crd-compliance-check-result.adoc deleted file mode 100644 index c5ff012bb21d..000000000000 --- a/modules/compliance-crd-compliance-check-result.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="compliance-check-result_{context}"] -= ComplianceCheckResult object -When you run a scan with a specific profile, several rules in the profiles are verified. For each of these rules, a `ComplianceCheckResult` object is created, which provides the state of the cluster for a specific rule. - -.Example `ComplianceCheckResult` object -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ComplianceCheckResult -metadata: - labels: - compliance.openshift.io/check-severity: medium - compliance.openshift.io/check-status: FAIL - compliance.openshift.io/suite: example-compliancesuite - compliance.openshift.io/scan-name: workers-scan - name: workers-scan-no-direct-root-logins - namespace: openshift-compliance - ownerReferences: - - apiVersion: compliance.openshift.io/v1alpha1 - blockOwnerDeletion: true - controller: true - kind: ComplianceScan - name: workers-scan -description: -instructions: -id: xccdf_org.ssgproject.content_rule_no_direct_root_logins -severity: medium <1> -status: FAIL <2> ----- - -<1> Describes the severity of the scan check. -<2> Describes the result of the check. The possible values are: -* PASS: check was successful. -* FAIL: check was unsuccessful. -* INFO: check was successful and found something not severe enough to be considered an error. -* MANUAL: check cannot automatically assess the status and manual check is required. -* INCONSISTENT: different nodes report different results. -* ERROR: check run successfully, but could not complete. -* NOTAPPLICABLE: check did not run as it is not applicable. - -To get all the check results from a suite, run the following command: -[source,terminal] ----- -oc get compliancecheckresults \ --l compliance.openshift.io/suite=workers-compliancesuite ----- diff --git a/modules/compliance-crd-compliance-remediation.adoc b/modules/compliance-crd-compliance-remediation.adoc deleted file mode 100644 index 9a8341e17f4d..000000000000 --- a/modules/compliance-crd-compliance-remediation.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="compliance-remediation-object_{context}"] -= ComplianceRemediation object -For a specific check you can have a datastream specified fix. However, if a Kubernetes fix is available, then the Compliance Operator creates a `ComplianceRemediation` object. 
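You can retrieve a remediation in full to review the proposed fix before applying it. For example, assuming the `workers-scan-disable-users-coredumps` remediation shown in the example that follows:

[source,terminal]
----
$ oc -n openshift-compliance get complianceremediation \
workers-scan-disable-users-coredumps -o yaml
----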
- -.Example `ComplianceRemediation` object -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ComplianceRemediation -metadata: - labels: - compliance.openshift.io/suite: example-compliancesuite - compliance.openshift.io/scan-name: workers-scan - machineconfiguration.openshift.io/role: worker - name: workers-scan-disable-users-coredumps - namespace: openshift-compliance - ownerReferences: - - apiVersion: compliance.openshift.io/v1alpha1 - blockOwnerDeletion: true - controller: true - kind: ComplianceCheckResult - name: workers-scan-disable-users-coredumps - uid: -spec: - apply: false <1> - object: - current: <2> - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - spec: - config: - ignition: - version: 2.2.0 - storage: - files: - - contents: - source: data:,%2A%20%20%20%20%20hard%20%20%20core%20%20%20%200 - filesystem: root - mode: 420 - path: /etc/security/limits.d/75-disable_users_coredumps.conf - outdated: {} <3> ----- - -<1> `true` indicates the remediation was applied. `false` indicates the remediation was not applied. -<2> Includes the definition of the remediation. -<3> Indicates remediation that was previously parsed from an earlier version of the content. The Compliance Operator still retains the outdated objects to give the administrator a chance to review the new remediations before applying them. - -To get all the remediations from a suite, run the following command: -[source,terminal] ----- -oc get complianceremediations \ --l compliance.openshift.io/suite=workers-compliancesuite ----- - -To list all failing checks that can be remediated automatically, run the following command: -[source,terminal] ----- -oc get compliancecheckresults \ --l 'compliance.openshift.io/check-status in (FAIL),compliance.openshift.io/automated-remediation' ----- - -To list all failing checks that can be remediated manually, run the following command: -[source,terminal] ----- -oc get compliancecheckresults \ --l 'compliance.openshift.io/check-status in (FAIL),!compliance.openshift.io/automated-remediation' ----- diff --git a/modules/compliance-crd-compliance-suite.adoc b/modules/compliance-crd-compliance-suite.adoc deleted file mode 100644 index 372d5a2288f6..000000000000 --- a/modules/compliance-crd-compliance-suite.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="compliance-suite-object_{context}"] -= ComplianceSuite object -The `ComplianceSuite` object helps you keep track of the state of the scans. It contains the raw settings to create scans and the overall result. - -For `Node` type scans, you should map the scan to the `MachineConfigPool`, since it contains the remediations for any issues. If you specify a label, ensure it directly applies to a pool. 
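A quick way to confirm which node labels a machine config pool selects, so that the scan `nodeSelector` maps to exactly one pool, is to query the pool directly. This sketch assumes the default `worker` pool:

[source,terminal]
----
$ oc get mcp worker -o jsonpath='{.spec.nodeSelector.matchLabels}{"\n"}'
----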
- -.Example `ComplianceSuite` object -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ComplianceSuite -metadata: - name: -spec: - autoApplyRemediations: false <1> - schedule: "0 1 * * *" <2> - scans: <3> - - name: workers-scan - scanType: Node - profile: xccdf_org.ssgproject.content_profile_moderate - content: ssg-rhcos4-ds.xml - contentImage: quay.io/complianceascode/ocp4:latest - rule: "xccdf_org.ssgproject.content_rule_no_netrc_files" - nodeSelector: - node-role.kubernetes.io/worker: "" -status: - Phase: DONE <4> - Result: NON-COMPLIANT <5> - scanStatuses: - - name: workers-scan - phase: DONE - result: NON-COMPLIANT ----- -<1> Set to `true` to enable auto remediations. Set to `false` to disable auto remediations. -<2> Specify how often the scan should be run in cron format. -<3> Specify a list of scan specifications to run in the cluster. -<4> Indicates the progress of the scans. -<5> Indicates the overall verdict of the suite. - -The suite in the background creates the `ComplianceScan` object based on the `scans` parameter. -You can programmatically fetch the `ComplianceSuites` events. To get the events for the suite, run the following command: -[source,terminal] ----- -$ oc get events --field-selector involvedObject.kind=ComplianceSuite,involvedObject.name= ----- - -[IMPORTANT] -==== -You might create errors when you manually define the `ComplianceSuite`, since it contains the XCCDF attributes. -==== diff --git a/modules/compliance-crd-profile-bundle.adoc b/modules/compliance-crd-profile-bundle.adoc deleted file mode 100644 index 971f4ceb9beb..000000000000 --- a/modules/compliance-crd-profile-bundle.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="profile-bundle-object_{context}"] -= ProfileBundle object -When you install the Compliance Operator, it includes ready-to-run `ProfileBundle` objects. The Compliance Operator parses the `ProfileBundle` object and creates a `Profile` object for each profile in the bundle. It also parses `Rule` and `Variable` objects, which are used by the `Profile` object. - - -.Example `ProfileBundle` object -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ProfileBundle - name: - namespace: openshift-compliance -status: - dataStreamStatus: VALID <1> ----- -<1> Indicates whether the Compliance Operator was able to parse the content files. - -[NOTE] -==== -When the `contentFile` fails, an `errorMessage` attribute appears, which provides details of the error that occurred. -==== - -.Troubleshooting - -When you roll back to a known content image from an invalid image, the `ProfileBundle` object stops responding and displays `PENDING` state. As a workaround, you can move to a different image than the previous one. Alternatively, you can delete and re-create the `ProfileBundle` object to return to the working state. diff --git a/modules/compliance-crd-profile.adoc b/modules/compliance-crd-profile.adoc deleted file mode 100644 index 9dbb9ba919e4..000000000000 --- a/modules/compliance-crd-profile.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="profile-object_{context}"] -= Profile object - -The `Profile` object defines the rules and variables that can be evaluated for a certain compliance standard. 
It contains parsed out details about an OpenSCAP profile, such as its XCCDF identifier and profile checks for a `Node` or `Platform` type. You can either directly use the `Profile` object or further customize it using a `TailorProfile` object. - -[NOTE] -==== -You cannot create or modify the `Profile` object manually because it is derived from a single `ProfileBundle` object. Typically, a single `ProfileBundle` object can include several `Profile` objects. -==== - -.Example `Profile` object -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -description: -id: xccdf_org.ssgproject.content_profile_moderate <1> -kind: Profile -metadata: - annotations: - compliance.openshift.io/product: - compliance.openshift.io/product-type: Node <2> - creationTimestamp: "YYYY-MM-DDTMM:HH:SSZ" - generation: 1 - labels: - compliance.openshift.io/profile-bundle: - name: rhcos4-moderate - namespace: openshift-compliance - ownerReferences: - - apiVersion: compliance.openshift.io/v1alpha1 - blockOwnerDeletion: true - controller: true - kind: ProfileBundle - name: - uid: - resourceVersion: "" - selfLink: /apis/compliance.openshift.io/v1alpha1/namespaces/openshift-compliance/profiles/rhcos4-moderate - uid: -rules: <3> -- rhcos4-account-disable-post-pw-expiration -- rhcos4-accounts-no-uid-except-zero -- rhcos4-audit-rules-dac-modification-chmod -- rhcos4-audit-rules-dac-modification-chown -title: ----- -<1> Specify the XCCDF name of the profile. Use this identifier when you define a `ComplianceScan` object as the value of the profile attribute of the scan. -<2> Specify either a `Node` or `Platform`. Node profiles scan the cluster nodes and platform profiles scan the Kubernetes platform. -<3> Specify the list of rules for the profile. Each rule corresponds to a single check. diff --git a/modules/compliance-crd-rule.adoc b/modules/compliance-crd-rule.adoc deleted file mode 100644 index 1ee2b9c4608b..000000000000 --- a/modules/compliance-crd-rule.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="rule-object_{context}"] -= Rule object -The `Rule` object, which forms the profiles, are also exposed as objects. Use the `Rule` object to define your compliance check requirements and specify how it could be fixed. - -.Example `Rule` object -[source,yaml] ----- - apiVersion: compliance.openshift.io/v1alpha1 - checkType: Platform <1> - description: <description of the rule> - id: xccdf_org.ssgproject.content_rule_configure_network_policies_namespaces <2> - instructions: <manual instructions for the scan> - kind: Rule - metadata: - annotations: - compliance.openshift.io/rule: configure-network-policies-namespaces - control.compliance.openshift.io/CIS-OCP: 5.3.2 - control.compliance.openshift.io/NERC-CIP: CIP-003-3 R4;CIP-003-3 R4.2;CIP-003-3 - R5;CIP-003-3 R6;CIP-004-3 R2.2.4;CIP-004-3 R3;CIP-007-3 R2;CIP-007-3 R2.1;CIP-007-3 - R2.2;CIP-007-3 R2.3;CIP-007-3 R5.1;CIP-007-3 R6.1 - control.compliance.openshift.io/NIST-800-53: AC-4;AC-4(21);CA-3(5);CM-6;CM-6(1);CM-7;CM-7(1);SC-7;SC-7(3);SC-7(5);SC-7(8);SC-7(12);SC-7(13);SC-7(18) - labels: - compliance.openshift.io/profile-bundle: ocp4 - name: ocp4-configure-network-policies-namespaces - namespace: openshift-compliance - rationale: <description of why this rule is checked> - severity: high <3> - title: <summary of the rule> ----- -<1> Specify the type of check this rule executes. 
`Node` profiles scan the cluster nodes and `Platform` profiles scan the Kubernetes platform. An empty value indicates there is no automated check. -<2> Specify the XCCDF name of the rule, which is parsed directly from the datastream. -<3> Specify the severity of the rule when it fails. - -[NOTE] -==== -The `Rule` object gets an appropriate label for an easy identification of the associated `ProfileBundle` object. The `ProfileBundle` also gets specified in the `OwnerReferences` of this object. -==== diff --git a/modules/compliance-crd-scan-setting-binding.adoc b/modules/compliance-crd-scan-setting-binding.adoc deleted file mode 100644 index 974d0fa58350..000000000000 --- a/modules/compliance-crd-scan-setting-binding.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="scan-setting-binding-object_{context}"] -= ScanSettingBinding object - -Use the `ScanSettingBinding` object to specify your compliance requirements with reference to the `Profile` or `TailoredProfile` object. It is then linked to a `ScanSetting` object, which provides the operational constraints for the scan. Then the Compliance Operator generates the `ComplianceSuite` object based on the `ScanSetting` and `ScanSettingBinding` objects. - -.Example `ScanSettingBinding` object -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ScanSettingBinding -metadata: - name: <name of the scan> -profiles: <1> - # Node checks - - name: rhcos4-with-usb - kind: TailoredProfile - apiGroup: compliance.openshift.io/v1alpha1 - # Cluster checks - - name: ocp4-moderate - kind: Profile - apiGroup: compliance.openshift.io/v1alpha1 -settingsRef: <2> - name: my-companys-constraints - kind: ScanSetting - apiGroup: compliance.openshift.io/v1alpha1 ----- - -<1> Specify the details of `Profile` or `TailoredProfile` object to scan your environment. -<2> Specify the operational constraints, such as schedule and storage size. - -The creation of `ScanSetting` and `ScanSettingBinding` objects results in the compliance suite. To get the list of compliance suite, run the following command: -[source,terminal] ----- -$ oc get compliancesuites ----- - -[IMPORTANT] -==== -If you delete `ScanSettingBinding`, then compliance suite also is deleted. -==== diff --git a/modules/compliance-crd-scan-setting.adoc b/modules/compliance-crd-scan-setting.adoc deleted file mode 100644 index db839f74974c..000000000000 --- a/modules/compliance-crd-scan-setting.adoc +++ /dev/null @@ -1,95 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="scan-setting-object_{context}"] -= ScanSetting object -Use the `ScanSetting` object to define and reuse the operational policies to run your scans. -By default, the Compliance Operator creates the following `ScanSetting` objects: - -* *default* - it runs a scan every day at 1 AM on both master and worker nodes using a 1Gi Persistent Volume (PV) and keeps the last three results. Remediation is neither applied nor updated automatically. -* *default-auto-apply* - it runs a scan every day at 1AM on both control plane and worker nodes using a 1Gi Persistent Volume (PV) and keeps the last three results. Both `autoApplyRemediations` and `autoUpdateRemediations` are set to true. 
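You can list both objects in the `openshift-compliance` namespace to confirm that they exist and review their settings:

[source,terminal]
----
$ oc get scansettings -n openshift-compliance
----

The example that follows resembles the output of describing the `default-auto-apply` object.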
- -.Example `ScanSetting` object -[source,yaml] ----- -Name: default-auto-apply -Namespace: openshift-compliance -Labels: <none> -Annotations: <none> -API Version: compliance.openshift.io/v1alpha1 -Auto Apply Remediations: true -Auto Update Remediations: true -Kind: ScanSetting -Metadata: - Creation Timestamp: 2022-10-18T20:21:00Z - Generation: 1 - Managed Fields: - API Version: compliance.openshift.io/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:autoApplyRemediations: <1> - f:autoUpdateRemediations: <2> - f:rawResultStorage: - .: - f:nodeSelector: - .: - f:node-role.kubernetes.io/master: - f:pvAccessModes: - f:rotation: - f:size: - f:tolerations: - f:roles: - f:scanTolerations: - f:schedule: - f:showNotApplicable: - f:strictNodeScan: - Manager: compliance-operator - Operation: Update - Time: 2022-10-18T20:21:00Z - Resource Version: 38840 - UID: 8cb0967d-05e0-4d7a-ac1c-08a7f7e89e84 -Raw Result Storage: - Node Selector: - node-role.kubernetes.io/master: - Pv Access Modes: - ReadWriteOnce - Rotation: 3 <3> - Size: 1Gi <4> - Tolerations: - Effect: NoSchedule - Key: node-role.kubernetes.io/master - Operator: Exists - Effect: NoExecute - Key: node.kubernetes.io/not-ready - Operator: Exists - Toleration Seconds: 300 - Effect: NoExecute - Key: node.kubernetes.io/unreachable - Operator: Exists - Toleration Seconds: 300 - Effect: NoSchedule - Key: node.kubernetes.io/memory-pressure - Operator: Exists -Roles: <6> - master - worker -Scan Tolerations: - Operator: Exists -Schedule: "0 1 * * *" <5> -Show Not Applicable: false -Strict Node Scan: true -Events: <none> ----- -<1> Set to `true` to enable auto remediations. Set to `false` to disable auto remediations. -<2> Set to `true` to enable auto remediations for content updates. Set to `false` to disable auto remediations for content updates. -<3> Specify the number of stored scans in the raw result format. The default value is `3`. As the older results get rotated, the administrator must store the results elsewhere before the rotation happens. -<4> Specify the storage size that should be created for the scan to store the raw results. The default value is `1Gi` -<5> Specify how often the scan should be run in cron format. -+ -[NOTE] -==== -To disable the rotation policy, set the value to `0`. -==== -<6> Specify the `node-role.kubernetes.io` label value to schedule the scan for `Node` type. This value has to match the name of a `MachineConfigPool`. diff --git a/modules/compliance-crd-tailored-profile.adoc b/modules/compliance-crd-tailored-profile.adoc deleted file mode 100644 index 96408f985628..000000000000 --- a/modules/compliance-crd-tailored-profile.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="tailored-profile-object_{context}"] -= TailoredProfile object - -Use the `TailoredProfile` object to modify the default `Profile` object based on your organization requirements. You can enable or disable rules, set variable values, and provide justification for the customization. After validation, the `TailoredProfile` object creates a `ConfigMap`, which can be referenced by a `ComplianceScan` object. - -[TIP] -==== -You can use the `TailoredProfile` object by referencing it in a `ScanSettingBinding` object. For more information about `ScanSettingBinding`, see ScanSettingBinding object. 
-==== - -.Example `TailoredProfile` object -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: TailoredProfile -metadata: - name: rhcos4-with-usb -spec: - extends: rhcos4-moderate <1> - title: <title of the tailored profile> - disableRules: - - name: <name of a rule object to be disabled> - rationale: <description of why this rule is checked> -status: - id: xccdf_compliance.openshift.io_profile_rhcos4-with-usb <2> - outputRef: - name: rhcos4-with-usb-tp <3> - namespace: openshift-compliance - state: READY <4> ----- - -<1> This is optional. Name of the `Profile` object upon which the `TailoredProfile` is built. If no value is set, a new profile is created from the `enableRules` list. -<2> Specifies the XCCDF name of the tailored profile. -<3> Specifies the `ConfigMap` name, which can be used as the value of the `tailoringConfigMap.name` attribute of a `ComplianceScan`. -<4> Shows the state of the object such as `READY`, `PENDING`, and `FAILURE`. If the state of the object is `ERROR`, then the attribute `status.errorMessage` provides the reason for the failure. - -With the `TailoredProfile` object, it is possible to create a new `Profile` object using the `TailoredProfile` construct. To create a new `Profile`, set the following configuration parameters : - -* an appropriate title -* `extends` value must be empty -* scan type annotation on the `TailoredProfile` object: -+ -[source,yaml] ----- -compliance.openshift.io/product-type: Platform/Node ----- -+ -[NOTE] -==== -If you have not set the `product-type` annotation, the Compliance Operator defaults to `Platform` scan type. Adding the `-node` suffix to the name of the `TailoredProfile` object results in `node` scan type. -==== diff --git a/modules/compliance-crd-workflow.adoc b/modules/compliance-crd-workflow.adoc deleted file mode 100644 index a5f4cff79971..000000000000 --- a/modules/compliance-crd-workflow.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="custom-resource-definitions-workflow_{context}"] -= CRDs workflow - -The CRD provides you the following workflow to complete the compliance scans: - -. Define your compliance scan requirements -. Configure the compliance scan settings -. Process compliance requirements with compliance scans settings -. Monitor the compliance scans -. Check the compliance scan results diff --git a/modules/compliance-custom-node-pools.adoc b/modules/compliance-custom-node-pools.adoc deleted file mode 100644 index e0c7bd03c0ca..000000000000 --- a/modules/compliance-custom-node-pools.adoc +++ /dev/null @@ -1,94 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -:_content-type: PROCEDURE -[id="compliance-custom-node-pools_{context}"] -= Scanning custom node pools - -The Compliance Operator does not maintain a copy of each node pool configuration. The Compliance Operator aggregates consistent configuration options for all nodes within a single node pool into one copy of the configuration file. The Compliance Operator then uses the configuration file for a particular node pool to evaluate rules against nodes within that pool. - -If your cluster uses custom node pools outside the default `worker` and `master` node pools, you must supply additional variables to ensure the Compliance Operator aggregates a configuration file for that node pool. - -.Procedure - -. 
To check the configuration against all pools in an example cluster containing `master`, `worker`, and custom `example` node pools, set the value of the `ocp-var-role-master` and `opc-var-role-worker` fields to `example` in the `TailoredProfile` object: -+ -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: TailoredProfile -metadata: - name: cis-example-tp -spec: - extends: ocp4-cis - title: My modified NIST profile to scan example nodes - setValues: - - name: ocp4-var-role-master - value: example - rationale: test for example nodes - - name: ocp4-var-role-worker - value: example - rationale: test for example nodes - description: cis-example-scan ----- - -. Add the `example` role to the `ScanSetting` object that will be stored in the `ScanSettingBinding` CR: -+ -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ScanSetting -metadata: - name: default - namespace: openshift-compliance -rawResultStorage: - rotation: 3 - size: 1Gi -roles: -- worker -- master -- example -scanTolerations: -- effect: NoSchedule - key: node-role.kubernetes.io/master - operator: Exists -schedule: '0 1 * * *' ----- - -. Create a scan that uses the `ScanSettingBinding` CR: -+ -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ScanSettingBinding -metadata: - name: cis - namespace: openshift-compliance -profiles: -- apiGroup: compliance.openshift.io/v1alpha1 - kind: Profile - name: ocp4-cis -- apiGroup: compliance.openshift.io/v1alpha1 - kind: Profile - name: ocp4-cis-node -- apiGroup: compliance.openshift.io/v1alpha1 - kind: TailoredProfile - name: cis-example-tp -settingsRef: - apiGroup: compliance.openshift.io/v1alpha1 - kind: ScanSetting - name: default ----- - -The Compliance Operator checks the runtime `KubeletConfig` through the `Node/Proxy` API object and then uses variables such as `ocp-var-role-master` and `ocp-var-role-worker` to determine the nodes it performs the check against. In the `ComplianceCheckResult`, the `KubeletConfig` rules are shown as `ocp4-cis-kubelet-*`. The scan passes only if all selected nodes pass this check. - -.Verification - -* The Platform KubeletConfig rules are checked through the `Node/Proxy` object. You can find those rules by running the following command: -+ -[source,terminal] ----- -$ oc get rules -o json | jq '.items[] | select(.checkType == "Platform") | select(.metadata.name | contains("ocp4-kubelet-")) | .metadata.name' ----- - diff --git a/modules/compliance-custom-scc.adoc b/modules/compliance-custom-scc.adoc deleted file mode 100644 index fbb0ad118971..000000000000 --- a/modules/compliance-custom-scc.adoc +++ /dev/null @@ -1,88 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-advanced.adoc - -:_content-type: PROCEDURE -[id="compliance-custom-scc_{context}"] -= Creating a custom SCC for the Compliance Operator - -In some environments, you must create a custom Security Context Constraints (SCC) file to ensure the correct permissions are available to the Compliance Operator `api-resource-collector`. - -.Prerequisites - -* You must have `admin` privileges. - -.Procedure -. 
Define the SCC in a YAML file named `restricted-adjusted-compliance.yaml`: -+ -.`SecurityContextConstraints` object definition -[source,yaml] ----- - allowHostDirVolumePlugin: false - allowHostIPC: false - allowHostNetwork: false - allowHostPID: false - allowHostPorts: false - allowPrivilegeEscalation: true - allowPrivilegedContainer: false - allowedCapabilities: null - apiVersion: security.openshift.io/v1 - defaultAddCapabilities: null - fsGroup: - type: MustRunAs - kind: SecurityContextConstraints - metadata: - name: restricted-adjusted-compliance - priority: 30 <1> - readOnlyRootFilesystem: false - requiredDropCapabilities: - - KILL - - SETUID - - SETGID - - MKNOD - runAsUser: - type: MustRunAsRange - seLinuxContext: - type: MustRunAs - supplementalGroups: - type: RunAsAny - users: - - system:serviceaccount:openshift-compliance:api-resource-collector <2> - volumes: - - configMap - - downwardAPI - - emptyDir - - persistentVolumeClaim - - projected - - secret ----- -<1> The priority of this SCC must be higher than any other SCC that applies to the `system:authenticated` group. -<2> Service Account used by Compliance Operator Scanner pod. - -. Create the SCC: -+ -[source,terminal] ----- -$ oc create -n openshift-compliance -f restricted-adjusted-compliance.yaml ----- -+ -.Example output -[source,terminal] ----- -securitycontextconstraints.security.openshift.io/restricted-adjusted-compliance created ----- - -.Verification -. Verify the SCC was created: -+ -[source,terminal] ----- -$ oc get -n openshift-compliance scc restricted-adjusted-compliance ----- -+ -.Example output -[source,terminal] ----- -NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP PRIORITY READONLYROOTFS VOLUMES -restricted-adjusted-compliance false <no value> MustRunAs MustRunAsRange MustRunAs RunAsAny 30 false ["configMap","downwardAPI","emptyDir","persistentVolumeClaim","projected","secret"] ----- diff --git a/modules/compliance-custom-storage.adoc b/modules/compliance-custom-storage.adoc deleted file mode 100644 index ccb7299dd930..000000000000 --- a/modules/compliance-custom-storage.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-advanced.adoc - -[id="compliance-custom-storage_{context}"] -= Setting custom storage size for results -While the custom resources such as `ComplianceCheckResult` represent an aggregated result of one check across all scanned nodes, it can be useful to review the raw results as produced by the scanner. The raw results are produced in the ARF format and can be large (tens of megabytes per node), it is impractical to store them in a Kubernetes resource backed by the `etcd` key-value store. Instead, every scan creates a persistent volume (PV) which defaults to 1GB size. Depending on your environment, you may want to increase the PV size accordingly. This is done using the `rawResultStorage.size` attribute that is exposed in both the `ScanSetting` and `ComplianceScan` resources. - -A related parameter is `rawResultStorage.rotation` which controls how many scans are retained in the PV before the older scans are rotated. The default value is 3, setting the rotation policy to 0 disables the rotation. Given the default rotation policy and an estimate of 100MB per a raw ARF scan report, you can calculate the right PV size for your environment. 
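For example, a `Node` scan of a pool with ten nodes that keeps the default three rotations would need on the order of 10 x 3 x 100 MB, or roughly 3 GB of raw results, so the default 1 GB PV would be undersized in that environment.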
- -[id="using-custom-result-storage-values_{context}"] -== Using custom result storage values -Because {product-title} can be deployed in a variety of public clouds or bare metal, the Compliance Operator cannot determine available storage configurations. By default, the Compliance Operator will try to create the PV for storing results using the default storage class of the cluster, but a custom storage class can be configured using the `rawResultStorage.StorageClassName` attribute. - -[IMPORTANT] -==== -If your cluster does not specify a default storage class, this attribute must be set. -==== - -Configure the `ScanSetting` custom resource to use a standard storage class and create persistent volumes that are 10GB in size and keep the last 10 results: - -.Example `ScanSetting` CR - -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ScanSetting -metadata: - name: default - namespace: openshift-compliance -rawResultStorage: - storageClassName: standard - rotation: 10 - size: 10Gi -roles: -- worker -- master -scanTolerations: -- effect: NoSchedule - key: node-role.kubernetes.io/master - operator: Exists -schedule: '0 1 * * *' ----- diff --git a/modules/compliance-evaluate-kubeletconfig-rules.adoc b/modules/compliance-evaluate-kubeletconfig-rules.adoc deleted file mode 100644 index 181e9a42a303..000000000000 --- a/modules/compliance-evaluate-kubeletconfig-rules.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -:_content-type: CONCEPT -[id="compliance-evaluate-kubeletconfig-rules_{context}"] -= Evaluating KubeletConfig rules against default configuration values - -{product-title} infrastructure might contain incomplete configuration files at run time, and nodes assume default configuration values for missing configuration options. Some configuration options can be passed as command line arguments. As a result, the Compliance Operator cannot verify if the configuration file on the node is complete because it might be missing options used in the rule checks. - -To prevent false negative results where the default configuration value passes a check, the Compliance Operator uses the Node/Proxy API to fetch the configuration for each node in a node pool, then all configuration options that are consistent across nodes in the node pool are stored in a file that represents the configuration for all nodes within that node pool. This increases the accuracy of the scan results. - -No additional configuration changes are required to use this feature with default `master` and `worker` node pools configurations. \ No newline at end of file diff --git a/modules/compliance-filtering-results.adoc b/modules/compliance-filtering-results.adoc deleted file mode 100644 index 6f3356d5fd45..000000000000 --- a/modules/compliance-filtering-results.adoc +++ /dev/null @@ -1,96 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -:_content-type: PROCEDURE -[id="filtering-compliance-check-results_{context}"] -= Filters for compliance check results - -By default, the `ComplianceCheckResult` objects are labeled with several useful labels that allow you to query the checks and decide on the next steps after the results are generated. 
- -List checks that belong to a specific suite: - -[source,terminal] ----- -$ oc get -n openshift-compliance compliancecheckresults \ - -l compliance.openshift.io/suite=workers-compliancesuite ----- - -List checks that belong to a specific scan: - -[source,terminal] ----- -$ oc get -n openshift-compliance compliancecheckresults \ --l compliance.openshift.io/scan=workers-scan ----- - -Not all `ComplianceCheckResult` objects create `ComplianceRemediation` objects. Only `ComplianceCheckResult` objects that can be remediated automatically do. A `ComplianceCheckResult` object has a related remediation if it is labeled with the `compliance.openshift.io/automated-remediation` label. The name of the remediation is the same as the name of the check. - -List all failing checks that can be remediated automatically: - -[source,terminal] ----- -$ oc get -n openshift-compliance compliancecheckresults \ --l 'compliance.openshift.io/check-status=FAIL,compliance.openshift.io/automated-remediation' ----- - - -List all failing checks sorted by severity: - -[source,terminal] ----- -$ oc get compliancecheckresults -n openshift-compliance \ --l 'compliance.openshift.io/check-status=FAIL,compliance.openshift.io/check-severity=high' ----- - -.Example output -[source,terminal] ----- -NAME STATUS SEVERITY -nist-moderate-modified-master-configure-crypto-policy FAIL high -nist-moderate-modified-master-coreos-pti-kernel-argument FAIL high -nist-moderate-modified-master-disable-ctrlaltdel-burstaction FAIL high -nist-moderate-modified-master-disable-ctrlaltdel-reboot FAIL high -nist-moderate-modified-master-enable-fips-mode FAIL high -nist-moderate-modified-master-no-empty-passwords FAIL high -nist-moderate-modified-master-selinux-state FAIL high -nist-moderate-modified-worker-configure-crypto-policy FAIL high -nist-moderate-modified-worker-coreos-pti-kernel-argument FAIL high -nist-moderate-modified-worker-disable-ctrlaltdel-burstaction FAIL high -nist-moderate-modified-worker-disable-ctrlaltdel-reboot FAIL high -nist-moderate-modified-worker-enable-fips-mode FAIL high -nist-moderate-modified-worker-no-empty-passwords FAIL high -nist-moderate-modified-worker-selinux-state FAIL high -ocp4-moderate-configure-network-policies-namespaces FAIL high -ocp4-moderate-fips-mode-enabled-on-all-nodes FAIL high ----- - -List all failing checks that must be remediated manually: - -[source,terminal] ----- -$ oc get -n openshift-compliance compliancecheckresults \ --l 'compliance.openshift.io/check-status=FAIL,!compliance.openshift.io/automated-remediation' ----- - -The manual remediation steps are typically stored in the `description` attribute in the `ComplianceCheckResult` object. - -.ComplianceCheckResult Status -[cols="1,1",options="header"] -|=== -| ComplianceCheckResult Status | Description -| PASS -| Compliance check ran to completion and passed. -| FAIL -| Compliance check ran to completion and failed. -| INFO -| Compliance check ran to completion and found something not severe enough to be considered an error. -| MANUAL -| Compliance check does not have a way to automatically assess the success or failure and must be checked manually. -| INCONSISTENT -| Compliance check reports different results from different sources, typically cluster nodes. -| ERROR -| Compliance check ran, but could not complete properly. -| NOT-APPLICABLE -| Compliance check did not run because it is not applicable or not selected. 
-|=== diff --git a/modules/compliance-imagestreams.adoc b/modules/compliance-imagestreams.adoc deleted file mode 100644 index 4d7c883701af..000000000000 --- a/modules/compliance-imagestreams.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-manage.adoc - -:_content-type: PROCEDURE -[id="compliance-imagestreams_{context}"] -= Using image streams - -The `contentImage` reference points to a valid `ImageStreamTag`, and the Compliance Operator ensures that the content stays up to date automatically. - -[NOTE] -==== -`ProfileBundle` objects also accept `ImageStream` references. -==== - -.Example image stream -[source,terminal] ----- -$ oc get is -n openshift-compliance ----- - -.Example output -[source,terminal] ----- -NAME IMAGE REPOSITORY TAGS UPDATED -openscap-ocp4-ds image-registry.openshift-image-registry.svc:5000/openshift-compliance/openscap-ocp4-ds latest 32 seconds ago ----- - -.Procedure -. Ensure that the lookup policy is set to local: -+ -[source,terminal] ----- -$ oc patch is openscap-ocp4-ds \ - -p '{"spec":{"lookupPolicy":{"local":true}}}' \ - --type=merge - imagestream.image.openshift.io/openscap-ocp4-ds patched - -n openshift-compliance ----- - -. Use the name of the `ImageStreamTag` for the `ProfileBundle` by retrieving the `istag` name: -+ -[source,terminal] ----- -$ oc get istag -n openshift-compliance ----- -+ -.Example output -[source,terminal] ----- -NAME IMAGE REFERENCE UPDATED -openscap-ocp4-ds:latest image-registry.openshift-image-registry.svc:5000/openshift-compliance/openscap-ocp4-ds@sha256:46d7ca9b7055fe56ade818ec3e62882cfcc2d27b9bf0d1cbae9f4b6df2710c96 3 minutes ago ----- - -. Create the `ProfileBundle`: -+ -[source,terminal] ----- -$ cat << EOF | oc create -f - -apiVersion: compliance.openshift.io/v1alpha1 -kind: ProfileBundle -metadata: - name: mybundle - spec: - contentImage: openscap-ocp4-ds:latest - contentFile: ssg-rhcos4-ds.xml -EOF ----- - -This `ProfileBundle` will track the image and any changes that are applied to it, such as updating the tag to point to a different hash, will immediately be reflected in the `ProfileBundle`. diff --git a/modules/compliance-inconsistent.adoc b/modules/compliance-inconsistent.adoc deleted file mode 100644 index d0d734877259..000000000000 --- a/modules/compliance-inconsistent.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -:_content-type: PROCEDURE -[id="compliance-inconsistent_{context}"] -= Inconsistent ComplianceScan -The `ScanSetting` object lists the node roles that the compliance scans generated from the `ScanSetting` or `ScanSettingBinding` objects would scan. Each node role usually maps to a machine config pool. - -[IMPORTANT] -==== -It is expected that all machines in a machine config pool are identical and all scan results from the nodes in a pool should be identical. -==== - -If some of the results are different from others, the Compliance Operator flags a `ComplianceCheckResult` object where some of the nodes will report as `INCONSISTENT`. All `ComplianceCheckResult` objects are also labeled with `compliance.openshift.io/inconsistent-check`. - -Because the number of machines in a pool might be quite large, the Compliance Operator attempts to find the most common state and list the nodes that differ from the common state. 
The most common state is stored in the `compliance.openshift.io/most-common-status` annotation and the annotation `compliance.openshift.io/inconsistent-source` contains pairs of `hostname:status` of check statuses that differ from the most common status. If no common state can be found, all the `hostname:status` pairs are listed in the `compliance.openshift.io/inconsistent-source annotation`. - -If possible, a remediation is still created so that the cluster can converge to a compliant status. However, this might not always be possible and correcting the difference between nodes must be done manually. The compliance scan must be re-run to get a consistent result by annotating the scan with the `compliance.openshift.io/rescan=` option: - -[source,terminal] ----- -$ oc -n openshift-compliance \ -annotate compliancescans/rhcos4-e8-worker compliance.openshift.io/rescan= ----- diff --git a/modules/compliance-increasing-operator-limits.adoc b/modules/compliance-increasing-operator-limits.adoc deleted file mode 100644 index 77c8f5bd17b8..000000000000 --- a/modules/compliance-increasing-operator-limits.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="compliance-increasing-operator-limits_{context}"] -= Increasing Compliance Operator resource limits - -In some cases, the Compliance Operator might require more memory than the default limits allow. The best way to mitigate this issue is to set custom resource limits. - -To increase the default memory and CPU limits of scanner pods, see _`ScanSetting` Custom resource_. - -.Procedure - -. To increase the Operator's memory limits to 500 Mi, create the following patch file named `co-memlimit-patch.yaml`: -+ -[source,yaml] ----- -spec: - config: - resources: - limits: - memory: 500Mi ----- - -. Apply the patch file: -+ -[source,terminal] ----- -$ oc patch sub compliance-operator -nopenshift-compliance --patch-file co-memlimit-patch.yaml --type=merge ----- \ No newline at end of file diff --git a/modules/compliance-kubeletconfig-sub-pool-remediation.adoc b/modules/compliance-kubeletconfig-sub-pool-remediation.adoc deleted file mode 100644 index 471299ca99c7..000000000000 --- a/modules/compliance-kubeletconfig-sub-pool-remediation.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -:_content-type: PROCEDURE -[id="compliance-kubeletconfig-sub-pool-remediation_{context}"] -= Remediating `KubeletConfig` sub pools - -`KubeletConfig` remediation labels can be applied to `MachineConfigPool` sub-pools. 
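To confirm the name of the sub-pool and the labels it already carries, you can list the machine config pools first:

[source,terminal]
----
$ oc get mcp --show-labels
----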
- -.Procedure - -* Add a label to the sub-pool `MachineConfigPool` CR: -+ -[source,terminal] ----- -$ oc label mcp <sub-pool-name> pools.operator.machineconfiguration.openshift.io/<sub-pool-name>= ----- \ No newline at end of file diff --git a/modules/compliance-manual.adoc b/modules/compliance-manual.adoc deleted file mode 100644 index 52ef596ecb58..000000000000 --- a/modules/compliance-manual.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -:_content-type: PROCEDURE -[id="compliance-manual_{context}"] -= Remediating a platform check manually - -Checks for Platform scans typically have to be remediated manually by the administrator for two reasons: - -* It is not always possible to automatically determine the value that must be set. One of the checks requires that a list of allowed registries is provided, but the scanner has no way of knowing which registries the organization wants to allow. - -* Different checks modify different API objects, requiring automated remediation to possess `root` or superuser access to modify objects in the cluster, which is not advised. - -.Procedure -. The example below uses the `ocp4-ocp-allowed-registries-for-import` rule, which would fail on a default {product-title} installation. Inspect the rule by running `oc get rule.compliance/ocp4-ocp-allowed-registries-for-import -oyaml`. The rule limits the registries that users are allowed to import images from by setting the `allowedRegistriesForImport` attribute. The _warning_ attribute of the rule also shows the API object that is checked, so you can modify that object to remediate the issue: -+ -[source,terminal] ----- -$ oc edit image.config.openshift.io/cluster ----- -+ -.Example output -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Image -metadata: - annotations: - release.openshift.io/create-only: "true" - creationTimestamp: "2020-09-10T10:12:54Z" - generation: 2 - name: cluster - resourceVersion: "363096" - selfLink: /apis/config.openshift.io/v1/images/cluster - uid: 2dcb614e-2f8a-4a23-ba9a-8e33cd0ff77e -spec: - allowedRegistriesForImport: - - domainName: registry.redhat.io -status: - externalRegistryHostnames: - - default-route-openshift-image-registry.apps.user-cluster-09-10-12-07.devcluster.openshift.com - internalRegistryHostname: image-registry.openshift-image-registry.svc:5000 ----- - -. Re-run the scan: -+ -[source,terminal] ----- -$ oc -n openshift-compliance \ -annotate compliancescans/rhcos4-e8-worker compliance.openshift.io/rescan= ----- diff --git a/modules/compliance-new-tailored-profiles.adoc b/modules/compliance-new-tailored-profiles.adoc deleted file mode 100644 index 9c8468d3361c..000000000000 --- a/modules/compliance-new-tailored-profiles.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-tailor.adoc - -:_content-type: PROCEDURE -[id="compliance-new-tailored-profiles_{context}"] -= Creating a new tailored profile - -You can write a tailored profile from scratch using the `TailoredProfile` object. Set an appropriate `title` and `description` and leave the `extends` field empty. Indicate to the Compliance Operator what type of scan this custom profile generates: - -* Node scan: Scans the Operating System. -* Platform scan: Scans the OpenShift configuration. 
- -.Procedure - -Set the following annotation on the `TailoredProfile` object: -+ -.Example `new-profile.yaml` -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: TailoredProfile -metadata: - name: new-profile - annotations: - compliance.openshift.io/product-type: Node <1> -spec: - extends: - description: My custom profile <2> - title: Custom profile <3> ----- -<1> Set `Node` or `Platform` accordingly. -<2> Use the `description` field to describe the function of the new `TailoredProfile` object. -<3> Give your `TailoredProfile` object a title with the `title` field. -+ -[NOTE] -==== -Adding the `-node` suffix to the `name` field of the `TailoredProfile` object is similar to adding the `Node` product type annotation and generates an Operating System scan. -==== \ No newline at end of file diff --git a/modules/compliance-objects.adoc b/modules/compliance-objects.adoc deleted file mode 100644 index 0db4e4b9a693..000000000000 --- a/modules/compliance-objects.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-advanced.adoc - -[id="compliance-objects_{context}"] -= Using the ComplianceSuite and ComplianceScan objects directly - -While it is recommended that users take advantage of the `ScanSetting` and `ScanSettingBinding` objects to define the suites and scans, there are valid use cases to define the `ComplianceSuite` objects directly: - -* Specifying only a single rule to scan. This can be useful for debugging together with the `debug: true` attribute which increases the OpenSCAP scanner verbosity, as the debug mode tends to get quite verbose otherwise. Limiting the test to one rule helps to lower the amount of debug information. - -* Providing a custom nodeSelector. In order for a remediation to be applicable, the nodeSelector must match a pool. - -* Pointing the Scan to a bespoke config map with a tailoring file. - -* For testing or development when the overhead of parsing profiles from bundles is not required. - -The following example shows a `ComplianceSuite` that scans the worker machines with only a single rule: - -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ComplianceSuite -metadata: - name: workers-compliancesuite -spec: - scans: - - name: workers-scan - profile: xccdf_org.ssgproject.content_profile_moderate - content: ssg-rhcos4-ds.xml - contentImage: quay.io/complianceascode/ocp4:latest - debug: true - rule: xccdf_org.ssgproject.content_rule_no_direct_root_logins - nodeSelector: - node-role.kubernetes.io/worker: "" ----- - -The `ComplianceSuite` object and the `ComplianceScan` objects referred to above specify several attributes in a format that OpenSCAP expects. - -To find out the profile, content, or rule values, you can start by creating a similar Suite from `ScanSetting` and `ScanSettingBinding` or inspect the objects parsed from the `ProfileBundle` objects like rules or profiles. Those objects contain the `xccdf_org` identifiers you can use to refer to them from a `ComplianceSuite`. 
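As a rough sketch of that lookup (assuming the default `openshift-compliance` namespace and the parsed `rhcos4` content referenced in these modules; substitute your own object names), the `xccdf_org` identifier of a parsed profile or rule is stored in its `id` field and can be printed directly:

[source,terminal]
----
$ oc get profiles.compliance rhcos4-e8 -n openshift-compliance \
  -o jsonpath='{.id}{"\n"}'

$ oc get rules.compliance rhcos4-audit-rules-login-events -n openshift-compliance \
  -o jsonpath='{.id}{"\n"}'
----

.Example output
[source,terminal]
----
xccdf_org.ssgproject.content_profile_e8
xccdf_org.ssgproject.content_rule_audit_rules_login_events
----

These values can then be copied into the `profile` and `rule` fields of a scan entry in the `ComplianceSuite`.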
diff --git a/modules/compliance-operator-cli-installation.adoc b/modules/compliance-operator-cli-installation.adoc deleted file mode 100644 index f9cb58f3f487..000000000000 --- a/modules/compliance-operator-cli-installation.adoc +++ /dev/null @@ -1,102 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-installation.adoc - -:_content-type: PROCEDURE -[id="installing-compliance-operator-cli_{context}"] -= Installing the Compliance Operator using the CLI - -.Prerequisites - -* You must have `admin` privileges. - -.Procedure - -. Define a `Namespace` object: -+ -.Example `namespace-object.yaml` -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - labels: - openshift.io/cluster-monitoring: "true" - pod-security.kubernetes.io/enforce: privileged <1> - name: openshift-compliance ----- -<1> In {product-title} {product-version}, the pod security label must be set to `privileged` at the namespace level. - -. Create the `Namespace` object: -+ -[source,terminal] ----- -$ oc create -f namespace-object.yaml ----- - -. Define an `OperatorGroup` object: -+ -.Example `operator-group-object.yaml` -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: compliance-operator - namespace: openshift-compliance -spec: - targetNamespaces: - - openshift-compliance ----- - -. Create the `OperatorGroup` object: -+ -[source,terminal] ----- -$ oc create -f operator-group-object.yaml ----- - -. Define a `Subscription` object: -+ -.Example `subscription-object.yaml` -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: compliance-operator-sub - namespace: openshift-compliance -spec: - channel: "stable" - installPlanApproval: Automatic - name: compliance-operator - source: redhat-operators - sourceNamespace: openshift-marketplace ----- -. Create the `Subscription` object: -+ -[source,terminal] ----- -$ oc create -f subscription-object.yaml ----- - -[NOTE] -==== -If you are setting the global scheduler feature and enable `defaultNodeSelector`, you must create the namespace manually and update the annotations of the `openshift-compliance` namespace, or the namespace where the Compliance Operator was installed, with `openshift.io/node-selector: “”`. This removes the default node selector and prevents deployment failures. -==== - -.Verification - -. Verify the installation succeeded by inspecting the CSV file: -+ -[source,terminal] ----- -$ oc get csv -n openshift-compliance ----- - -. Verify that the Compliance Operator is up and running: -+ -[source,terminal] ----- -$ oc get deploy -n openshift-compliance ----- diff --git a/modules/compliance-operator-cli-uninstall.adoc b/modules/compliance-operator-cli-uninstall.adoc deleted file mode 100644 index 47625212d37c..000000000000 --- a/modules/compliance-operator-cli-uninstall.adoc +++ /dev/null @@ -1,117 +0,0 @@ -// Module included in the following assemblies: -// -// security/compliance_operator/compliance-operator-uninstallation.adoc - -:_content-type: PROCEDURE -[id="compliance-operator-uninstall-cli_{context}"] -= Uninstalling the OpenShift Compliance Operator from {product-title} using the CLI - -To remove the Compliance Operator, you must first delete the objects in the namespace. After the objects are removed, you can remove the Operator and its namespace by deleting the *openshift-compliance* project. 
- -.Prerequisites - -* Access to an {product-title} cluster using an account with `cluster-admin` permissions. -* The OpenShift Compliance Operator must be installed. - -.Procedure - -. Delete all objects in the namespace. - -.. Delete the `ScanSettingBinding` objects: -+ -[source,terminal] ----- -$ oc delete ssb <ScanSettingBinding-name> -n openshift-compliance ----- - -.. Delete the `ScanSetting` objects: -+ -[source,terminal] ----- -$ oc delete ss <ScanSetting-name> -n openshift-compliance ----- - -.. Delete the `ComplianceSuite` objects: -+ -[source,terminal] ----- -$ oc delete suite <compliancesuite-name> -n openshift-compliance ----- - -.. Delete the `ComplianceScan` objects: -+ -[source,terminal] ----- -$ oc delete scan <compliancescan-name> -n openshift-compliance ----- - -.. Obtain the `ProfileBundle` objects: -+ -[source,terminal] ----- -$ oc get profilebundle.compliance -n openshift-compliance ----- -+ -.Example output -[source,terminal] ----- -NAME CONTENTIMAGE CONTENTFILE STATUS -ocp4 registry.redhat.io/compliance/openshift-compliance-content-rhel8@sha256:<hash> ssg-ocp4-ds.xml VALID -rhcos4 registry.redhat.io/compliance/openshift-compliance-content-rhel8@sha256:<hash> ssg-rhcos4-ds.xml VALID ----- - -.. Delete the `ProfileBundle` objects: -+ -[source,terminal] ----- -$ oc delete profilebundle.compliance ocp4 rhcos4 -n openshift-compliance ----- -+ -.Example output -[source,terminal] ----- -profilebundle.compliance.openshift.io "ocp4" deleted -profilebundle.compliance.openshift.io "rhcos4" deleted ----- - -. Delete the `Subscription` object: -+ -[source,terminal] ----- -$ oc delete sub <Subscription-Name> -n openshift-compliance ----- - -. Delete the CSV object: -+ -[source,terminal] ----- -$ oc delete csv <CSV-name> -n openshift-compliance ----- - -. Delete the project: -+ -[source,terminal] ----- -$ oc delete project openshift-compliance ----- -+ -.Example output -[source,terminal] ----- -project.project.openshift.io "openshift-compliance" deleted ----- - -.Verification - -. Confirm the namespace is deleted: -+ -[source,terminal] ----- -$ oc get project/openshift-compliance ----- -+ -.Example output -[source,terminal] ----- -Error from server (NotFound): namespaces "openshift-compliance" not found ----- \ No newline at end of file diff --git a/modules/compliance-operator-console-installation.adoc b/modules/compliance-operator-console-installation.adoc deleted file mode 100644 index c0dd1e7ac2c4..000000000000 --- a/modules/compliance-operator-console-installation.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-installation.adoc - -:_content-type: PROCEDURE -[id="installing-compliance-operator-web-console_{context}"] -= Installing the Compliance Operator through the web console - -.Prerequisites - -* You must have `admin` privileges. - -.Procedure - -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. -. Search for the Compliance Operator, then click *Install*. -. Keep the default selection of *Installation mode* and *namespace* to ensure that the Operator will be installed to the `openshift-compliance` namespace. -. Click *Install*. - -.Verification - -To confirm that the installation is successful: - -. Navigate to the *Operators* -> *Installed Operators* page. -. Check that the Compliance Operator is installed in the `openshift-compliance` namespace and its status is `Succeeded`. - -If the Operator is not installed successfully: - -. 
Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. -. Navigate to the *Workloads* -> *Pods* page and check the logs in any pods in the `openshift-compliance` project that are reporting issues. diff --git a/modules/compliance-operator-hcp-install.adoc b/modules/compliance-operator-hcp-install.adoc deleted file mode 100644 index ff9541360399..000000000000 --- a/modules/compliance-operator-hcp-install.adoc +++ /dev/null @@ -1,109 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-installation.adoc - -:_content-type: PROCEDURE -[id="installing-compliance-operator-hcp_{context}"] -= Installing the Compliance Operator on Hosted control planes - -The Compliance Operator can be installed in Hosted control planes using the OperatorHub by creating a `Subscription` file. - -:FeatureName: Hosted control planes -include::snippets/technology-preview.adoc[] - -.Prerequisites - -* You must have `admin` privileges. - -.Procedure - -. Define a `Namespace` object similar to the following: -+ -.Example `namespace-object.yaml` -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - labels: - openshift.io/cluster-monitoring: "true" - pod-security.kubernetes.io/enforce: privileged <1> - name: openshift-compliance ----- -<1> In {product-title} {product-version}, the pod security label must be set to `privileged` at the namespace level. - -. Create the `Namespace` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f namespace-object.yaml ----- - -. Define an `OperatorGroup` object: -+ -.Example `operator-group-object.yaml` -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: compliance-operator - namespace: openshift-compliance -spec: - targetNamespaces: - - openshift-compliance ----- - -. Create the `OperatorGroup` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f operator-group-object.yaml ----- - -. Define a `Subscription` object: -+ -.Example `subscription-object.yaml` -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: compliance-operator-sub - namespace: openshift-compliance -spec: - channel: "stable" - installPlanApproval: Automatic - name: compliance-operator - source: redhat-operators - sourceNamespace: openshift-marketplace - config: - nodeSelector: - node-role.kubernetes.io/worker: "" - env: - - name: PLATFORM - value: "HyperShift" ----- - -. Create the `Subscription` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f subscription-object.yaml ----- - -.Verification - -. Verify the installation succeeded by inspecting the CSV file by running the following command: -+ -[source,terminal] ----- -$ oc get csv -n openshift-compliance ----- - -. 
Verify that the Compliance Operator is up and running by running the following command: -+ -[source,terminal] ----- -$ oc get deploy -n openshift-compliance ----- \ No newline at end of file diff --git a/modules/compliance-operator-uninstall.adoc b/modules/compliance-operator-uninstall.adoc deleted file mode 100644 index d45269360b85..000000000000 --- a/modules/compliance-operator-uninstall.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// security/compliance_operator/compliance-operator-uninstallation.adoc - -:_content-type: PROCEDURE -[id="compliance-operator-uninstall_{context}"] -= Uninstalling the OpenShift Compliance Operator from {product-title} using the web console - -To remove the Compliance Operator, you must first delete the objects in the namespace. After the objects are removed, you can remove the Operator and its namespace by deleting the *openshift-compliance* project. - -.Prerequisites - -* Access to an {product-title} cluster using an account with `cluster-admin` permissions. -* The OpenShift Compliance Operator must be installed. - -.Procedure - -To remove the Compliance Operator by using the {product-title} web console: - -. Go to the *Operators* -> *Installed Operators* -> *Compliance Operator* page. - -.. Click *All instances*. - -.. In *All namespaces*, click the Options menu {kebab} and delete all `ScanSettingBinding`, `ComplianceSuite`, `ComplianceScan`, and `ProfileBundle` objects. - -. Switch to the *Administration* -> *Operators* -> *Installed Operators* page. - -. Click the Options menu {kebab} on the *Compliance Operator* entry and select *Uninstall Operator*. - -. Switch to the *Home* -> *Projects* page. - -. Search for 'compliance'. - -. Click the Options menu {kebab} next to the *openshift-compliance* project, and select *Delete Project*. - -.. Confirm the deletion by typing `openshift-compliance` in the dialog box, and click *Delete*. \ No newline at end of file diff --git a/modules/compliance-priorityclass.adoc b/modules/compliance-priorityclass.adoc deleted file mode 100644 index fc62847beb78..000000000000 --- a/modules/compliance-priorityclass.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-advanced.adoc - -:_content-type: PROCEDURE -[id="compliance-priorityclass_{context}"] -= Setting `PriorityClass` for `ScanSetting` scans - -In large scale environments, the priority of the default `PriorityClass` object can be too low to guarantee that pods execute scans on time. For clusters that must maintain compliance or guarantee automated scanning, it is recommended to set the `priorityClass` variable to ensure that the Compliance Operator is always given priority in resource-constrained situations. 
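The procedure below references a `compliance-high-priority` class. If no such `PriorityClass` exists in your cluster, you would need to create one first; the following is a minimal sketch only, where the name matches the example `ScanSetting` and the `value` is an illustrative assumption to be tuned for your environment:

[source,yaml]
----
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: compliance-high-priority <1>
value: 1000000 <2>
globalDefault: false
description: Priority class for Compliance Operator scan pods.
----
<1> Must match the `priorityClass` value referenced in the `ScanSetting` object.
<2> Illustrative priority value; pods with higher values are scheduled ahead of pods with lower ones.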
- -.Procedure - -* Set the `PriorityClass` variable: -+ -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -strictNodeScan: true -metadata: - name: default - namespace: openshift-compliance -priorityClass: compliance-high-priority <1> -kind: ScanSetting -showNotApplicable: false -rawResultStorage: - nodeSelector: - node-role.kubernetes.io/master: '' - pvAccessModes: - - ReadWriteOnce - rotation: 3 - size: 1Gi - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - operator: Exists - - effect: NoExecute - key: node.kubernetes.io/not-ready - operator: Exists - tolerationSeconds: 300 - - effect: NoExecute - key: node.kubernetes.io/unreachable - operator: Exists - tolerationSeconds: 300 - - effect: NoSchedule - key: node.kubernetes.io/memory-pressure - operator: Exists -schedule: 0 1 * * * -roles: - - master - - worker -scanTolerations: - - operator: Exists ----- -<1> If the `PriorityClass` referenced in the `ScanSetting` cannot be found, the Operator will leave the `PriorityClass` empty, issue a warning, and continue scheduling scans without a `PriorityClass`. \ No newline at end of file diff --git a/modules/compliance-profilebundle.adoc b/modules/compliance-profilebundle.adoc deleted file mode 100644 index efad4006acb9..000000000000 --- a/modules/compliance-profilebundle.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-manage.adoc - -:_content-type: CONCEPT -[id="compliance-profilebundle_{context}"] -= ProfileBundle CR example - -The `ProfileBundle` object requires two pieces of information: the URL of a container image that contains the `contentImage` and the file that contains the compliance content. The `contentFile` parameter is relative to the root of the file system. You can define the built-in `rhcos4` `ProfileBundle` object as shown in the following example: - -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ProfileBundle -metadata: - creationTimestamp: "2022-10-19T12:06:30Z" - finalizers: - - profilebundle.finalizers.compliance.openshift.io - generation: 1 - name: rhcos4 - namespace: openshift-compliance - resourceVersion: "46741" - uid: 22350850-af4a-4f5c-9a42-5e7b68b82d7d -spec: - contentFile: ssg-rhcos4-ds.xml <1> - contentImage: registry.redhat.io/compliance/openshift-compliance-content-rhel8@sha256:900e... <2> -status: - conditions: - - lastTransitionTime: "2022-10-19T12:07:51Z" - message: Profile bundle successfully parsed - reason: Valid - status: "True" - type: Ready - dataStreamStatus: VALID ----- -<1> Location of the file containing the compliance content. -<2> Content image location. -+ -[IMPORTANT] -==== -The base image used for the content images must include `coreutils`. -==== diff --git a/modules/compliance-profiles.adoc b/modules/compliance-profiles.adoc deleted file mode 100644 index dbcbf8c3960c..000000000000 --- a/modules/compliance-profiles.adoc +++ /dev/null @@ -1,208 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-understanding.adoc - -:_content-type: CONCEPT -[id="compliance_profiles_{context}"] -= Compliance Operator profiles - -There are several profiles available as part of the Compliance Operator installation. You can use the `oc get` command to view available profiles, profile details, and specific rules. 
- -* View the available profiles: -+ -[source,terminal] ----- -$ oc get -n openshift-compliance profiles.compliance ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -ocp4-cis 94m -ocp4-cis-node 94m -ocp4-e8 94m -ocp4-high 94m -ocp4-high-node 94m -ocp4-moderate 94m -ocp4-moderate-node 94m -ocp4-nerc-cip 94m -ocp4-nerc-cip-node 94m -ocp4-pci-dss 94m -ocp4-pci-dss-node 94m -rhcos4-e8 94m -rhcos4-high 94m -rhcos4-moderate 94m -rhcos4-nerc-cip 94m ----- -+ -These profiles represent different compliance benchmarks. Each profile has the product name that it applies to added as a prefix to the profile’s name. `ocp4-e8` applies the Essential 8 benchmark to the {product-title} product, while `rhcos4-e8` applies the Essential 8 benchmark to the {op-system-first} product. - -* Run the following command to view the details of the `rhcos4-e8` profile: -+ -[source,terminal] ----- -$ oc get -n openshift-compliance -oyaml profiles.compliance rhcos4-e8 ----- -+ -.Example output -[%collapsible] -==== -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -description: 'This profile contains configuration checks for Red Hat Enterprise Linux - CoreOS that align to the Australian Cyber Security Centre (ACSC) Essential Eight. - A copy of the Essential Eight in Linux Environments guide can be found at the ACSC - website: https://www.cyber.gov.au/acsc/view-all-content/publications/hardening-linux-workstations-and-servers' -id: xccdf_org.ssgproject.content_profile_e8 -kind: Profile -metadata: - annotations: - compliance.openshift.io/image-digest: pb-rhcos4hrdkm - compliance.openshift.io/product: redhat_enterprise_linux_coreos_4 - compliance.openshift.io/product-type: Node - creationTimestamp: "2022-10-19T12:06:49Z" - generation: 1 - labels: - compliance.openshift.io/profile-bundle: rhcos4 - name: rhcos4-e8 - namespace: openshift-compliance - ownerReferences: - - apiVersion: compliance.openshift.io/v1alpha1 - blockOwnerDeletion: true - controller: true - kind: ProfileBundle - name: rhcos4 - uid: 22350850-af4a-4f5c-9a42-5e7b68b82d7d - resourceVersion: "43699" - uid: 86353f70-28f7-40b4-bf0e-6289ec33675b -rules: -- rhcos4-accounts-no-uid-except-zero -- rhcos4-audit-rules-dac-modification-chmod -- rhcos4-audit-rules-dac-modification-chown -- rhcos4-audit-rules-execution-chcon -- rhcos4-audit-rules-execution-restorecon -- rhcos4-audit-rules-execution-semanage -- rhcos4-audit-rules-execution-setfiles -- rhcos4-audit-rules-execution-setsebool -- rhcos4-audit-rules-execution-seunshare -- rhcos4-audit-rules-kernel-module-loading-delete -- rhcos4-audit-rules-kernel-module-loading-finit -- rhcos4-audit-rules-kernel-module-loading-init -- rhcos4-audit-rules-login-events -- rhcos4-audit-rules-login-events-faillock -- rhcos4-audit-rules-login-events-lastlog -- rhcos4-audit-rules-login-events-tallylog -- rhcos4-audit-rules-networkconfig-modification -- rhcos4-audit-rules-sysadmin-actions -- rhcos4-audit-rules-time-adjtimex -- rhcos4-audit-rules-time-clock-settime -- rhcos4-audit-rules-time-settimeofday -- rhcos4-audit-rules-time-stime -- rhcos4-audit-rules-time-watch-localtime -- rhcos4-audit-rules-usergroup-modification -- rhcos4-auditd-data-retention-flush -- rhcos4-auditd-freq -- rhcos4-auditd-local-events -- rhcos4-auditd-log-format -- rhcos4-auditd-name-format -- rhcos4-auditd-write-logs -- rhcos4-configure-crypto-policy -- rhcos4-configure-ssh-crypto-policy -- rhcos4-no-empty-passwords -- rhcos4-selinux-policytype -- rhcos4-selinux-state -- rhcos4-service-auditd-enabled -- 
rhcos4-sshd-disable-empty-passwords -- rhcos4-sshd-disable-gssapi-auth -- rhcos4-sshd-disable-rhosts -- rhcos4-sshd-disable-root-login -- rhcos4-sshd-disable-user-known-hosts -- rhcos4-sshd-do-not-permit-user-env -- rhcos4-sshd-enable-strictmodes -- rhcos4-sshd-print-last-log -- rhcos4-sshd-set-loglevel-info -- rhcos4-sysctl-kernel-dmesg-restrict -- rhcos4-sysctl-kernel-kptr-restrict -- rhcos4-sysctl-kernel-randomize-va-space -- rhcos4-sysctl-kernel-unprivileged-bpf-disabled -- rhcos4-sysctl-kernel-yama-ptrace-scope -- rhcos4-sysctl-net-core-bpf-jit-harden -title: Australian Cyber Security Centre (ACSC) Essential Eight ----- -==== - -* Run the following command to view the details of the `rhcos4-audit-rules-login-events` rule: -+ -[source,terminal] ----- -$ oc get -n openshift-compliance -oyaml rules rhcos4-audit-rules-login-events ----- -+ -.Example output -[%collapsible] -==== -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -checkType: Node -description: |- - The audit system already collects login information for all users and root. If the auditd daemon is configured to use the augenrules program to read audit rules during daemon startup (the default), add the following lines to a file with suffix.rules in the directory /etc/audit/rules.d in order to watch for attempted manual edits of files involved in storing logon events: - - -w /var/log/tallylog -p wa -k logins - -w /var/run/faillock -p wa -k logins - -w /var/log/lastlog -p wa -k logins - - If the auditd daemon is configured to use the auditctl utility to read audit rules during daemon startup, add the following lines to /etc/audit/audit.rules file in order to watch for unattempted manual edits of files involved in storing logon events: - - -w /var/log/tallylog -p wa -k logins - -w /var/run/faillock -p wa -k logins - -w /var/log/lastlog -p wa -k logins -id: xccdf_org.ssgproject.content_rule_audit_rules_login_events -kind: Rule -metadata: - annotations: - compliance.openshift.io/image-digest: pb-rhcos4hrdkm - compliance.openshift.io/rule: audit-rules-login-events - control.compliance.openshift.io/NIST-800-53: AU-2(d);AU-12(c);AC-6(9);CM-6(a) - control.compliance.openshift.io/PCI-DSS: Req-10.2.3 - policies.open-cluster-management.io/controls: AU-2(d),AU-12(c),AC-6(9),CM-6(a),Req-10.2.3 - policies.open-cluster-management.io/standards: NIST-800-53,PCI-DSS - creationTimestamp: "2022-10-19T12:07:08Z" - generation: 1 - labels: - compliance.openshift.io/profile-bundle: rhcos4 - name: rhcos4-audit-rules-login-events - namespace: openshift-compliance - ownerReferences: - - apiVersion: compliance.openshift.io/v1alpha1 - blockOwnerDeletion: true - controller: true - kind: ProfileBundle - name: rhcos4 - uid: 22350850-af4a-4f5c-9a42-5e7b68b82d7d - resourceVersion: "44819" - uid: 75872f1f-3c93-40ca-a69d-44e5438824a4 -rationale: Manual editing of these files may indicate nefarious activity, such as - an attacker attempting to remove evidence of an intrusion. -severity: medium -title: Record Attempts to Alter Logon and Logout Events -warning: Manual editing of these files may indicate nefarious activity, such as an - attacker attempting to remove evidence of an intrusion. ----- -==== - -[id="compliance_profile_types{context}"] -== Compliance Operator profile types - -There are two types of compliance profiles available: Platform and Node. - -Platform:: Platform scans target your {product-title} cluster. - -Node:: Node scans target the nodes of the cluster. 
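If it is not obvious which type a given profile is, one way to check (a sketch that assumes the `compliance.openshift.io/product-type` annotation shown in the `rhcos4-e8` example above) is to read that annotation directly:

[source,terminal]
----
$ oc get profiles.compliance rhcos4-e8 -n openshift-compliance \
  -o jsonpath='{.metadata.annotations.compliance\.openshift\.io/product-type}{"\n"}'
----

.Example output
[source,terminal]
----
Node
----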
- -[IMPORTANT] -==== -For compliance profiles that have Node and Platform applications, such as `pci-dss` compliance profiles, you must run both in your {product-title} environment. -==== diff --git a/modules/compliance-raw-tailored.adoc b/modules/compliance-raw-tailored.adoc deleted file mode 100644 index f0cb71982f9d..000000000000 --- a/modules/compliance-raw-tailored.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-advanced.adoc - -:_content-type: PROCEDURE -[id="compliance-raw-tailored_{context}"] -= Using raw tailored profiles -While the `TailoredProfile` CR enables the most common tailoring operations, the XCCDF standard allows even more flexibility in tailoring OpenSCAP profiles. In addition, if your organization has been using OpenScap previously, you may have an existing XCCDF tailoring file and can reuse it. - -The `ComplianceSuite` object contains an optional `TailoringConfigMap` attribute that you can point to a custom tailoring file. The value of the `TailoringConfigMap` attribute is a name of a config map which must contain a key called `tailoring.xml` and the value of this key is the tailoring contents. - -.Procedure -. Create the `ConfigMap` object from a file: -+ -[source,terminal] ----- -$ oc -n openshift-compliance \ -create configmap nist-moderate-modified \ ---from-file=tailoring.xml=/path/to/the/tailoringFile.xml ----- - -. Reference the tailoring file in a scan that belongs to a suite: -+ -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ComplianceSuite -metadata: - name: workers-compliancesuite -spec: - debug: true - scans: - - name: workers-scan - profile: xccdf_org.ssgproject.content_profile_moderate - content: ssg-rhcos4-ds.xml - contentImage: quay.io/complianceascode/ocp4:latest - debug: true - tailoringConfigMap: - name: nist-moderate-modified - nodeSelector: - node-role.kubernetes.io/worker: "" ----- diff --git a/modules/compliance-removing-kubeletconfig.adoc b/modules/compliance-removing-kubeletconfig.adoc deleted file mode 100644 index be84a1b811b2..000000000000 --- a/modules/compliance-removing-kubeletconfig.adoc +++ /dev/null @@ -1,107 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -:_content-type: PROCEDURE -[id="compliance-removing-kubeletconfig_{context}"] -= Removing a KubeletConfig remediation -`KubeletConfig` remediations are included in node-level profiles. In order to remove a KubeletConfig remediation, you must manually remove it from the `KubeletConfig` objects. This example demonstrates how to remove the compliance check for the `one-rule-tp-node-master-kubelet-eviction-thresholds-set-hard-imagefs-available` remediation. - -.Procedure - -. 
Locate the `scan-name` and compliance check for the `one-rule-tp-node-master-kubelet-eviction-thresholds-set-hard-imagefs-available` remediation: -+ -[source,terminal] ----- -$ oc -n openshift-compliance get remediation \ one-rule-tp-node-master-kubelet-eviction-thresholds-set-hard-imagefs-available -o yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ComplianceRemediation -metadata: - annotations: - compliance.openshift.io/xccdf-value-used: var-kubelet-evictionhard-imagefs-available - creationTimestamp: "2022-01-05T19:52:27Z" - generation: 1 - labels: - compliance.openshift.io/scan-name: one-rule-tp-node-master <1> - compliance.openshift.io/suite: one-rule-ssb-node - name: one-rule-tp-node-master-kubelet-eviction-thresholds-set-hard-imagefs-available - namespace: openshift-compliance - ownerReferences: - - apiVersion: compliance.openshift.io/v1alpha1 - blockOwnerDeletion: true - controller: true - kind: ComplianceCheckResult - name: one-rule-tp-node-master-kubelet-eviction-thresholds-set-hard-imagefs-available - uid: fe8e1577-9060-4c59-95b2-3e2c51709adc - resourceVersion: "84820" - uid: 5339d21a-24d7-40cb-84d2-7a2ebb015355 -spec: - apply: true - current: - object: - apiVersion: machineconfiguration.openshift.io/v1 - kind: KubeletConfig - spec: - kubeletConfig: - evictionHard: - imagefs.available: 10% <2> - outdated: {} - type: Configuration -status: - applicationState: Applied ----- -<1> The scan name of the remediation. -<2> The remediation that was added to the `KubeletConfig` objects. -+ -[NOTE] -==== -If the remediation invokes an `evictionHard` kubelet configuration, you must specify all of the `evictionHard` parameters: `memory.available`, `nodefs.available`, `nodefs.inodesFree`, `imagefs.available`, and `imagefs.inodesFree`. If you do not specify all parameters, only the specified parameters are applied and the remediation will not function properly. -==== - -. Remove the remediation: - -.. Set `apply` to false for the remediation object: -+ -[source,terminal] ----- -$ oc -n openshift-compliance patch \ -complianceremediations/one-rule-tp-node-master-kubelet-eviction-thresholds-set-hard-imagefs-available \ --p '{"spec":{"apply":false}}' --type=merge ----- -+ -.. Using the `scan-name`, find the `KubeletConfig` object that the remediation was applied to: -+ -[source,terminal] ----- -$ oc -n openshift-compliance get kubeletconfig \ ---selector compliance.openshift.io/scan-name=one-rule-tp-node-master ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -compliance-operator-kubelet-master 2m34s ----- -.. Manually remove the remediation, `imagefs.available: 10%`, from the `KubeletConfig` object: -+ -[source,terminal] ----- -$ oc edit -n openshift-compliance KubeletConfig compliance-operator-kubelet-master ----- -+ -[IMPORTANT] -==== -All affected nodes with the remediation will be rebooted. -==== - -[NOTE] -==== -You must also exclude the rule from any scheduled scans in your tailored profiles that auto-applies the remediation, otherwise, the remediation will be re-applied during the next scheduled scan. 
-==== - diff --git a/modules/compliance-rescan.adoc b/modules/compliance-rescan.adoc deleted file mode 100644 index 331f7deea867..000000000000 --- a/modules/compliance-rescan.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-advanced.adoc - -[id="compliance-rescan_{context}"] -= Performing a rescan -Typically you will want to re-run a scan on a defined schedule, like every Monday or daily. It can also be useful to re-run a scan once after fixing a problem on a node. To perform a single scan, annotate the scan with the `compliance.openshift.io/rescan=` option: - -[source,terminal] ----- -$ oc -n openshift-compliance \ -annotate compliancescans/rhcos4-e8-worker compliance.openshift.io/rescan= ----- - -A rescan generates four additional `mc` for `rhcos-moderate` profile: - -[source,terminal] ----- -$ oc get mc ----- - -.Example output -[source,terminal] ----- -75-worker-scan-chronyd-or-ntpd-specify-remote-server -75-worker-scan-configure-usbguard-auditbackend -75-worker-scan-service-usbguard-enabled -75-worker-scan-usbguard-allow-hid-and-hub ----- - -[IMPORTANT] -==== -When the scan setting `default-auto-apply` label is applied, remediations are applied automatically and outdated remediations automatically update. If there are remediations that were not applied due to dependencies, or remediations that had been outdated, rescanning applies the remediations and might trigger a reboot. Only remediations that use `MachineConfig` objects trigger reboots. If there are no updates or dependencies to be applied, no reboot occurs. -==== diff --git a/modules/compliance-results.adoc b/modules/compliance-results.adoc deleted file mode 100644 index 72d171cef7ff..000000000000 --- a/modules/compliance-results.adoc +++ /dev/null @@ -1,99 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-raw-results.adoc - -:_content-type: PROCEDURE -[id="compliance-results_{context}"] -= Obtaining Compliance Operator raw results from a persistent volume - -.Procedure - -The Compliance Operator generates and stores the raw results in a persistent volume. These results are in Asset Reporting Format (ARF). - -. Explore the `ComplianceSuite` object: -+ -[source,terminal] ----- -$ oc get compliancesuites nist-moderate-modified \ --o json -n openshift-compliance | jq '.status.scanStatuses[].resultsStorage' ----- -+ -.Example output -[source,json] ----- -{ - "name": "ocp4-moderate", - "namespace": "openshift-compliance" -} -{ - "name": "nist-moderate-modified-master", - "namespace": "openshift-compliance" -} -{ - "name": "nist-moderate-modified-worker", - "namespace": "openshift-compliance" -} ----- -+ -This shows the persistent volume claims where the raw results are accessible. - -. Verify the raw data location by using the name and namespace of one of the results: -+ -[source,terminal] ----- -$ oc get pvc -n openshift-compliance rhcos4-moderate-worker ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE -rhcos4-moderate-worker Bound pvc-548f6cfe-164b-42fe-ba13-a07cfbc77f3a 1Gi RWO gp2 92m ----- - -. 
Fetch the raw results by spawning a pod that mounts the volume and then copying the results: -+ -[source,terminal] ----- -$ oc create -n openshift-compliance -f pod.yaml ----- -+ -.Example pod.yaml -[source,yaml] ----- -apiVersion: "v1" -kind: Pod -metadata: - name: pv-extract -spec: - containers: - - name: pv-extract-pod - image: registry.access.redhat.com/ubi9/ubi - command: ["sleep", "3000"] - volumeMounts: - - mountPath: "/workers-scan-results" - name: workers-scan-vol - volumes: - - name: workers-scan-vol - persistentVolumeClaim: - claimName: rhcos4-moderate-worker ----- - -. After the pod is running, download the results: -+ -[source,terminal] ----- -$ oc cp pv-extract:/workers-scan-results -n openshift-compliance . ----- -+ -[IMPORTANT] -==== -Spawning a pod that mounts the persistent volume keeps the claim as `Bound`. If the volume's storage class in use has permissions set to `ReadWriteOnce`, the volume is only mountable by one pod at a time. You must delete the pod upon completion, or it will not be possible for the Operator to schedule a pod and continue storing results in this location. -==== - -. After the extraction is complete, the pod can be deleted: -+ -[source,terminal] ----- -$ oc delete pod pv-extract -n openshift-compliance ----- diff --git a/modules/compliance-review.adoc b/modules/compliance-review.adoc deleted file mode 100644 index 111ac9faa5a8..000000000000 --- a/modules/compliance-review.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -[id="compliance-review_{context}"] -= Reviewing a remediation - -Review both the `ComplianceRemediation` object and the `ComplianceCheckResult` object that owns the remediation. The `ComplianceCheckResult` object contains human-readable descriptions of what the check does and what the hardening tries to prevent, as well as other `metadata` like the severity and the associated security controls. The `ComplianceRemediation` object represents a way to fix the problem described in the `ComplianceCheckResult`. After the first scan, check for remediations with the state `MissingDependencies`. - -Below is an example of a check and a remediation called `sysctl-net-ipv4-conf-all-accept-redirects`. This example is redacted to only show `spec` and `status` and omits `metadata`: - -[source,yaml] ----- -spec: - apply: false - current: - object: - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - spec: - config: - ignition: - version: 3.2.0 - storage: - files: - - path: /etc/sysctl.d/75-sysctl_net_ipv4_conf_all_accept_redirects.conf - mode: 0644 - contents: - source: data:,net.ipv4.conf.all.accept_redirects%3D0 - outdated: {} -status: - applicationState: NotApplied ----- - -The remediation payload is stored in the `spec.current` attribute. The payload can be any Kubernetes object, but because this remediation was produced by a node scan, the remediation payload in the above example is a `MachineConfig` object. For Platform scans, the remediation payload is often a different kind of an object (for example, a `ConfigMap` or `Secret` object), but typically applying that remediation is up to the administrator, because otherwise the Compliance Operator would have required a very broad set of permissions to manipulate any generic Kubernetes object. An example of remediating a Platform check is provided later in the text. 
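After reviewing a remediation and confirming that its payload is what you want, you can apply it by flipping the `apply` flag. The following is a sketch that reuses the `oc patch` pattern from the other remediation modules; the object name assumes the `rhcos4-moderate` worker scan used elsewhere in this document:

[source,terminal]
----
$ oc -n openshift-compliance patch \
  complianceremediations/rhcos4-moderate-worker-sysctl-net-ipv4-conf-all-accept-redirects \
  --patch '{"spec":{"apply":true}}' --type=merge
----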
- -To see exactly what the remediation does when applied, note that the `MachineConfig` object contents use the Ignition objects for the configuration. See the link:https://coreos.github.io/ignition/specs/[Ignition specification] for further information about the format. In our example, the `spec.config.storage.files[0].path` attribute specifies the file that is created by this remediation (`/etc/sysctl.d/75-sysctl_net_ipv4_conf_all_accept_redirects.conf`) and the `spec.config.storage.files[0].contents.source` attribute specifies the contents of that file. - -[NOTE] -==== -The contents of the files are URL-encoded. -==== - -Use the following Python script to view the contents: - -[source,terminal] ----- -$ echo "net.ipv4.conf.all.accept_redirects%3D0" | python3 -c "import sys, urllib.parse; print(urllib.parse.unquote(''.join(sys.stdin.readlines())))" ----- - -.Example output -[source,terminal] ----- -net.ipv4.conf.all.accept_redirects=0 ----- diff --git a/modules/compliance-scansetting-cr.adoc b/modules/compliance-scansetting-cr.adoc deleted file mode 100644 index a76b1da48e4a..000000000000 --- a/modules/compliance-scansetting-cr.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-scans.adoc - -:_content-type: CONCEPT -[id="compliance-scansetting-cr_{context}"] -= `ScanSetting` Custom Resource - -The `ScanSetting` Custom Resource now allows you to override the default CPU and memory limits of scanner pods through the scan limits attribute. The Compliance Operator will use defaults of 500Mi memory, 100m CPU for the scanner container, and 200Mi memory with 100m CPU for the `api-resource-collector` container. To set the memory limits of the Operator, modify the `Subscription` object if installed through OLM or the Operator deployment itself. - -To increase the default CPU and memory limits of the Compliance Operator, see _Increasing Compliance Operator resource limits_. - -[IMPORTANT] -==== -Increasing the memory limit for the Compliance Operator or the scanner pods is needed if the default limits are not sufficient and the Operator or scanner pods are terminated by the Out Of Memory (OOM) process. -==== diff --git a/modules/compliance-scheduling-pods-with-resource-requests.adoc b/modules/compliance-scheduling-pods-with-resource-requests.adoc deleted file mode 100644 index 2afb461050d6..000000000000 --- a/modules/compliance-scheduling-pods-with-resource-requests.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-scans.adoc - -:_content-type: CONCEPT -[id="compliance-scheduling-pods-with-resource-requests_{context}"] -= Scheduling Pods with container resource requests - -When a Pod is created, the scheduler selects a Node for the Pod to run on. Each node has a maximum capacity for each resource type in the amount of CPU and memory it can provide for the Pods. The scheduler ensures that the sum of the resource requests of the scheduled containers is less than the capacity of the node for each resource type. - -Even if the actual memory or CPU resource usage on a node is very low, the scheduler still refuses to place a Pod on that node if the capacity check fails. This protects against a resource shortage on a node when resource usage increases later. 
- -For each container, you can specify the following resource limits and request: - -[source,terminal] ----- -spec.containers[].resources.limits.cpu -spec.containers[].resources.limits.memory -spec.containers[].resources.limits.hugepages-<size> -spec.containers[].resources.requests.cpu -spec.containers[].resources.requests.memory -spec.containers[].resources.requests.hugepages-<size> ----- - -Although you can specify requests and limits for only individual containers, it is also useful to consider the overall resource requests and limits for a pod. For a particular resource, a container resource request or limit is the sum of the resource requests or limits of that type for each container in the pod. - -.Example container resource requests and limits -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: frontend -spec: - containers: - - name: app - image: images.my-company.example/app:v4 - resources: - requests: <1> - memory: "64Mi" - cpu: "250m" - limits: <2> - memory: "128Mi" - cpu: "500m" - - name: log-aggregator - image: images.my-company.example/log-aggregator:v6 - resources: - requests: - memory: "64Mi" - cpu: "250m" - limits: - memory: "128Mi" - cpu: "500m" ----- -<1> The container is requesting 64 Mi of memory and 250 m CPU. -<2> The container's limits are 128 Mi of memory and 500 m CPU. \ No newline at end of file diff --git a/modules/compliance-supported-profiles.adoc b/modules/compliance-supported-profiles.adoc deleted file mode 100644 index 77e6b1203c6f..000000000000 --- a/modules/compliance-supported-profiles.adoc +++ /dev/null @@ -1,135 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/ - -:_content-type: CONCEPT -[id="compliance-supported-profiles_{context}"] -= Compliance profiles - -The Compliance Operator provides the following compliance profiles: - -.Supported compliance profiles -[cols="10%,40%,10%,10%,40%,10%", options="header"] - -|=== -|Profile -|Profile title -|Application -|Compliance Operator version -|Industry compliance benchmark -|Supported architectures - -|ocp4-cis -|CIS Red Hat OpenShift Container Platform 4 Benchmark v1.1.0 -|Platform -|0.1.39+ -|link:https://www.cisecurity.org/cis-benchmarks/[CIS Benchmarks ™] ^[1]^ -|`x86_64` - `ppc64le` - `s390x` - -|ocp4-cis-node -|CIS Red Hat OpenShift Container Platform 4 Benchmark v1.1.0 -|Node ^[2]^ -|0.1.39+ -|link:https://www.cisecurity.org/cis-benchmarks/[CIS Benchmarks ™] ^[1]^ -|`x86_64` - `ppc64le` - `s390x` - -|ocp4-e8 -|Australian Cyber Security Centre (ACSC) Essential Eight -|Platform -|0.1.39+ -|link:https://www.cyber.gov.au/acsc/view-all-content/publications/hardening-linux-workstations-and-servers[ACSC Hardening Linux Workstations and Servers] -|`x86_64` - -|ocp4-moderate -|NIST 800-53 Moderate-Impact Baseline for Red Hat OpenShift - Platform level -|Platform -|0.1.39+ -|link:https://nvd.nist.gov/800-53/Rev4/impact/moderate[NIST SP-800-53 Release Search] -|`x86_64` - -|rhcos4-e8 -|Australian Cyber Security Centre (ACSC) Essential Eight -|Node -|0.1.39+ -|link:https://www.cyber.gov.au/acsc/view-all-content/publications/hardening-linux-workstations-and-servers[ACSC Hardening Linux Workstations and Servers] -|`x86_64` - -|rhcos4-moderate -|NIST 800-53 Moderate-Impact Baseline for Red Hat Enterprise Linux CoreOS -|Node -|0.1.39+ -|link:https://nvd.nist.gov/800-53/Rev4/impact/moderate[NIST SP-800-53 Release Search] -|`x86_64` - -|ocp4-moderate-node -|NIST 800-53 Moderate-Impact Baseline for Red Hat OpenShift - Node level -|Node ^[2]^ -|0.1.44+ 
-|link:https://nvd.nist.gov/800-53/Rev4/impact/moderate[NIST SP-800-53 Release Search] -|`x86_64` - -|ocp4-nerc-cip -|North American Electric Reliability Corporation (NERC) Critical Infrastructure Protection (CIP) cybersecurity standards profile for the Red Hat OpenShift Container Platform - Platform level -|Platform -|0.1.44+ -|link:https://www.nerc.com/pa/Stand/Pages/CIPStandards.aspx[NERC CIP Standards] -|`x86_64` - -|ocp4-nerc-cip-node -|North American Electric Reliability Corporation (NERC) Critical Infrastructure Protection (CIP) cybersecurity standards profile for the Red Hat OpenShift Container Platform - Node level -|Node ^[2]^ -|0.1.44+ -|link:https://www.nerc.com/pa/Stand/Pages/CIPStandards.aspx[NERC CIP Standards] -|`x86_64` - -|rhcos4-nerc-cip -|North American Electric Reliability Corporation (NERC) Critical Infrastructure Protection (CIP) cybersecurity standards profile for Red Hat Enterprise Linux CoreOS -|Node -|0.1.44+ -|link:https://www.nerc.com/pa/Stand/Pages/CIPStandards.aspx[NERC CIP Standards] -|`x86_64` - -|ocp4-pci-dss -|PCI-DSS v3.2.1 Control Baseline for Red Hat OpenShift Container Platform 4 -|Platform -|0.1.47+ -|link:https://www.pcisecuritystandards.org/document_library?document=pci_dss[PCI Security Standards ® Council Document Library] -|`x86_64` - `ppc64le` - -|ocp4-pci-dss-node -|PCI-DSS v3.2.1 Control Baseline for Red Hat OpenShift Container Platform 4 -|Node ^[2]^ -|0.1.47+ -|link:https://www.pcisecuritystandards.org/document_library?document=pci_dss[PCI Security Standards ® Council Document Library] -|`x86_64` - `ppc64le` - -|ocp4-high -|NIST 800-53 High-Impact Baseline for Red Hat OpenShift - Platform level -|Platform -|0.1.52+ -|link:https://csrc.nist.gov/Projects/risk-management/sp800-53-controls/release-search#!/800-53[NIST SP-800-53 Release Search] -|`x86_64` - -|ocp4-high-node -|NIST 800-53 High-Impact Baseline for Red Hat OpenShift - Node level -|Node ^[2]^ -|0.1.52+ -|link:https://csrc.nist.gov/Projects/risk-management/sp800-53-controls/release-search#!/800-53[NIST SP-800-53 Release Search] -|`x86_64` - -|rhcos4-high -|NIST 800-53 High-Impact Baseline for Red Hat Enterprise Linux CoreOS -|Node -|0.1.52+ -|link:https://csrc.nist.gov/Projects/risk-management/sp800-53-controls/release-search#!/800-53[NIST SP-800-53 Release Search] -|`x86_64` -|=== -[.small] -1. To locate the CIS {product-title} v4 Benchmark, go to link:https://www.cisecurity.org/cis-benchmarks/[CIS Benchmarks] and type `Kubernetes` in the search box. Click on *Kubernetes* and then *Download Latest CIS Benchmark*, where you can then register to download the benchmark. -2. Node profiles must be used with the relevant Platform profile. For more information, see xref:../../security/compliance_operator/compliance-operator-understanding.adoc#compliance_profile_typesunderstanding-compliance[Compliance Operator profile types]. 
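As a sketch of the pairing described in footnote 2, a single `ScanSettingBinding` can run a Platform profile together with its companion Node profile. The binding name below is illustrative, and the example assumes the default `ScanSetting` object and the PCI-DSS profiles listed in the table:

[source,yaml]
----
apiVersion: compliance.openshift.io/v1alpha1
kind: ScanSettingBinding
metadata:
  name: pci-dss-compliance <1>
  namespace: openshift-compliance
profiles:
  - apiGroup: compliance.openshift.io/v1alpha1
    kind: Profile
    name: ocp4-pci-dss <2>
  - apiGroup: compliance.openshift.io/v1alpha1
    kind: Profile
    name: ocp4-pci-dss-node <3>
settingsRef:
  apiGroup: compliance.openshift.io/v1alpha1
  kind: ScanSetting
  name: default
----
<1> Illustrative binding name.
<2> Platform profile.
<3> Companion Node profile that runs alongside the Platform profile.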
\ No newline at end of file diff --git a/modules/compliance-tailored-profiles.adoc b/modules/compliance-tailored-profiles.adoc deleted file mode 100644 index 620f2f082470..000000000000 --- a/modules/compliance-tailored-profiles.adoc +++ /dev/null @@ -1,146 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-tailor.adoc - -:_content-type: PROCEDURE -[id="compliance-tailored-profiles_{context}"] -= Using tailored profiles to extend existing ProfileBundles -While the `TailoredProfile` CR enables the most common tailoring operations, the XCCDF standard allows even more flexibility in tailoring OpenSCAP profiles. In addition, if your organization has been using OpenScap previously, you may have an existing XCCDF tailoring file and can reuse it. - -The `ComplianceSuite` object contains an optional `TailoringConfigMap` attribute that you can point to a custom tailoring file. The value of the `TailoringConfigMap` attribute is a name of a config map, which must contain a key called `tailoring.xml` and the value of this key is the tailoring contents. - -.Procedure - -. Browse the available rules for the {op-system-first} `ProfileBundle`: -+ -[source,terminal] ----- -$ oc get rules.compliance -n openshift-compliance -l compliance.openshift.io/profile-bundle=rhcos4 ----- - -. Browse the available variables in the same `ProfileBundle`: -+ -[source,terminal] ----- -$ oc get variables.compliance -n openshift-compliance -l compliance.openshift.io/profile-bundle=rhcos4 ----- - -. Create a tailored profile named `nist-moderate-modified`: -.. Choose which rules you want to add to the `nist-moderate-modified` tailored profile. This example extends the `rhcos4-moderate` profile by disabling two rules and changing one value. Use the `rationale` value to describe why these changes were made: -+ -.Example `new-profile-node.yaml` -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: TailoredProfile -metadata: - name: nist-moderate-modified -spec: - extends: rhcos4-moderate - description: NIST moderate profile - title: My modified NIST moderate profile - disableRules: - - name: rhcos4-file-permissions-var-log-messages - rationale: The file contains logs of error messages in the system - - name: rhcos4-account-disable-post-pw-expiration - rationale: No need to check this as it comes from the IdP - setValues: - - name: rhcos4-var-selinux-state - rationale: Organizational requirements - value: permissive ----- -+ -.Attributes for spec variables -[cols="1,2a",options="header"] -|=== -|Attribute -|Description - -|`extends` -|Name of the `Profile` object upon which this `TailoredProfile` is built. - -|`title` -|Human-readable title of the `TailoredProfile`. - -|`disableRules` -|A list of name and rationale pairs. Each name refers to a name of a rule object that is to be disabled. The rationale value is human-readable text describing why the rule is disabled. - -|`manualRules` -| A list of name and rationale pairs. When a manual rule is added, the check result status will always be `manual` and remediation will not be generated. This attribute is automatic and by default has no values when set as a manual rule. - -|`enableRules` -|A list of name and rationale pairs. Each name refers to a name of a rule object that is to be enabled. The rationale value is human-readable text describing why the rule is enabled. - -|`description` -|Human-readable text describing the `TailoredProfile`. 
- -|`setValues` -| A list of name, rationale, and value groupings. Each name refers to a name of the value set. The rationale is human-readable text describing the set. The value is the actual setting. -|=== -+ -.. Add the `tailoredProfile.spec.manualRules` attribute: -+ -.Example `tailoredProfile.spec.manualRules.yaml` -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: TailoredProfile -metadata: - name: ocp4-manual-scc-check -spec: - extends: ocp4-cis - description: This profile extends ocp4-cis by forcing the SCC check to always return MANUAL - title: OCP4 CIS profile with manual SCC check - manualRules: - - name: ocp4-scc-limit-container-allowed-capabilities - rationale: We use third party software that installs its own SCC with extra privileges ----- - -.. Create the `TailoredProfile` object: -+ -[source,terminal] ----- -$ oc create -n openshift-compliance -f new-profile-node.yaml <1> ----- -<1> The `TailoredProfile` object is created in the default `openshift-compliance` namespace. -+ -.Example output -[source,terminal] ----- -tailoredprofile.compliance.openshift.io/nist-moderate-modified created ----- - -. Define the `ScanSettingBinding` object to bind the new `nist-moderate-modified` tailored profile to the default `ScanSetting` object. -+ -.Example `new-scansettingbinding.yaml` -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ScanSettingBinding -metadata: - name: nist-moderate-modified -profiles: - - apiGroup: compliance.openshift.io/v1alpha1 - kind: Profile - name: ocp4-moderate - - apiGroup: compliance.openshift.io/v1alpha1 - kind: TailoredProfile - name: nist-moderate-modified -settingsRef: - apiGroup: compliance.openshift.io/v1alpha1 - kind: ScanSetting - name: default ----- - -. Create the `ScanSettingBinding` object: -+ -[source,terminal] ----- -$ oc create -n openshift-compliance -f new-scansettingbinding.yaml ----- -+ -.Example output -[source,terminal] ----- -scansettingbinding.compliance.openshift.io/nist-moderate-modified created ----- diff --git a/modules/compliance-timeout.adoc b/modules/compliance-timeout.adoc deleted file mode 100644 index 31d75ba13941..000000000000 --- a/modules/compliance-timeout.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="compliance-timeout_{context}"] -= Configuring ScanSetting timeout - -The `ScanSetting` object has a timeout option that can be specified in the `ComplianceScanSetting` object as a duration string, such as `1h30m`. If the scan does not finish within the specified timeout, the scan reattempts until the `maxRetryOnTimeout` limit is reached. - -.Procedure - -* To set a `timeout` and `maxRetryOnTimeout` in ScanSetting, modify an existing `ScanSetting` object: -+ -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ScanSetting -metadata: - name: default - namespace: openshift-compliance -rawResultStorage: - rotation: 3 - size: 1Gi -roles: -- worker -- master -scanTolerations: -- effect: NoSchedule - key: node-role.kubernetes.io/master - operator: Exists -schedule: '0 1 * * *' -timeout: '10m0s' <1> -maxRetryOnTimeout: 3 <2> ----- -<1> The `timeout` variable is defined as a duration string, such as `1h30m`. The default value is `30m`. To disable the timeout, set the value to `0s`. -<2> The `maxRetryOnTimeout` variable defines how many times a retry is attempted. The default value is `3`. 
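Instead of editing the full object, the same two fields can be set with a merge patch. This is a sketch that assumes the default `ScanSetting` object shown above, where `timeout` and `maxRetryOnTimeout` are top-level fields:

[source,terminal]
----
$ oc -n openshift-compliance patch scansettings default \
  --type=merge -p '{"timeout":"10m0s","maxRetryOnTimeout":3}'
----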
\ No newline at end of file diff --git a/modules/compliance-unapplying.adoc b/modules/compliance-unapplying.adoc deleted file mode 100644 index d214ca6ea313..000000000000 --- a/modules/compliance-unapplying.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -:_content-type: PROCEDURE -[id="compliance-unapplying_{context}"] -= Unapplying a remediation -It might be required to unapply a remediation that was previously applied. - -.Procedure -. Set the `apply` flag to `false`: -+ -[source,terminal] ----- -$ oc -n openshift-compliance \ -patch complianceremediations/rhcos4-moderate-worker-sysctl-net-ipv4-conf-all-accept-redirects \ ---patch '{"spec":{"apply":false}}' --type=merge ----- - -. The remediation status will change to `NotApplied` and the composite `MachineConfig` object would be re-rendered to not include the remediation. -+ -[IMPORTANT] -==== -All affected nodes with the remediation will be rebooted. -==== diff --git a/modules/compliance-update.adoc b/modules/compliance-update.adoc deleted file mode 100644 index 2ed2be580eb1..000000000000 --- a/modules/compliance-update.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-manage.adoc - -:_content-type: CONCEPT -[id="compliance-update_{context}"] -= Updating security content - -Security content is included as container images that the `ProfileBundle` objects refer to. To accurately track updates to `ProfileBundles` and the custom resources parsed from the bundles such as rules or profiles, identify the container image with the compliance content using a digest instead of a tag: - -[source,terminal] ----- -$ oc -n openshift-compliance get profilebundles rhcos4 -oyaml ----- - -.Example output -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ProfileBundle -metadata: - creationTimestamp: "2022-10-19T12:06:30Z" - finalizers: - - profilebundle.finalizers.compliance.openshift.io - generation: 1 - name: rhcos4 - namespace: openshift-compliance - resourceVersion: "46741" - uid: 22350850-af4a-4f5c-9a42-5e7b68b82d7d -spec: - contentFile: ssg-rhcos4-ds.xml - contentImage: registry.redhat.io/compliance/openshift-compliance-content-rhel8@sha256:900e... <1> -status: - conditions: - - lastTransitionTime: "2022-10-19T12:07:51Z" - message: Profile bundle successfully parsed - reason: Valid - status: "True" - type: Ready - dataStreamStatus: VALID ----- -<1> Security container image. - -Each `ProfileBundle` is backed by a deployment. When the Compliance Operator detects that the container image digest has changed, the deployment is updated to reflect the change and parse the content again. Using the digest instead of a tag ensures that you use a stable and predictable set of profiles. diff --git a/modules/compliance-updating.adoc b/modules/compliance-updating.adoc deleted file mode 100644 index 6b0673e27042..000000000000 --- a/modules/compliance-updating.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -:_content-type: PROCEDURE -[id="compliance-updating_{context}"] -= Updating remediations - -When a new version of compliance content is used, it might deliver a new and different version of a remediation than the previous version. The Compliance Operator will keep the old version of the remediation applied. 
The {product-title} administrator is also notified of the new version to review and apply. A `ComplianceRemediation` object that was applied earlier, but then updated, changes its status to *Outdated*. The outdated objects are labeled so that they can be searched for easily. - -The previously applied remediation contents are then stored in the `spec.outdated` attribute of the `ComplianceRemediation` object and the new, updated contents are stored in the `spec.current` attribute. After updating the content to a newer version, the administrator then needs to review the remediation. As long as the `spec.outdated` attribute exists, it is used to render the resulting `MachineConfig` object. After the `spec.outdated` attribute is removed, the Compliance Operator re-renders the resulting `MachineConfig` object, which causes the Operator to push the configuration to the nodes. - -.Procedure - -. Search for any outdated remediations: -+ -[source,terminal] ----- -$ oc -n openshift-compliance get complianceremediations \ --l complianceoperator.openshift.io/outdated-remediation= ----- -+ -.Example output -[source,terminal] ----- -NAME STATE -workers-scan-no-empty-passwords Outdated ----- -+ -The currently applied remediation is stored in the `spec.outdated` attribute and the new, unapplied remediation is stored in the `spec.current` attribute. If you are satisfied with the new version, remove the `spec.outdated` attribute. If you want to keep the updated content, remove the `spec.current` and `spec.outdated` attributes. - -. Apply the newer version of the remediation: -+ -[source,terminal] ----- -$ oc -n openshift-compliance patch complianceremediations workers-scan-no-empty-passwords \ ---type json -p '[{"op":"remove", "path":"/spec/outdated"}]' ----- - -. The remediation state will switch from `Outdated` to `Applied`: -+ -[source,terminal] ----- -$ oc get -n openshift-compliance complianceremediations workers-scan-no-empty-passwords ----- -+ -.Example output -[source,terminal] ----- -NAME STATE -workers-scan-no-empty-passwords Applied ----- - -. The nodes will apply the newer remediation version and reboot. diff --git a/modules/compute-machineset-upi-reqs.adoc b/modules/compute-machineset-upi-reqs.adoc deleted file mode 100644 index ff6d5f90e3c0..000000000000 --- a/modules/compute-machineset-upi-reqs.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-vsphere.adoc -// -// Currently only in the vSphere compute machine set content, but we will want this for other platforms such as AWS and GCP. - -ifeval::["{context}" == "creating-machineset-vsphere"] -:vsphere: -endif::[] - -:_content-type: CONCEPT -[id="compute-machineset-upi-reqs_{context}"] -= Requirements for clusters with user-provisioned infrastructure to use compute machine sets - -To use compute machine sets on clusters that have user-provisioned infrastructure, you must ensure that your cluster configuration supports using the Machine API.
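One way to confirm that the Machine API components are present is to check the status of the `machine-api` cluster Operator. This is a minimal check and assumes that you have access to the cluster as a user with the `cluster-admin` role:

[source,terminal]
----
$ oc get clusteroperator machine-api
----

An `Available` status indicates that the Machine API components are running on the cluster.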
- -ifeval::["{context}" == "creating-machineset-vsphere"] -:!vsphere: -endif::[] diff --git a/modules/con_auto-reboot-during-argo-cd-sync-with-machine-configurations.adoc b/modules/con_auto-reboot-during-argo-cd-sync-with-machine-configurations.adoc deleted file mode 100644 index 798687c94189..000000000000 --- a/modules/con_auto-reboot-during-argo-cd-sync-with-machine-configurations.adoc +++ /dev/null @@ -1,10 +0,0 @@ -:_content-type: CONCEPT - -[id="auto-reboot-during-argo-cd-sync-with-machine-configurations"] -= Issue: Auto-reboot during Argo CD sync with machine configurations - -In Red Hat OpenShift Container Platform, nodes are updated automatically through the Red Hat OpenShift Machine Config Operator (MCO). The MCO manages the complete life cycle of the cluster nodes by using machine configuration custom resources. - -When a machine configuration resource is created or updated in a cluster, the MCO picks up the update, performs the necessary changes to the selected nodes, and restarts the nodes gracefully by cordoning, draining, and rebooting those nodes. It handles everything from the kernel to the kubelet. - -However, interactions between the MCO and the GitOps workflow can introduce major performance issues and other undesired behaviors. This section shows how to make the MCO and the Argo CD GitOps orchestration tool work well together. \ No newline at end of file diff --git a/modules/con_bmo-bare-metal-operator-architecture.adoc b/modules/con_bmo-bare-metal-operator-architecture.adoc deleted file mode 100644 index c263a5df8cf1..000000000000 --- a/modules/con_bmo-bare-metal-operator-architecture.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// This is included in the following assemblies: -// -// post_installation_configuration/bare-metal-configuration.adoc -:_content-type: CONCEPT -[id="bmo-bare-metal-operator-architecture_{context}"] -= Bare Metal Operator architecture - -The Bare Metal Operator (BMO) uses three resources to provision, manage, and inspect bare-metal hosts in your cluster. The following diagram illustrates the architecture of these resources: - -image::302_OpenShift_Bare_Metal_Operator_0223.png[BMO architecture overview] - -.BareMetalHost - -The `BareMetalHost` resource defines a physical host and its properties. When you provision a bare-metal host to the cluster, you must define a `BareMetalHost` resource for that host. For ongoing management of the host, you can inspect the information in the `BareMetalHost` or update this information. - -The `BareMetalHost` resource features provisioning information such as the following: - -* Deployment specifications such as the operating system boot image or the custom RAM disk -* Provisioning state -* Baseboard Management Controller (BMC) address -* Desired power state - -The `BareMetalHost` resource features hardware information such as the following: - -* Number of CPUs -* MAC address of a NIC -* Size of the host's storage device -* Current power state - -.HostFirmwareSettings -You can use the `HostFirmwareSettings` resource to retrieve and manage the firmware settings for a host. When a host moves to the `Available` state, the Ironic service reads the host's firmware settings and creates the `HostFirmwareSettings` resource. There is a one-to-one mapping between the `BareMetalHost` resource and the `HostFirmwareSettings` resource. - -You can use the `HostFirmwareSettings` resource to inspect the firmware specifications for a host or to update a host's firmware specifications.
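For illustration only, updating firmware settings typically means editing entries under the `spec.settings` field of the `HostFirmwareSettings` resource. The host name and the setting shown in the following sketch are hypothetical; valid setting names and values depend on your hardware vendor and are listed in the associated `FirmwareSchema` resource:

[source,yaml]
----
apiVersion: metal3.io/v1alpha1
kind: HostFirmwareSettings
metadata:
  name: worker-0 <1>
  namespace: openshift-machine-api
spec:
  settings:
    ProcTurboMode: Disabled <2>
----
<1> Hypothetical host name. The resource shares its name with the corresponding `BareMetalHost` resource.
<2> Example vendor-specific setting name and value. Check the read-only `FirmwareSchema` resource for the setting names and values that your host model accepts.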
- -[NOTE] -==== -You must adhere to the schema specific to the vendor firmware when you edit the `spec` field of the `HostFirmwareSettings` resource. This schema is defined in the read-only `FirmwareSchema` resource. -==== - -.FirmwareSchema -Firmware settings vary among hardware vendors and host models. A `FirmwareSchema` resource is a read-only resource that contains the types and limits for each firmware setting on each host model. The data comes directly from the BMC by using the Ironic service. The `FirmwareSchema` resource enables you to identify valid values you can specify in the `spec` field of the `HostFirmwareSettings` resource. - -A `FirmwareSchema` resource can apply to many `BareMetalHost` resources if the schema is the same. - -[role="_additional-resources"] -.Additional resources -* link:https://metal3.io/[Metal³ API service for provisioning bare-metal hosts] -* link:https://ironicbaremetal.org/[Ironic API service for managing bare-metal infrastructure] diff --git a/modules/config-aws-access.adoc b/modules/config-aws-access.adoc deleted file mode 100644 index b037e04e3e96..000000000000 --- a/modules/config-aws-access.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/osd_private_connections/aws-private-connections.adoc - -:_content-type: PROCEDURE -[id="config-aws-access_{context}"] - -= Configuring AWS infrastructure access - -// TODO: I see {AWS} and {GCP} only used a handful of time, but their written out form much more. Should all hardcoded instances be updated to use the attributes? -{AWS} infrastructure access allows link:https://access.redhat.com/node/3610411[Customer Portal Organization Administrators] and cluster owners to enable AWS Identity and Access Management (IAM) users to have federated access to the AWS Management Console for their {product-title} cluster. Administrators can select between `Network Management` or `Read-only` access options. - -.Prerequisites - -* An AWS account with IAM permissions. - -.Procedure - -. Log in to your AWS account. If necessary, you can create a new AWS account by following the link:https://aws.amazon.com/premiumsupport/knowledge-center/create-and-activate-aws-account/[AWS documentation]. - -. Create an IAM user with `STS:AllowAssumeRole` permissions within the AWS account. - -.. Open the link:https://console.aws.amazon.com/iam/home#/home[IAM dashboard] of the AWS Management Console. -.. In the *Policies* section, click *Create Policy*. -.. Select the *JSON* tab and replace the existing text with the following: -+ -[source,json] ----- -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "sts:AssumeRole", - "Resource": "*" - } - ] -} ----- - -.. Click *Next:Tags*. -.. Optional: Add tags. Click *Next:Review* -.. Provide an appropriate name and description, then click *Create Policy*. -.. In the *Users* section, click *Add user*. -.. Provide an appropriate user name. -.. Select *AWS Management Console access* as the AWS access type. -.. Adjust the password requirements as necessary for your organization, then click *Next:Permissions*. -.. Click the *Attach existing policies directly* option. Search for and check the policy created in previous steps. -+ -[NOTE] -==== -It is not recommended to set a permissions boundary. -==== - -.. Click *Next: Tags*, then click *Next: Review*. Confirm the configuration is correct. -.. Click *Create user*, a success page appears. -.. Gather the IAM user’s Amazon Resource Name (ARN). 
The ARN will have the following format: `arn:aws:iam::000111222333:user/username`. Click *Close*. - -. Open {cluster-manager-url} in your browser and select the cluster you want to allow AWS infrastructure access. - -. Select the *Access control* tab, and scroll to the *AWS Infrastructure Access* section. - -. Paste the *AWS IAM ARN* and select *Network Management* or *Read-only* permissions, then click *Grant role*. - -. Copy the *AWS OSD console URL* to your clipboard. - -. Sign in to your AWS account with your Account ID or alias, IAM user name, and password. - -. In a new browser tab, paste the AWS OSD Console URL that will be used to route to the AWS Switch Role page. - -. Your account number and role will be filled in already. Choose a display name if necessary, then click *Switch Role*. - -.Verification - -* You now see *VPC* under *Recently visited services*. diff --git a/modules/config-github-idp.adoc b/modules/config-github-idp.adoc deleted file mode 100644 index dd6b0033da72..000000000000 --- a/modules/config-github-idp.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_install_access_delete_cluster/config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa-sts-config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-config-identity-providers.adoc - -:_content-type: PROCEDURE -[id="config-github-idp_{context}"] -= Configuring a GitHub identity provider - - -Configure a GitHub identity provider to validate user names and passwords against GitHub or GitHub Enterprise’s OAuth authentication server and access your {product-title} cluster. OAuth facilitates a token exchange flow between {product-title} and GitHub or GitHub Enterprise. - -[WARNING] -==== -Configuring GitHub authentication allows users to log in to {product-title} with their GitHub credentials. To prevent anyone with any GitHub user ID from logging in to your {product-title} cluster, you must restrict access to only those in specific GitHub organizations or teams. -==== - -.Prerequisites - -* The OAuth application must be created directly within the GitHub link:https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams/managing-organization-settings[organization settings] by the GitHub organization administrator. -* link:https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams[GitHub organizations or teams] are set up in your GitHub account. - -.Procedure - -. From {cluster-manager-url}, navigate to the *Clusters* page and select the cluster that you need to configure identity providers for. - -. Click the *Access control* tab. - -. Click *Add identity provider*. -+ -[NOTE] -==== -You can also click the *Add Oauth configuration* link in the warning message displayed after cluster creation to configure your identity providers. -==== - -. Select *GitHub* from the drop-down menu. - -. Enter a unique name for the identity provider. This name cannot be changed later. -** An *OAuth callback URL* is automatically generated in the provided field. You will use this to register the GitHub application. -+ ----- -https://oauth-openshift.apps.<cluster_name>.<cluster_domain>/oauth2callback/<idp_provider_name> ----- -+ -For example: -+ ----- -https://oauth-openshift.apps.openshift-cluster.example.com/oauth2callback/github ----- - -. link:https://docs.github.com/en/developers/apps/creating-an-oauth-app[Register an application on GitHub]. - -. 
Return to {product-title} and select a mapping method from the drop-down menu. *Claim* is recommended in most cases. - -. Enter the *Client ID* and *Client secret* provided by GitHub. - -. Enter a *hostname*. A hostname must be entered when using a hosted instance of GitHub Enterprise. - -. Optional: You can use a certificate authority (CA) file to validate server certificates for the configured GitHub Enterprise URL. Click *Browse* to locate and attach a *CA file* to the identity provider. - -. Select *Use organizations* or *Use teams* to restrict access to a particular GitHub organization or a GitHub team. - -. Enter the name of the organization or team you would like to restrict access to. Click *Add more* to specify multiple organizations or teams that users can be a member of. - -. Click *Confirm*. - -.Verification - -* The configured identity provider is now visible on the *Access control* tab of the *Clusters* page. diff --git a/modules/config-gitlab-idp.adoc b/modules/config-gitlab-idp.adoc deleted file mode 100644 index ac89b1c27fdb..000000000000 --- a/modules/config-gitlab-idp.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_install_access_delete_cluster/config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa-sts-config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-config-identity-providers.adoc - -:_content-type: PROCEDURE -[id="config-gitlab-idp_{context}"] -= Configuring a GitLab identity provider - - -Configure a GitLab identity provider to use link:https://gitlab.com/[GitLab.com] or any other GitLab instance as an identity provider. - -.Prerequisites - -- If you use GitLab version 7.7.0 to 11.0, you connect using the link:http://doc.gitlab.com/ce/integration/oauth_provider.html[OAuth integration]. If you use GitLab version 11.1 or later, you can use link:https://docs.gitlab.com/ce/integration/openid_connect_provider.html[OpenID Connect] (OIDC) to connect instead of OAuth. - -.Procedure - -. From {cluster-manager-url}, navigate to the *Clusters* page and select the cluster that you need to configure identity providers for. - -. Click the *Access control* tab. - -. Click *Add identity provider*. -+ -[NOTE] -==== -You can also click the *Add Oauth configuration* link in the warning message displayed after cluster creation to configure your identity providers. -==== - -. Select *GitLab* from the drop-down menu. - -. Enter a unique name for the identity provider. This name cannot be changed later. -** An *OAuth callback URL* is automatically generated in the provided field. You will provide this URL to GitLab. -+ ----- -https://oauth-openshift.apps.<cluster_name>.<cluster_domain>/oauth2callback/<idp_provider_name> ----- -+ -For example: -+ ----- -https://oauth-openshift.apps.openshift-cluster.example.com/oauth2callback/gitlab ----- - -. link:https://docs.gitlab.com/ee/integration/oauth_provider.html[Add a new application in GitLab]. - -. Return to {product-title} and select a mapping method from the drop-down menu. *Claim* is recommended in most cases. - -. Enter the *Client ID* and *Client secret* provided by GitLab. - -. Enter the *URL* of your GitLab provider. - -. Optional: You can use a certificate authority (CA) file to validate server certificates for the configured GitLab URL. Click *Browse* to locate and attach a *CA file* to the identity provider. - -. Click *Confirm*. 
- -.Verification - -* The configured identity provider is now visible on the *Access control* tab of the *Clusters* page. diff --git a/modules/config-google-idp.adoc b/modules/config-google-idp.adoc deleted file mode 100644 index 7e5069ef999e..000000000000 --- a/modules/config-google-idp.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_install_access_delete_cluster/config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa-sts-config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-config-identity-providers.adoc - -:_content-type: PROCEDURE -[id="config-google-idp_{context}"] -= Configuring a Google identity provider - - -Configure a Google identity provider to allow users to authenticate with their Google credentials. - -[WARNING] -==== -Using Google as an identity provider allows any Google user to authenticate to your server. -You can limit authentication to members of a specific hosted domain with the -`hostedDomain` configuration attribute. -==== - -.Procedure - -. From {cluster-manager-url}, navigate to the *Clusters* page and select the cluster that you need to configure identity providers for. - -. Click the *Access control* tab. - -. Click *Add identity provider*. -+ -[NOTE] -==== -You can also click the *Add Oauth configuration* link in the warning message displayed after cluster creation to configure your identity providers. -==== - -. Select *Google* from the drop-down menu. - -. Enter a unique name for the identity provider. This name cannot be changed later. -** An *OAuth callback URL* is automatically generated in the provided field. You will provide this URL to Google. -+ ----- -https://oauth-openshift.apps.<cluster_name>.<cluster_domain>/oauth2callback/<idp_provider_name> ----- -+ -For example: -+ ----- -https://oauth-openshift.apps.openshift-cluster.example.com/oauth2callback/google ----- - -. Configure a Google identity provider using link:https://developers.google.com/identity/protocols/OpenIDConnect[Google's OpenID Connect integration]. - -. Return to {product-title} and select a mapping method from the drop-down menu. *Claim* is recommended in most cases. - -. Enter the *Client ID* of a registered Google project and the *Client secret* issued by Google. - -. Enter a hosted domain to restrict users to a Google Apps domain. - -. Click *Confirm*. - -.Verification - -* The configured identity provider is now visible on the *Access control* tab of the *Clusters* page. diff --git a/modules/config-htpasswd-idp.adoc b/modules/config-htpasswd-idp.adoc deleted file mode 100644 index beb8b0a5ca1e..000000000000 --- a/modules/config-htpasswd-idp.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_install_access_delete_cluster/config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa-sts-config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-config-identity-providers.adoc - -ifeval::["{context}" == "config-identity-providers"] -:osd-distro: -endif::[] -ifeval::["{context}" == "rosa-sts-config-identity-providers"] -:rosa-distro: -endif::[] -ifeval::["{context}" == "rosa-config-identity-providers"] -:rosa-distro: -endif::[] - -:_content-type: PROCEDURE -[id="config-htpasswd-idp_{context}"] -= Configuring an htpasswd identity provider - -Configure an htpasswd identity provider to create a single, static user with cluster administration privileges. 
You can log in to your cluster as the user to troubleshoot issues. - -[IMPORTANT] -==== -The htpasswd identity provider option is included only to enable the creation of a single, static administration user. htpasswd is not supported as a general-use identity provider for {product-title}. -==== - -.Procedure - -. From {cluster-manager-url}, navigate to the *Clusters* page and select your cluster. - -. Select *Access control* -> *Identity providers*. - -. Click *Add identity provider*. - -. Select *HTPasswd* from the *Identity Provider* drop-down menu. - -. Add a unique name in the *Name* field for the identity provider. - -. Use the suggested username and password for the static user, or create your own. -+ -[NOTE] -==== -The credentials defined in this step are not visible after you select *Add* in the following step. If you lose the credentials, you must recreate the identity provider and define the credentials again. -==== - -. Select *Add* to create the htpasswd identity provider and the single, static user. - -. Grant the static user permission to manage the cluster: -.. Under *Access control* -> *Cluster Roles and Access*, select *Add user*. -.. Enter the *User ID* of the static user that you created in the preceding step. -ifdef::osd-distro[] -.. Select a *Group.* -** If you are installing {product-title} using the Customer Cloud Subscription (CCS) infrastructure type, choose either the `dedicated-admins` or `cluster-admins` group. Users in the `dedicated-admins` group have standard administrative privileges for {product-title}. Users in the `cluster-admins` group have full administrative access to the cluster. -** If you are installing {product-title} using the Red Hat cloud account infrastructure type, the `dedicated-admins` group is automatically selected. -endif::osd-distro[] -ifdef::rosa-distro[] -.. Select a *Group*. Users in the `dedicated-admins` group have standard administrative privileges for {product-title}. Users in the `cluster-admins` group have full administrative access to the cluster. -endif::rosa-distro[] -.. Select *Add user* to grant the administration privileges to the user. - -.Verification - -* The configured htpasswd identity provider is visible on the *Access control* -> *Identity providers* page. -+ -[NOTE] -==== -After creating the identity provider, synchronization usually completes within two minutes. You can log in to the cluster as the user after the htpasswd identity provider becomes available. -==== -* The single, administrative user is visible on the *Access control* -> *Cluster Roles and Access* page. The administration group membership of the user is also displayed. - -ifeval::["{context}" == "config-identity-providers"] -:!osd-distro: -endif::[] -ifeval::["{context}" == "rosa-sts-config-identity-providers"] -:!rosa-distro: -endif::[] -ifeval::["{context}" == "rosa-config-identity-providers"] -:!rosa-distro: -endif::[] diff --git a/modules/config-idp.adoc b/modules/config-idp.adoc deleted file mode 100644 index 576d1083b811..000000000000 --- a/modules/config-idp.adoc +++ /dev/null @@ -1,95 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_getting_started/osd-getting-started.adoc - -:_content-type: PROCEDURE -[id="config-idp_{context}"] -= Configuring an identity provider - -After you have installed {product-title}, you must configure your cluster to use an identity provider. You can then add members to your identity provider to grant them access to your cluster. 
- -You can configure different identity provider types for your {product-title} cluster. Supported types include GitHub, GitHub Enterprise, GitLab, Google, LDAP, OpenID Connect, and htpasswd identity providers. - -[IMPORTANT] -==== -The htpasswd identity provider option is included only to enable the creation of a single, static administration user. htpasswd is not supported as a general-use identity provider for {product-title}. -==== - -The following procedure configures a GitHub identity provider as an example. - -[WARNING] -==== -Configuring GitHub authentication allows users to log in to {product-title} with their GitHub credentials. To prevent anyone with any GitHub user ID from logging in to your {product-title} cluster, you must restrict access to only those in specific GitHub organizations or teams. -==== - -.Prerequisites - -* You logged in to {cluster-manager-url}. -* You created an {product-title} cluster. -* You have a GitHub user account. -* You created a GitHub organization in your GitHub account. For more information, see link:https://docs.github.com/en/organizations/collaborating-with-groups-in-organizations/creating-a-new-organization-from-scratch[Creating a new organization from scratch] in the GitHub documentation. -* If you are restricting user access to a GitHub team, you have created a team within your GitHub organization. For more information, see link:https://docs.github.com/en/organizations/organizing-members-into-teams/creating-a-team[Creating a team] in the GitHub documentation. - -.Procedure - -. Navigate to {cluster-manager-url} and select your cluster. - -. Select *Access control* -> *Identity providers*. - -. Select the *GitHub* identity provider type from the *Add identity provider* drop-down menu. - -. Enter a unique name for the identity provider. The name cannot be changed later. - -. Register an OAuth application in your GitHub organization by following the steps in the link:https://docs.github.com/en/developers/apps/creating-an-oauth-app[GitHub documentation]. -+ -[NOTE] -==== -You must register the OAuth app under your GitHub organization. If you register an OAuth application that is not owned by the organization that contains your cluster users or teams, then user authentication to the cluster will not succeed. -==== - -* For the homepage URL in your GitHub OAuth app configuration, specify the `\https://oauth-openshift.apps.<cluster_name>.<cluster_domain>` portion of the *OAuth callback URL* that is automatically generated in the *Add a GitHub identity provider* page on {cluster-manager}. -+ -The following is an example of a homepage URL for a GitHub identity provider: -+ ----- -https://oauth-openshift.apps.openshift-cluster.example.com ----- - -* For the authorization callback URL in your GitHub OAuth app configuration, specify the full *OAuth callback URL* that is automatically generated in the *Add a GitHub identity provider* page on {cluster-manager}. The full URL has the following syntax: -+ ----- -https://oauth-openshift.apps.<cluster_name>.<cluster_domain>/oauth2callback/<idp_provider_name> ----- - -. Return to the *Edit identity provider: GitHub* dialog in {cluster-manager-url} and select *Claim* from the *Mapping method* drop-down menu. - -. Enter the *Client ID* and *Client secret* for your GitHub OAuth application. The GitHub page for your OAuth app provides the ID and secret. - -. Optional: Enter a *hostname*. -+ -[NOTE] -==== -A hostname must be entered when using a hosted instance of GitHub Enterprise. -==== - -. 
Optional: You can specify a certificate authority (CA) file to validate server certificates for a configured GitHub Enterprise URL. Click *Browse* to locate and attach a *CA file* to the identity provider. - -. Select *Use organizations* or *Use teams* to restrict access to a GitHub organization or a GitHub team within an organization. - -. Enter the name of the organization or team you would like to restrict access to. Click *Add more* to specify multiple organizations or teams. -+ -[NOTE] -==== -Specified organizations must own an OAuth app that was registered by using the preceding steps. If you specify a team, it must exist within an organization that owns an OAuth app that was registered by using the preceding steps. -==== - -. Click *Add* to apply the identity provider configuration. -+ -[NOTE] -==== -It might take approximately two minutes for the identity provider configuration to become active. -==== - -.Verification - -* After the configuration becomes active, the identity provider is listed under *Access control* -> *Identity providers* on the {cluster-manager-url} page for your cluster. diff --git a/modules/config-ldap-idp.adoc b/modules/config-ldap-idp.adoc deleted file mode 100644 index 71b855efff34..000000000000 --- a/modules/config-ldap-idp.adoc +++ /dev/null @@ -1,97 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_install_access_delete_cluster/config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa-sts-config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-config-identity-providers.adoc - -:_content-type: PROCEDURE -[id="config-ldap-idp_{context}"] -= Configuring a LDAP identity provider - - -Configure the LDAP identity provider to validate user names and passwords against an LDAPv3 server, using simple bind authentication. - -.Prerequisites - -* When configuring a LDAP identity provider, you will need to enter a configured *LDAP URL*. The configured URL is an RFC 2255 URL, which specifies the LDAP host and -search parameters to use. The syntax of the URL is: -+ ----- -ldap://host:port/basedn?attribute?scope?filter ----- -+ -[cols="2a,8a",options="header"] -|=== -|URL component | Description -.^|`ldap` | For regular LDAP, use the string `ldap`. For secure LDAP -(LDAPS), use `ldaps` instead. -.^|`host:port` | The name and port of the LDAP server. Defaults to -`localhost:389` for ldap and `localhost:636` for LDAPS. -.^|`basedn` | The DN of the branch of the directory where all searches should -start from. At the very least, this must be the top of your directory tree, but -it could also specify a subtree in the directory. -.^|`attribute` | The attribute to search for. Although RFC 2255 allows a -comma-separated list of attributes, only the first attribute will be used, no -matter how many are provided. If no attributes are provided, the default is to -use `uid`. It is recommended to choose an attribute that will be unique across -all entries in the subtree you will be using. -.^|`scope` | The scope of the search. Can be either `one` or `sub`. -If the scope is not provided, the default is to use a scope of `sub`. -.^|`filter` | A valid LDAP search filter. 
If not provided, defaults to -`(objectClass=*)` -|=== -+ -When doing searches, the attribute, filter, and provided user name are combined -to create a search filter that looks like: -+ ----- -(&(<filter>)(<attribute>=<username>)) ----- -+ -[IMPORTANT] -If the LDAP directory requires authentication to search, specify a `bindDN` and -`bindPassword` to use to perform the entry search. - - -.Procedure - -. From {cluster-manager-url}, navigate to the *Clusters* page and select the cluster that you need to configure identity providers for. - -. Click the *Access control* tab. - -. Click *Add identity provider*. -+ -[NOTE] -==== -You can also click the *Add Oauth configuration* link in the warning message displayed after cluster creation to configure your identity providers. -==== - -. Select *LDAP* from the drop-down menu. - -. Enter a unique name for the identity provider. This name cannot be changed later. - -. Select a mapping method from the drop-down menu. *Claim* is recommended in most cases. - -. Enter a *LDAP URL* to specify the LDAP search parameters to use. - -. Optional: Enter a *Bind DN* and *Bind password*. - -. Enter the attributes that will map LDAP attributes to identities. -** Enter an *ID* attribute whose value should be used as the user ID. Click *Add more* to add multiple ID attributes. -** Optional: Enter a *Preferred username* attribute whose value should be used as the display name. Click *Add more* to add multiple preferred username attributes. -** Optional: Enter an *Email* attribute whose value should be used as the email address. Click *Add more* to add multiple email attributes. - -. Optional: Click *Show advanced Options* to add a certificate authority (CA) file to your LDAP identity provider to validate server certificates for the configured URL. Click *Browse* to locate and attach a *CA file* to the identity provider. - -. Optional: Under the advanced options, you can choose to make the LDAP provider *Insecure*. If you select this option, a CA file cannot be used. -+ -[IMPORTANT] -==== -If you are using an insecure LDAP connection (ldap:// or port 389), then you must check the *Insecure* option in the configuration wizard. -==== - -. Click *Confirm*. - -.Verification - -* The configured identity provider is now visible on the *Access control* tab of the *Clusters* page. diff --git a/modules/config-openid-idp.adoc b/modules/config-openid-idp.adoc deleted file mode 100644 index 00e66b2c8eaa..000000000000 --- a/modules/config-openid-idp.adoc +++ /dev/null @@ -1,108 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_install_access_delete_cluster/config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa-sts-config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-config-identity-providers.adoc - -:_content-type: PROCEDURE -[id="config-openid-idp_{context}"] -= Configuring an OpenID identity provider - - -Configure an OpenID identity provider to integrate with an OpenID Connect identity provider using an link:http://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth[Authorization Code Flow]. - -[IMPORTANT] -==== -The Authentication Operator in {product-title} requires that the configured -OpenID Connect identity provider implements the -link:https://openid.net/specs/openid-connect-discovery-1_0.html[OpenID Connect Discovery] -specification. 
-==== - -Claims are read from the JWT `id_token` returned from the OpenID identity -provider and, if specified, from the JSON returned by the Issuer URL. - -At least one claim must be configured to use as the user's identity. - -You can also indicate which claims to use as the user's preferred user name, -display name, and email address. If multiple claims are specified, the first one -with a non-empty value is used. The standard claims are: - -[cols="1,2",options="header"] -|=== - -|Claim -|Description - -|`preferred_username` -|The preferred user name when provisioning a user. A -shorthand name that the user wants to be referred to as, such as `janedoe`. Typically -a value that corresponding to the user's login or username in the authentication -system, such as username or email. - -|`email` -|Email address. - -|`name` -|Display name. - -|=== - -See the -link:http://openid.net/specs/openid-connect-core-1_0.html#StandardClaims[OpenID claims documentation] -for more information. - -.Prerequisites -* Before you configure OpenID Connect, check the installation prerequisites for any Red Hat product or service you want to use with your {product-title} cluster. - -.Procedure - -. From {cluster-manager-url}, navigate to the *Clusters* page and select the cluster that you need to configure identity providers for. - -. Click the *Access control* tab. - -. Click *Add identity provider*. -+ -[NOTE] -==== -You can also click the *Add Oauth configuration* link in the warning message displayed after cluster creation to configure your identity providers. -==== - -. Select *OpenID* from the drop-down menu. - -. Enter a unique name for the identity provider. This name cannot be changed later. -** An *OAuth callback URL* is automatically generated in the provided field. -+ ----- -https://oauth-openshift.apps.<cluster_name>.<cluster_domain>/oauth2callback/<idp_provider_name> ----- -+ -For example: -+ ----- -https://oauth-openshift.apps.openshift-cluster.example.com/oauth2callback/openid ----- - -. Register a new OpenID Connect client in the OpenID identity provider by following the steps to link:https://openid.net/specs/openid-connect-core-1_0.html#AuthRequest[create an authorization request]. - -. Return to {product-title} and select a mapping method from the drop-down menu. *Claim* is recommended in most cases. - -. Enter a *Client ID* and *Client secret* provided from OpenID. - -. Enter an *Issuer URL*. This is the URL that the OpenID provider asserts as the Issuer Identifier. It must use the https scheme with no URL query parameters or fragments. - -. Enter an *Email* attribute whose value should be used as the email address. Click *Add more* to add multiple email attributes. - -. Enter a *Name* attribute whose value should be used as the preferred username. Click *Add more* to add multiple preferred usernames. - -. Enter a *Preferred username* attribute whose value should be used as the display name. Click *Add more* to add multiple display names. - -. Optional: Click *Show advanced Options* to add a certificate authority (CA) file to your OpenID identity provider. - -. Optional: Under the advanced options, you can add *Additional scopes*. By default, the `OpenID` scope is requested. - -. Click *Confirm*. - -.Verification - -* The configured identity provider is now visible on the *Access control* tab of the *Clusters* page. 
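To illustrate the standard claims listed earlier in this section, the decoded payload of an `id_token` returned by an OpenID provider might look similar to the following example. All values shown are hypothetical:

[source,json]
----
{
  "iss": "https://idp.example.com",
  "sub": "248289761001",
  "preferred_username": "janedoe",
  "name": "Jane Doe",
  "email": "janedoe@example.com"
}
----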
diff --git a/modules/configmap-adding-ca.adoc b/modules/configmap-adding-ca.adoc deleted file mode 100644 index 7240c17f228b..000000000000 --- a/modules/configmap-adding-ca.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/setting-up-trusted-ca - -:_content-type: PROCEDURE -[id="configmap-adding-ca_{context}"] -= Adding certificate authorities to the cluster - -ifdef::openshift-enterprise,openshift-rosa,openshift-dedicated,openshift-webscale,openshift-origin[] -You can add certificate authorities (CA) to the cluster for use when pushing and pulling images with the following procedure. - -.Prerequisites - -ifdef::openshift-rosa[] -* You must have cluster administrator privileges. -endif::[] -ifdef::openshift-dedicated[] -* You must have at least dedicated administrator privileges. -endif::[] -* You must have access to the public certificates of the registry, usually a `hostname/ca.crt` file located in the `/etc/docker/certs.d/` directory. - -.Procedure - -. Create a `ConfigMap` in the `openshift-config` namespace containing the trusted certificates for the registries that use self-signed certificates. For each CA file, ensure the key in the `ConfigMap` is the hostname of the registry in the `hostname[..port]` format: -+ -[source,terminal] ----- -$ oc create configmap registry-cas -n openshift-config \ ---from-file=myregistry.corp.com..5000=/etc/docker/certs.d/myregistry.corp.com:5000/ca.crt \ ---from-file=otherregistry.com=/etc/docker/certs.d/otherregistry.com/ca.crt ----- - -. Update the cluster image configuration: -+ -[source,terminal] ----- -$ oc patch image.config.openshift.io/cluster --patch '{"spec":{"additionalTrustedCA":{"name":"registry-cas"}}}' --type=merge ----- -endif::[] diff --git a/modules/configmap-removing-ca.adoc b/modules/configmap-removing-ca.adoc deleted file mode 100644 index 729ab0cd96be..000000000000 --- a/modules/configmap-removing-ca.adoc +++ /dev/null @@ -1,99 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/setting-up-trusted-ca - -:_content-type: PROCEDURE -[id="configmap-removing-ca_{context}"] -= Removing certificate authorities on a {product-title} cluster - -You can remove certificate authorities (CA) from your cluster with the {product-title} (ROSA) CLI, `rosa`. - -.Prerequisites - -* You must have cluster administrator privileges. -* You have installed the ROSA CLI (`rosa`). -* Your cluster has certificate authorities added. - -.Procedure - -* Use the `rosa edit` command to modify the CA trust bundle. 
You must pass empty strings to the `--additional-trust-bundle-file` argument to clear the trust bundle from the cluster: -+ -[source,terminal] ----- -$ rosa edit cluster -c <cluster_name> --additional-trust-bundle-file "" ----- -+ -.Example Output -+ -[source,yaml] ----- -I: Updated cluster <cluster_name> ----- - -.Verification - -* You can verify that the trust bundle has been removed from the cluster by using the `rosa describe` command: -+ -[source,yaml] ----- -$ rosa describe cluster -c <cluster_name> ----- -+ -Before removal, the Additional trust bundle section appears, redacting its value for security purposes: -+ -[source,yaml,subs="attributes+"] ----- -Name: <cluster_name> -ID: <cluster_internal_id> -External ID: <cluster_external_id> -OpenShift Version: {product-version}.0 -Channel Group: stable -DNS: <dns> -AWS Account: <aws_account_id> -API URL: <api_url> -Console URL: <console_url> -Region: us-east-1 -Multi-AZ: false -Nodes: - - Control plane: 3 - - Infra: 2 - - Compute: 2 -Network: - - Type: OVNKubernetes - - Service CIDR: <service_cidr> - - Machine CIDR: <machine_cidr> - - Pod CIDR: <pod_cidr> - - Host Prefix: <host_prefix> -Proxy: - - HTTPProxy: <proxy_url> -Additional trust bundle: REDACTED ----- -+ -After removing the proxy, the Additional trust bundle section is removed: -+ -[source,yaml,subs="attributes+"] ----- -Name: <cluster_name> -ID: <cluster_internal_id> -External ID: <cluster_external_id> -OpenShift Version: {product-version}.0 -Channel Group: stable -DNS: <dns> -AWS Account: <aws_account_id> -API URL: <api_url> -Console URL: <console_url> -Region: us-east-1 -Multi-AZ: false -Nodes: - - Control plane: 3 - - Infra: 2 - - Compute: 2 -Network: - - Type: OVNKubernetes - - Service CIDR: <service_cidr> - - Machine CIDR: <machine_cidr> - - Pod CIDR: <pod_cidr> - - Host Prefix: <host_prefix> -Proxy: - - HTTPProxy: <proxy_url> ----- diff --git a/modules/configuration-ovnk-network-plugin-json-object.adoc b/modules/configuration-ovnk-network-plugin-json-object.adoc deleted file mode 100644 index c2192071ae4b..000000000000 --- a/modules/configuration-ovnk-network-plugin-json-object.adoc +++ /dev/null @@ -1,50 +0,0 @@ -:_content-type: REFERENCE -[id="configuration-ovnk-network-plugin-json-object_{context}"] -= OVN-Kubernetes network plugin JSON configuration table - -The following table describes the configuration parameters for the OVN-Kubernetes CNI network plugin: - -.OVN-Kubernetes network plugin JSON configuration table -[cols=".^2,.^2,.^6",options="header"] -|==== -|Field|Type|Description - -|`cniVersion` -|`string` -|The CNI specification version. The required value is `0.3.1`. - -|`name` -|`string` -|The name of the network. These networks are not namespaced. For example, you can have a network named -`l2-network` referenced from two different `NetworkAttachmentDefinitions` that exist on two different -namespaces. This ensures that pods making use of the `NetworkAttachmentDefinition` on their own different -namespaces can communicate over the same secondary network. However, those two different `NetworkAttachmentDefinitions` must also share the same network specific parameters such as `topology`, `subnets`, `mtu`, and `excludeSubnets`. - -|`type` -|`string` -|The name of the CNI plugin to configure. The required value is `ovn-k8s-cni-overlay`. - -|`topology` -|`string` -|The topological configuration for the network. The required value is `layer2`. - -|`subnets` -|`string` -| The subnet to use for the network across the cluster. 
When specifying `layer2` for the `topology`, only include the CIDR for the node. For example, `10.100.200.0/24`. - -For `"topology":"layer2"` deployments, IPv6 (`2001:DBB::/64`) and dual-stack (`192.168.100.0/24,2001:DBB::/64`) subnets are supported. - -|`mtu` -|`string` -|The maximum transmission unit (MTU) to the specified value. The default value, `1300`, is automatically set by the kernel. - -|`netAttachDefName` -|`string` -|The metadata `namespace` and `name` of the network attachment definition object where this -configuration is included. For example, if this configuration is defined in a `NetworkAttachmentDefinition` in namespace `ns1` named `l2-network`, this should be set to `ns1/l2-network`. - -|`excludeSubnets` -|`string` -|A comma-separated list of CIDRs and IPs. IPs are removed from the assignable IP pool, and are never passed to the pods. When omitted, the logical switch implementing the network only provides layer 2 communication, and users must configure IPs for the pods. Port security only prevents MAC spoofing. - -|==== \ No newline at end of file diff --git a/modules/configuration-resource-overview.adoc b/modules/configuration-resource-overview.adoc deleted file mode 100644 index ceef1e265043..000000000000 --- a/modules/configuration-resource-overview.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// * TBD - -[id="configuration-resource-overview_{context}"] -= About Configuration Resources in {product-title} - -You perform many customization and configuration tasks after you deploy your -cluster, including configuring networking and setting your identity provider. - -In {product-title}, you modify Configuration Resources to determine the behavior -of these integrations. The Configuration Resources are controlled by Operators -that are managed by the Cluster Version Operator, which manages all of the -Operators that run your cluster's control plane. - -You can customize the following Configuration Resources: - -[cols="3a,8a",options="header"] -|=== - -|Configuration Resource |Description -|Authentication -| - -|DNS -| - -|Samples -| * *ManagementState:* -** *Managed.* The operator updates the samples as the configuration dictates. -** *Unmanaged.* The operator ignores updates to the samples resource object and -any imagestreams or templates in the `openshift` namespace. -** *Removed.* The operator removes the set of managed imagestreams -and templates in the `openshift` namespace. It ignores new samples created by -the cluster administrator or any samples in the skipped lists. After the removals are -complete, the operator works like it is in the `Unmanaged` state and ignores -any watch events on the sample resources, imagestreams, or templates. It -operates on secrets to facilitate the CENTOS to RHEL switch. There are some -caveats around concurrent create and removal. -* *Samples Registry:* Overrides the registry from which images are imported. -* *Architecture:* Place holder to choose an architecture type. Currently only x86 -is supported. -* *Skipped Imagestreams:* Imagestreams that are in the operator's -inventory, but that the cluster administrator wants the operator to ignore or not manage. -* *Skipped Templates:* Templates that are in the operator's inventory, but that -the cluster administrator wants the operator to ignore or not manage. 
- -|Infrastructure -| - -|Ingress -| - -|Network -| - -|OAuth -| - -|=== - -While you can complete many other customizations and configure other integrations -with an {product-title} cluster, configuring these resources is a common first -step after you deploy a cluster. - -Like all Operators, the Configuration Resources are governed by -Custom Resource Definitions (CRD). You customize the CRD for each -Configuration Resource that you want to modify in your cluster. diff --git a/modules/configure-web-terminal-image-admin.adoc b/modules/configure-web-terminal-image-admin.adoc deleted file mode 100644 index 938567a40007..000000000000 --- a/modules/configure-web-terminal-image-admin.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module is included in the following assemblies: -// -// * web_console/web_terminal/configuring-web-terminal.adoc - -:_content-type: PROCEDURE - -[id="configure-web-terminal-image-admin_{context}"] -= Configuring the web terminal image for all users - -You can use the *Administrator* perspective of the web console to set the default web terminal image for all users. - -.Prerequisites - -* You have cluster administrator permissions and are logged in to the web console. -* You have installed the {web-terminal-op}. - -include::snippets/access-cluster-configuration-console.adoc[] - -. Click the *Web Terminal* tab, which opens the *Web Terminal Configuration* page. -. Enter the URL of the image that you want to use. -. Click *Save*. \ No newline at end of file diff --git a/modules/configure-web-terminal-timeout-admin.adoc b/modules/configure-web-terminal-timeout-admin.adoc deleted file mode 100644 index 44a4b39ee51c..000000000000 --- a/modules/configure-web-terminal-timeout-admin.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module is included in the following assemblies: -// -// * web_console/web_terminal/configuring-web-terminal.adoc - -:_content-type: PROCEDURE - -[id="configure-web-terminal-timeout-admin_{context}"] -= Configuring the web terminal timeout for all users - -You can use the *Administrator* perspective of the web console to set the default web terminal timeout period for all users. - -.Prerequisites - -* You have cluster administrator permissions and are logged in to the web console. -* You have installed the {web-terminal-op}. - -include::snippets/access-cluster-configuration-console.adoc[] - -. Click the *Web Terminal* tab, which opens the *Web Terminal Configuration* page. -. Set a value for the timeout. From the drop-down list, select a time interval of *Seconds*, *Minutes*, *Hours*, or *Milliseconds*. -. Click *Save*. \ No newline at end of file diff --git a/modules/configure-workloads.adoc b/modules/configure-workloads.adoc deleted file mode 100644 index fe6bbe2658d0..000000000000 --- a/modules/configure-workloads.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module is included in the following assemblies: -// -// * cicd/gitops/configuring-resource-quota.adoc - -:_content-type: PROCEDURE -[id="configuring-workloads_{context}"] -= Configuring workloads with resource requests and limits - -[role="_abstract"] -You can create Argo CD custom resource workloads with resource requests and limits. This is required when you want to deploy the Argo CD instance in a namespace that is configured with resource quotas. - -The following Argo CD instance deploys the Argo CD workloads such as `Application Controller`, `ApplicationSet Controller`, `Dex`, `Redis`,`Repo Server`, and `Server` with resource requests and limits. 
You can also create the other workloads with resource requirements in the same manner. - -[source,yaml] ----- -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: example -spec: - server: - resources: - limits: - cpu: 500m - memory: 256Mi - requests: - cpu: 125m - memory: 128Mi - route: - enabled: true - applicationSet: - resources: - limits: - cpu: '2' - memory: 1Gi - requests: - cpu: 250m - memory: 512Mi - repo: - resources: - limits: - cpu: '1' - memory: 512Mi - requests: - cpu: 250m - memory: 256Mi - dex: - resources: - limits: - cpu: 500m - memory: 256Mi - requests: - cpu: 250m - memory: 128Mi - redis: - resources: - limits: - cpu: 500m - memory: 256Mi - requests: - cpu: 250m - memory: 128Mi - controller: - resources: - limits: - cpu: '2' - memory: 2Gi - requests: - cpu: 250m - memory: 1Gi ----- - diff --git a/modules/configuring-a-provisioning-resource-to-scale-user-provisioned-clusters.adoc b/modules/configuring-a-provisioning-resource-to-scale-user-provisioned-clusters.adoc deleted file mode 100644 index 72593eab1508..000000000000 --- a/modules/configuring-a-provisioning-resource-to-scale-user-provisioned-clusters.adoc +++ /dev/null @@ -1,74 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/scaling-a-user-provisioned-cluster-with-the-bare-metal-operator.adoc -:_content-type: PROCEDURE - -[id="configuring-a-provisioning-resource-to-scale-user-provisioned-clusters_{context}"] -= Configuring a provisioning resource to scale user-provisioned clusters - -Create a `Provisioning` custom resource (CR) to enable Metal platform components on a user-provisioned infrastructure cluster. - -.Prerequisites - -* You installed a user-provisioned infrastructure cluster on bare metal. - -.Procedure - -. Create a `Provisioning` CR. - -.. Save the following YAML in the `provisioning.yaml` file: -+ -[source,yaml] ----- -apiVersion: metal3.io/v1alpha1 -kind: Provisioning -metadata: - name: provisioning-configuration -spec: - provisioningNetwork: "Disabled" - watchAllNamespaces: false ----- -+ -[NOTE] -==== -{product-title} {product-version} does not support enabling a provisioning network when you scale a user-provisioned cluster by using the Bare Metal Operator. -==== - -. 
Create the `Provisioning` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f provisioning.yaml ----- -+ -.Example output -[source,terminal] ----- -provisioning.metal3.io/provisioning-configuration created ----- - -.Verification - -* Verify that the provisioning service is running by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-machine-api ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cluster-autoscaler-operator-678c476f4c-jjdn5 2/2 Running 0 5d21h -cluster-baremetal-operator-6866f7b976-gmvgh 2/2 Running 0 5d21h -control-plane-machine-set-operator-7d8566696c-bh4jz 1/1 Running 0 5d21h -ironic-proxy-64bdw 1/1 Running 0 5d21h -ironic-proxy-rbggf 1/1 Running 0 5d21h -ironic-proxy-vj54c 1/1 Running 0 5d21h -machine-api-controllers-544d6849d5-tgj9l 7/7 Running 1 (5d21h ago) 5d21h -machine-api-operator-5c4ff4b86d-6fjmq 2/2 Running 0 5d21h -metal3-6d98f84cc8-zn2mx 5/5 Running 0 5d21h -metal3-image-customization-59d745768d-bhrp7 1/1 Running 0 5d21h ----- - diff --git a/modules/configuring-a-proxy-after-installation-cli.adoc b/modules/configuring-a-proxy-after-installation-cli.adoc deleted file mode 100644 index bee9d7ea26e6..000000000000 --- a/modules/configuring-a-proxy-after-installation-cli.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-cluster-wide-proxy.adoc - -:_content-type: PROCEDURE -[id="configuring-a-proxy-after-installation-cli_{context}"] -= Configuring a proxy after installation using the CLI - -You can use the {product-title} (ROSA) CLI (`rosa`) to add a cluster-wide proxy configuration to an existing ROSA cluster in a Virtual Private Cloud (VPC). - -You can also use `rosa` to update an existing cluster-wide proxy configuration. For example, you might need to update the network address for the proxy or replace the additional trust bundle if any of the certificate authorities for the proxy expire. - -[IMPORTANT] -==== -The cluster applies the proxy configuration to the control plane and compute nodes. While applying the configuration, each cluster node is temporarily placed in an unschedulable state and drained of its workloads. Each node is restarted as part of the process. -==== - -.Prerequisites - -* You have installed and configured the latest ROSA (`rosa`) and OpenShift (`oc`) CLIs on your installation host. -* You have a ROSA cluster that is deployed in a VPC. - -.Procedure - -* Edit the cluster configuration to add or update the cluster-wide proxy details: -+ -[source,terminal] ----- -$ rosa edit cluster \ - --cluster $CLUSTER_NAME \ - --additional-trust-bundle-file <path_to_ca_bundle_file> \ <1> <2> <3> - --http-proxy http://<username>:<password>@<ip>:<port> \ <1> <4> - --https-proxy https://<username>:<password>@<ip>:<port> \ <1> <4> - --no-proxy example.com <5> ----- -+ --- -<1> The `additional-trust-bundle-file`, `http-proxy`, and `https-proxy` arguments are all optional. -<2> If you use the `additional-trust-bundle-file` argument without an `http-proxy` or `https-proxy` argument, the trust bundle is added to the trust store and used to verify cluster system egress traffic. In that scenario, the bundle is not configured to be used with a proxy. -<3> The `additional-trust-bundle-file` argument is a file path pointing to a bundle of PEM-encoded X.509 certificates, which are all concatenated together. 
The `additionalTrustBundle` parameter is required unless the identity certificate of the proxy is signed by an authority from the {op-system} trust bundle. If you use an MITM transparent proxy network that does not require additional proxy configuration but requires additional CAs, you must provide the MITM CA certificate.
-+
-[NOTE]
-====
-You should not attempt to change the proxy or additional trust bundle configuration on the cluster directly. These changes must be applied by using the ROSA CLI (`rosa`) or {cluster-manager-first}. Any changes that are made directly to the cluster will be reverted automatically.
-====
-<4> The `http-proxy` and `https-proxy` arguments must point to a valid URL.
-<5> A comma-separated list of destination domain names, IP addresses, or network CIDRs to exclude from proxying.
-+
-Preface a domain with `.` to match subdomains only. For example, `.y.com` matches `x.y.com`, but not `y.com`. Use `*` to bypass the proxy for all destinations.
-If you scale up workers that are not included in the network defined by the `networking.machineNetwork[].cidr` field from the installation configuration, you must add them to this list to prevent connection issues.
-+
-This field is ignored if neither the `httpProxy` nor the `httpsProxy` field is set.
---
-
-.Verification
-
-. List the status of the machine config pools and verify that they are updated:
-+
-[source,terminal]
-----
-$ oc get machineconfigpools
-----
-+
-.Example output
-[source,terminal]
-----
-NAME     CONFIG                                             UPDATED   UPDATING   DEGRADED   MACHINECOUNT   READYMACHINECOUNT   UPDATEDMACHINECOUNT   DEGRADEDMACHINECOUNT   AGE
-master   rendered-master-d9a03f612a432095dcde6dcf44597d90   True      False      False      3              3                   3                     0                      31h
-worker   rendered-worker-f6827a4efe21e155c25c21b43c46f65e   True      False      False      6              6                   6                     0                      31h
-----
-
-. Display the proxy configuration for your cluster and verify that the details are as expected:
-+
-[source,terminal]
-----
-$ oc get proxy cluster -o yaml
-----
-+
-.Example output
-[source,terminal]
-----
-apiVersion: config.openshift.io/v1
-kind: Proxy
-spec:
-  httpProxy: http://proxy.host.domain:<port>
-  httpsProxy: https://proxy.host.domain:<port>
-  <...more...>
-status:
-  httpProxy: http://proxy.host.domain:<port>
-  httpsProxy: https://proxy.host.domain:<port>
-  <...more...>
-----
diff --git a/modules/configuring-a-proxy-after-installation-ocm.adoc b/modules/configuring-a-proxy-after-installation-ocm.adoc
deleted file mode 100644
index 6ede7996208f..000000000000
--- a/modules/configuring-a-proxy-after-installation-ocm.adoc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Module included in the following assemblies:
-//
-// * networking/configuring-cluster-wide-proxy.adoc
-
-:_content-type: PROCEDURE
-[id="configuring-a-proxy-after-installation-ocm_{context}"]
-= Configuring a proxy after installation using {cluster-manager}
-
-You can use {cluster-manager-first} to add a cluster-wide proxy configuration to an existing {product-title} cluster in a Virtual Private Cloud (VPC).
-ifdef::openshift-dedicated[]
-You can enable a proxy only for clusters that use the Customer Cloud Subscription (CCS) model.
-endif::openshift-dedicated[]
-
-You can also use {cluster-manager} to update an existing cluster-wide proxy configuration. For example, you might need to update the network address for the proxy or replace the additional trust bundle if any of the certificate authorities for the proxy expire.
-
-[IMPORTANT]
-====
-The cluster applies the proxy configuration to the control plane and compute nodes.
While applying the configuration, each cluster node is temporarily placed in an unschedulable state and drained of its workloads. Each node is restarted as part of the process. -==== - -.Prerequisites - -* You have an {product-title} cluster -ifdef::openshift-dedicated[] - that uses the Customer Cloud Subscription (CCS) model -endif::openshift-dedicated[] -. -* Your cluster is deployed in a VPC. - -.Procedure - -. Navigate to {cluster-manager-url} and select your cluster. - -. Under the *Virtual Private Cloud (VPC)* section on the *Networking* page, click *Edit cluster-wide proxy*. - -. On the *Edit cluster-wide proxy* page, provide your proxy configuration details: -.. Enter a value in at least one of the following fields: -** Specify a valid *HTTP proxy URL*. -** Specify a valid *HTTPS proxy URL*. -** In the *Additional trust bundle* field, provide a PEM encoded X.509 certificate bundle. If you are replacing an existing trust bundle file, select *Replace file* to view the field. The bundle is added to the trusted certificate store for the cluster nodes. An additional trust bundle file is required unless the identity certificate for the proxy is signed by an authority from the {op-system-first} trust bundle. -+ -If you use an MITM transparent proxy network that does not require additional proxy configuration but requires additional certificate authorities (CAs), you must provide the MITM CA certificate. -+ -[NOTE] -==== -If you upload an additional trust bundle file without specifying an HTTP or HTTPS proxy URL, the bundle is set on the cluster but is not configured to be used with the proxy. -==== -.. Click *Confirm*. - -.Verification - -* Under the *Virtual Private Cloud (VPC)* section on the *Networking* page, verify that the proxy configuration for your cluster is as expected. diff --git a/modules/configuring-a-proxy-during-installation-cli.adoc b/modules/configuring-a-proxy-during-installation-cli.adoc deleted file mode 100644 index 95315d2121a0..000000000000 --- a/modules/configuring-a-proxy-during-installation-cli.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-cluster-wide-proxy.adoc - -:_content-type: PROCEDURE -[id="configuring-a-proxy-during-installation-cli_{context}"] -= Configuring a proxy during installation using the CLI - -If you are installing a {product-title} (ROSA) cluster into an existing Virtual Private Cloud (VPC), you can use the ROSA CLI (`rosa`) to enable a cluster-wide HTTP or HTTPS proxy during installation. - -The following procedure provides details about the ROSA CLI (`rosa`) arguments that are used to configure a cluster-wide proxy during installation. For general installation steps using the ROSA CLI, see _Creating a cluster with customizations using the CLI_. - -.Prerequisites - -* You have verified that the proxy is accessible from the VPC that the cluster is being installed into. The proxy must also be accessible from the private subnets of the VPC. - - -.Procedure -* Specify a proxy configuration when you create your cluster: -+ -[source,terminal] ----- -$ rosa create cluster \ - <other_arguments_here> \ - --additional-trust-bundle-file <path_to_ca_bundle_file> \ <1> <2> <3> - --http-proxy http://<username>:<password>@<ip>:<port> \ <1> <4> - --https-proxy https://<username>:<password>@<ip>:<port> \ <1> <4> - --no-proxy example.com <5> ----- -+ --- -<1> The `additional-trust-bundle-file`, `http-proxy`, and `https-proxy` arguments are all optional. 
-<2> If you use the `additional-trust-bundle-file` argument without an `http-proxy` or `https-proxy` argument, the trust bundle is added to the trust store and used to verify cluster system egress traffic. In that scenario, the bundle is not configured to be used with a proxy.
-<3> The `additional-trust-bundle-file` argument is a file path pointing to a bundle of PEM-encoded X.509 certificates, which are all concatenated together. The `additionalTrustBundle` parameter is required unless the identity certificate of the proxy is signed by an authority from the {op-system} trust bundle. If you use an MITM transparent proxy network that does not require additional proxy configuration but requires additional CAs, you must provide the MITM CA certificate.
-<4> The `http-proxy` and `https-proxy` arguments must point to a valid URL.
-<5> A comma-separated list of destination domain names, IP addresses, or network CIDRs to exclude from proxying.
-+
-Preface a domain with `.` to match subdomains only. For example, `.y.com` matches `x.y.com`, but not `y.com`. Use `*` to bypass the proxy for all destinations.
-If you scale up workers that are not included in the network defined by the `networking.machineNetwork[].cidr` field from the installation configuration, you must add them to this list to prevent connection issues.
-+
-This field is ignored if neither the `httpProxy` nor the `httpsProxy` field is set.
--
\ No newline at end of file
diff --git a/modules/configuring-a-proxy-during-installation-ocm.adoc b/modules/configuring-a-proxy-during-installation-ocm.adoc
deleted file mode 100644
index be66fe5fcd49..000000000000
--- a/modules/configuring-a-proxy-during-installation-ocm.adoc
+++ /dev/null
@@ -1,29 +0,0 @@
-// Module included in the following assemblies:
-//
-// * networking/configuring-cluster-wide-proxy.adoc
-
-:_content-type: CONCEPT
-[id="configuring-a-proxy-during-installation-ocm_{context}"]
-= Configuring a proxy during installation using {cluster-manager}
-
-If you are installing
-ifdef::openshift-dedicated[]
-an {product-title}
-endif::openshift-dedicated[]
-ifdef::openshift-rosa[]
-a {product-title} (ROSA)
-endif::openshift-rosa[]
-cluster into an existing Virtual Private Cloud (VPC), you can use {cluster-manager-first} to enable a cluster-wide HTTP or HTTPS proxy during installation.
-ifdef::openshift-dedicated[]
-You can enable a proxy only for clusters that use the Customer Cloud Subscription (CCS) model.
-endif::openshift-dedicated[]
-
-Prior to the installation, you must verify that the proxy is accessible from the VPC that the cluster is being installed into. The proxy must also be accessible from the private subnets of the VPC.
-
-ifdef::openshift-dedicated[]
-For detailed steps to configure a cluster-wide proxy during installation by using {cluster-manager}, see _Creating a cluster on AWS with CCS_ or _Creating a cluster on GCP with CCS_.
-endif::openshift-dedicated[]
-
-ifdef::openshift-rosa[]
-For detailed steps to configure a cluster-wide proxy during installation by using {cluster-manager}, see _Creating a cluster with customizations by using OpenShift Cluster Manager_.
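As a quick preliminary check, you can confirm from a host inside the VPC that the proxy is reachable and can forward requests before you start the installation. The following `curl` commands are an illustrative sketch only; the proxy address, port, credentials, and test URLs are placeholders that you must replace with values for your environment.

[source,terminal]
----
$ curl -I --proxy http://<username>:<password>@<proxy_ip>:<proxy_port> http://mirror.openshift.com <1>

$ curl -I --proxy http://<username>:<password>@<proxy_ip>:<proxy_port> https://console.redhat.com <2>
----
<1> Verifies that plain HTTP requests are forwarded by the proxy.
<2> Verifies that HTTPS requests can be tunneled through the proxy.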
-endif::openshift-rosa[] diff --git a/modules/configuring-a-proxy-trust-bundle-responsibilities.adoc b/modules/configuring-a-proxy-trust-bundle-responsibilities.adoc deleted file mode 100644 index b0d559d590ea..000000000000 --- a/modules/configuring-a-proxy-trust-bundle-responsibilities.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-cluster-wide-proxy.adoc - -:_content-type: CONCEPT -[id="configuring-a-proxy-trust-bundle-responsibilities_{context}"] -= Responsibilities for additional trust bundles - -If you supply an additional trust bundle, you are responsible for the following requirements: - -* Ensuring that the contents of the additional trust bundle are valid -* Ensuring that the certificates, including intermediary certificates, contained in the additional trust bundle have not expired -* Tracking the expiry and performing any necessary renewals for certificates contained in the additional trust bundle -* Updating the cluster configuration with the updated additional trust bundle diff --git a/modules/configuring-albo-on-sts-cluster-predefined-credentials.adoc b/modules/configuring-albo-on-sts-cluster-predefined-credentials.adoc deleted file mode 100644 index 6e145452cda6..000000000000 --- a/modules/configuring-albo-on-sts-cluster-predefined-credentials.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// * networking/installing-albo-sts-cluster.adoc - -:_content-type: PROCEDURE -[id="nw-installing-albo-on-sts-cluster-predefined-credentials_{context}"] -= Configuring the AWS Load Balancer Operator on Security Token Service cluster by using specific credentials - -You can specify the credential secret by using the `spec.credentials` field in the AWS Load Balancer Controller custom resource (CR). You can use the predefined `CredentialsRequest` object of the controller to know which roles are required. - -.Prerequisites - -* You must extract and prepare the `ccoctl` binary. - -.Procedure - -. Download the CredentialsRequest custom resource (CR) of the AWS Load Balancer Controller, and create a directory to store it by running the following command: -+ -[source,terminal] ----- -$ curl --create-dirs -o <path-to-credrequests-dir>/cr.yaml https://raw.githubusercontent.com/openshift/aws-load-balancer-operator/main/hack/controller/controller-credentials-request.yaml ----- - -. Use the `ccoctl` tool to process the `CredentialsRequest` object of the controller: -+ -[source,terminal] ----- -$ ccoctl aws create-iam-roles \ - --name <name> --region=<aws_region> \ - --credentials-requests-dir=<path-to-credrequests-dir> \ - --identity-provider-arn <oidc-arn> ----- - -. Apply the secrets to your cluster: -+ -[source,terminal] ----- -$ ls manifests/*-credentials.yaml | xargs -I{} oc apply -f {} ----- - -. Verify the credentials secret has been created for use by the controller: -+ -[source,terminal] ----- -$ oc -n aws-load-balancer-operator get secret aws-load-balancer-controller-manual-cluster --template='{{index .data "credentials"}}' | base64 -d ----- -+ -.Example output ----- -[default] - sts_regional_endpoints = regional - role_arn = arn:aws:iam::999999999999:role/aws-load-balancer-operator-aws-load-balancer-controller - web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token ----- - -. 
Create the `AWSLoadBalancerController` resource YAML file, for example, `sample-aws-lb-manual-creds.yaml`, as follows: -+ -[source,yaml] ----- -apiVersion: networking.olm.openshift.io/v1 -kind: AWSLoadBalancerController <1> -metadata: - name: cluster <2> -spec: - credentials: - name: <secret-name> <3> ----- -<1> Defines the `AWSLoadBalancerController` resource. -<2> Defines the AWS Load Balancer Controller instance name. This instance name gets added as a suffix to all related resources. -<3> Specifies the secret name containing AWS credentials that the controller uses. - - diff --git a/modules/configuring-albo-on-sts-cluster.adoc b/modules/configuring-albo-on-sts-cluster.adoc deleted file mode 100644 index fc9d42a54f85..000000000000 --- a/modules/configuring-albo-on-sts-cluster.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// * networking/installing-albo-sts-cluster.adoc - -:_content-type: PROCEDURE -[id="nw-installing-albo-on-sts-cluster_{context}"] -= Configuring AWS Load Balancer Operator on Security Token Service cluster by using managed `CredentialsRequest` objects - -.Prerequisites - -* You must extract and prepare the `ccoctl` binary. - -.Procedure - -. The AWS Load Balancer Operator creates the `CredentialsRequest` object in the `openshift-cloud-credential-operator` namespace for each `AWSLoadBalancerController` custom resource (CR). You can extract and save the created `CredentialsRequest` object in a directory by running the following command: -+ -[source,terminal] ----- -$ oc get credentialsrequest -n openshift-cloud-credential-operator \ - aws-load-balancer-controller-<cr-name> -o yaml > <path-to-credrequests-dir>/cr.yaml <1> ----- -<1> The `aws-load-balancer-controller-<cr-name>` parameter specifies the credential request name created by the AWS Load Balancer Operator. The `cr-name` specifies the name of the AWS Load Balancer Controller instance. - -. Use the `ccoctl` tool to process all `CredentialsRequest` objects in the `credrequests` directory by running the following command: -+ -[source,terminal] ----- -$ ccoctl aws create-iam-roles \ - --name <name> --region=<aws_region> \ - --credentials-requests-dir=<path-to-credrequests-dir> \ - --identity-provider-arn <oidc-arn> ----- - -. Apply the secrets generated in manifests directory to your cluster, by running the following command: -+ -[source,terminal] ----- -$ ls manifests/*-credentials.yaml | xargs -I{} oc apply -f {} ----- - -. Verify that the `aws-load-balancer-controller` pod is created: -+ -[source,terminal] ----- -$ oc -n aws-load-balancer-operator get pods -NAME READY STATUS RESTARTS AGE -aws-load-balancer-controller-cluster-9b766d6-gg82c 1/1 Running 0 137m -aws-load-balancer-operator-controller-manager-b55ff68cc-85jzg 2/2 Running 0 3h26m ----- diff --git a/modules/configuring-cluster-monitoring.adoc b/modules/configuring-cluster-monitoring.adoc deleted file mode 100644 index 03fb61e3e452..000000000000 --- a/modules/configuring-cluster-monitoring.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/recommended-performance-scale-practices/recommended-infrastructure-practices.adoc - -:_content-type: PROCEDURE -[id="configuring-cluster-monitoring_{context}"] -= Configuring cluster monitoring - -[role="_abstract"] -You can increase the storage capacity for the Prometheus component in the cluster monitoring stack. - -.Procedure - -To increase the storage capacity for Prometheus: - -. 
Create a YAML configuration file, `cluster-monitoring-config.yaml`. For example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -data: - config.yaml: | - prometheusK8s: - retention: {{PROMETHEUS_RETENTION_PERIOD}} <1> - nodeSelector: - node-role.kubernetes.io/infra: "" - volumeClaimTemplate: - spec: - storageClassName: {{STORAGE_CLASS}} <2> - resources: - requests: - storage: {{PROMETHEUS_STORAGE_SIZE}} <3> - alertmanagerMain: - nodeSelector: - node-role.kubernetes.io/infra: "" - volumeClaimTemplate: - spec: - storageClassName: {{STORAGE_CLASS}} <2> - resources: - requests: - storage: {{ALERTMANAGER_STORAGE_SIZE}} <4> -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring ----- -<1> A typical value is `PROMETHEUS_RETENTION_PERIOD=15d`. Units are measured in -time using one of these suffixes: s, m, h, d. -<2> The storage class for your cluster. -<3> A typical value is `PROMETHEUS_STORAGE_SIZE=2000Gi`. Storage values can be a -plain integer or as a fixed-point integer using one of these suffixes: E, P, T, -G, M, K. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki. -<4> A typical value is `ALERTMANAGER_STORAGE_SIZE=20Gi`. Storage values can be a -plain integer or as a fixed-point integer using one of these suffixes: E, P, T, -G, M, K. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki. - -. Add values for the retention period, storage class, and storage sizes. - -. Save the file. - -. Apply the changes by running: -+ -[source,terminal] ----- -$ oc create -f cluster-monitoring-config.yaml ----- diff --git a/modules/configuring-default-seccomp-profile.adoc b/modules/configuring-default-seccomp-profile.adoc deleted file mode 100644 index 17b3a5adfa68..000000000000 --- a/modules/configuring-default-seccomp-profile.adoc +++ /dev/null @@ -1,111 +0,0 @@ -// Module included in the following assemblies: -// -// * security/seccomp-profiles.adoc - -:_content-type: PROCEDURE - -[id="verifying-default-seccomp-profile_{context}"] -= Verifying the default seccomp profile applied to a pod - -{product-title} ships with a default seccomp profile that is referenced as `runtime/default`. In {product-version}, newly created pods have the Security Context Constraint (SCC) set to `restricted-v2` and the default seccomp profile applies to the pod. - -.Procedure - -. You can verify the Security Context Constraint (SCC) and the default seccomp profile set on a pod by running the following commands: - -.. Verify what pods are running in the namespace: -+ -[source, terminal] ----- -$ oc get pods -n <namespace> ----- -+ -For example, to verify what pods are running in the `workshop` namespace run the following: -+ -[source, terminal] ----- -$ oc get pods -n workshop ----- -+ -.Example output -+ -[source, terminal] ----- -NAME READY STATUS RESTARTS AGE -parksmap-1-4xkwf 1/1 Running 0 2m17s -parksmap-1-deploy 0/1 Completed 0 2m22s ----- -+ -.. 
Inspect the pods:
-+
-[source, terminal]
-----
-$ oc get pod parksmap-1-4xkwf -n workshop -o yaml
-----
-+
-.Example output
-+
-[source, terminal]
-----
-apiVersion: v1
-kind: Pod
-metadata:
-  annotations:
-    k8s.v1.cni.cncf.io/network-status: |-
-      [{
-        "name": "openshift-sdn",
-        "interface": "eth0",
-        "ips": [
-          "10.131.0.18"
-        ],
-        "default": true,
-        "dns": {}
-      }]
-    openshift.io/deployment-config.latest-version: "1"
-    openshift.io/deployment-config.name: parksmap
-    openshift.io/deployment.name: parksmap-1
-    openshift.io/generated-by: OpenShiftWebConsole
-    openshift.io/scc: restricted-v2 <1>
-    seccomp.security.alpha.kubernetes.io/pod: runtime/default <2>
-----
-<1> The `restricted-v2` SCC is added by default if your workload does not have access to a different SCC.
-<2> Newly created pods in {product-version} have the seccomp profile configured to `runtime/default`, as mandated by the SCC.
-
-[id="upgraded_cluster_{context}"]
-== Upgraded cluster
-
-In clusters upgraded to {product-version}, all authenticated users have access to the `restricted` and `restricted-v2` SCCs.
-
-For example, a workload that was admitted by the `restricted` SCC on a {product-title} 4.10 cluster might be admitted by `restricted-v2` after the upgrade, because `restricted-v2` is the more restrictive of the two SCCs.
-[NOTE]
-====
-The workload must be able to run with `restricted-v2`.
-====
-
-Conversely, a workload that requires `privilegeEscalation: true` continues to be admitted by the `restricted` SCC, which remains available to any authenticated user, because `restricted-v2` does not allow `privilegeEscalation`.
-
-[id="newly_installed_{context}"]
-== Newly installed cluster
-
-For newly installed {product-title} 4.11 or later clusters, the `restricted-v2` SCC replaces the `restricted` SCC as the SCC that is available to any authenticated user. A workload with `privilegeEscalation: true` is not admitted into the cluster because `restricted-v2` is the only SCC available to authenticated users by default.
-
-The `privilegeEscalation` feature is allowed by `restricted` but not by `restricted-v2`. The `restricted-v2` SCC denies more features than the `restricted` SCC allowed.
-
-To admit a workload with `privilegeEscalation: true` into a newly installed {product-title} 4.11 or later cluster, grant the service account that runs the workload access to the `restricted` SCC, or to any other SCC that can admit the workload, by using a role binding. Run the following command:
-
-[source, terminal]
-----
-$ oc -n <workload-namespace> adm policy add-scc-to-user <scc-name> -z <serviceaccount_name>
-----
-
-In {product-title} {product-version}, the ability to add the pod annotations `seccomp.security.alpha.kubernetes.io/pod: runtime/default` and `container.seccomp.security.alpha.kubernetes.io/<container_name>: runtime/default` is deprecated.
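Instead of the deprecated annotations, you can request the default seccomp profile through the pod or container `securityContext` field. The following pod specification is a minimal illustrative sketch of that approach; the pod name, container name, and image are placeholders, not values taken from this documentation.

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: example-seccomp-pod
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault <1>
  containers:
  - name: example
    image: registry.access.redhat.com/ubi9/ubi-minimal
    command: ["sleep", "infinity"]
    securityContext:
      allowPrivilegeEscalation: false
----
<1> Applies the default seccomp profile of the container runtime to every container in the pod, which corresponds to the `runtime/default` value that the deprecated annotations set.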
diff --git a/modules/configuring-dynamic-admission.adoc b/modules/configuring-dynamic-admission.adoc deleted file mode 100644 index 9dccc271ee65..000000000000 --- a/modules/configuring-dynamic-admission.adoc +++ /dev/null @@ -1,386 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/admission-plug-ins.adoc - -:_content-type: PROCEDURE -[id="configuring-dynamic-admission_{context}"] -= Configuring dynamic admission - -This procedure outlines high-level steps to configure dynamic admission. The functionality of the admission chain is extended by configuring a webhook admission plugin to call out to a webhook server. - -The webhook server is also configured as an aggregated API server. This allows other {product-title} components to communicate with the webhook using internal credentials and facilitates testing using the `oc` command. Additionally, this enables role based access control (RBAC) into the webhook and prevents token information from other API servers from being disclosed to the webhook. - -.Prerequisites - -* An {product-title} account with cluster administrator access. -* The {product-title} CLI (`oc`) installed. -* A published webhook server container image. - -.Procedure - -. Build a webhook server container image and make it available to the cluster using an image registry. - -. Create a local CA key and certificate and use them to sign the webhook server's certificate signing request (CSR). - -. Create a new project for webhook resources: -+ -[source,terminal] ----- -$ oc new-project my-webhook-namespace <1> ----- -<1> Note that the webhook server might expect a specific name. - -. Define RBAC rules for the aggregated API service in a file called `rbac.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: List -items: - -- apiVersion: rbac.authorization.k8s.io/v1 <1> - kind: ClusterRoleBinding - metadata: - name: auth-delegator-my-webhook-namespace - roleRef: - kind: ClusterRole - apiGroup: rbac.authorization.k8s.io - name: system:auth-delegator - subjects: - - kind: ServiceAccount - namespace: my-webhook-namespace - name: server - -- apiVersion: rbac.authorization.k8s.io/v1 <2> - kind: ClusterRole - metadata: - annotations: - name: system:openshift:online:my-webhook-server - rules: - - apiGroups: - - online.openshift.io - resources: - - namespacereservations <3> - verbs: - - get - - list - - watch - -- apiVersion: rbac.authorization.k8s.io/v1 <4> - kind: ClusterRole - metadata: - name: system:openshift:online:my-webhook-requester - rules: - - apiGroups: - - admission.online.openshift.io - resources: - - namespacereservations <5> - verbs: - - create - -- apiVersion: rbac.authorization.k8s.io/v1 <6> - kind: ClusterRoleBinding - metadata: - name: my-webhook-server-my-webhook-namespace - roleRef: - kind: ClusterRole - apiGroup: rbac.authorization.k8s.io - name: system:openshift:online:my-webhook-server - subjects: - - kind: ServiceAccount - namespace: my-webhook-namespace - name: server - -- apiVersion: rbac.authorization.k8s.io/v1 <7> - kind: RoleBinding - metadata: - namespace: kube-system - name: extension-server-authentication-reader-my-webhook-namespace - roleRef: - kind: Role - apiGroup: rbac.authorization.k8s.io - name: extension-apiserver-authentication-reader - subjects: - - kind: ServiceAccount - namespace: my-webhook-namespace - name: server - -- apiVersion: rbac.authorization.k8s.io/v1 <8> - kind: ClusterRole - metadata: - name: my-cluster-role - rules: - - apiGroups: - - admissionregistration.k8s.io - resources: - - 
validatingwebhookconfigurations - - mutatingwebhookconfigurations - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch - -- apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: my-cluster-role - roleRef: - kind: ClusterRole - apiGroup: rbac.authorization.k8s.io - name: my-cluster-role - subjects: - - kind: ServiceAccount - namespace: my-webhook-namespace - name: server ----- -<1> Delegates authentication and authorization to the webhook server API. -<2> Allows the webhook server to access cluster resources. -<3> Points to resources. This example points to the `namespacereservations` resource. -<4> Enables the aggregated API server to create admission reviews. -<5> Points to resources. This example points to the `namespacereservations` resource. -<6> Enables the webhook server to access cluster resources. -<7> Role binding to read the configuration for terminating authentication. -<8> Default cluster role and cluster role bindings for an aggregated API server. - -. Apply those RBAC rules to the cluster: -+ -[source,terminal] ----- -$ oc auth reconcile -f rbac.yaml ----- - -. Create a YAML file called `webhook-daemonset.yaml` that is used to deploy a webhook as a daemon set server in a namespace: -+ -[source,yaml] ----- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - namespace: my-webhook-namespace - name: server - labels: - server: "true" -spec: - selector: - matchLabels: - server: "true" - template: - metadata: - name: server - labels: - server: "true" - spec: - serviceAccountName: server - containers: - - name: my-webhook-container <1> - image: <image_registry_username>/<image_path>:<tag> <2> - imagePullPolicy: IfNotPresent - command: - - <container_commands> <3> - ports: - - containerPort: 8443 <4> - volumeMounts: - - mountPath: /var/serving-cert - name: serving-cert - readinessProbe: - httpGet: - path: /healthz - port: 8443 <5> - scheme: HTTPS - volumes: - - name: serving-cert - secret: - defaultMode: 420 - secretName: server-serving-cert ----- -<1> Note that the webhook server might expect a specific container name. -<2> Points to a webhook server container image. Replace `<image_registry_username>/<image_path>:<tag>` with the appropriate value. -<3> Specifies webhook container run commands. Replace `<container_commands>` with the appropriate value. -<4> Defines the target port within pods. This example uses port 8443. -<5> Specifies the port used by the readiness probe. This example uses port 8443. - -. Deploy the daemon set: -+ -[source,terminal] ----- -$ oc apply -f webhook-daemonset.yaml ----- - -. Define a secret for the service serving certificate signer, within a YAML file called `webhook-secret.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: my-webhook-namespace - name: server-serving-cert -type: kubernetes.io/tls -data: - tls.crt: <server_certificate> <1> - tls.key: <server_key> <2> ----- -<1> References the signed webhook server certificate. Replace `<server_certificate>` with the appropriate certificate in base64 format. -<2> References the signed webhook server key. Replace `<server_key>` with the appropriate key in base64 format. - -. Create the secret: -+ -[source,terminal] ----- -$ oc apply -f webhook-secret.yaml ----- - -. 
Define a service account and service, within a YAML file called `webhook-service.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: List -items: - -- apiVersion: v1 - kind: ServiceAccount - metadata: - namespace: my-webhook-namespace - name: server - -- apiVersion: v1 - kind: Service - metadata: - namespace: my-webhook-namespace - name: server - annotations: - service.beta.openshift.io/serving-cert-secret-name: server-serving-cert - spec: - selector: - server: "true" - ports: - - port: 443 <1> - targetPort: 8443 <2> ----- -<1> Defines the port that the service listens on. This example uses port 443. -<2> Defines the target port within pods that the service forwards connections to. This example uses port 8443. - -. Expose the webhook server within the cluster: -+ -[source,terminal] ----- -$ oc apply -f webhook-service.yaml ----- - -. Define a custom resource definition for the webhook server, in a file called `webhook-crd.yaml`: -+ -[source,yaml] ----- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: namespacereservations.online.openshift.io <1> -spec: - group: online.openshift.io <2> - version: v1alpha1 <3> - scope: Cluster <4> - names: - plural: namespacereservations <5> - singular: namespacereservation <6> - kind: NamespaceReservation <7> ----- -<1> Reflects `CustomResourceDefinition` `spec` values and is in the format `<plural>.<group>`. This example uses the `namespacereservations` resource. -<2> REST API group name. -<3> REST API version name. -<4> Accepted values are `Namespaced` or `Cluster`. -<5> Plural name to be included in URL. -<6> Alias seen in `oc` output. -<7> The reference for resource manifests. - -. Apply the custom resource definition: -+ -[source,terminal] ----- -$ oc apply -f webhook-crd.yaml ----- - -. Configure the webhook server also as an aggregated API server, within a file called `webhook-api-service.yaml`: -+ -[source,yaml] ----- -apiVersion: apiregistration.k8s.io/v1beta1 -kind: APIService -metadata: - name: v1beta1.admission.online.openshift.io -spec: - caBundle: <ca_signing_certificate> <1> - group: admission.online.openshift.io - groupPriorityMinimum: 1000 - versionPriority: 15 - service: - name: server - namespace: my-webhook-namespace - version: v1beta1 ----- -<1> A PEM-encoded CA certificate that signs the server certificate that is used by the webhook server. Replace `<ca_signing_certificate>` with the appropriate certificate in base64 format. - -. Deploy the aggregated API service: -+ -[source,terminal] ----- -$ oc apply -f webhook-api-service.yaml ----- - -. Define the webhook admission plugin configuration within a file called `webhook-config.yaml`. This example uses the validating admission plugin: -+ -[source,yaml] ----- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - name: namespacereservations.admission.online.openshift.io <1> -webhooks: -- name: namespacereservations.admission.online.openshift.io <2> - clientConfig: - service: <3> - namespace: default - name: kubernetes - path: /apis/admission.online.openshift.io/v1beta1/namespacereservations <4> - caBundle: <ca_signing_certificate> <5> - rules: - - operations: - - CREATE - apiGroups: - - project.openshift.io - apiVersions: - - "*" - resources: - - projectrequests - - operations: - - CREATE - apiGroups: - - "" - apiVersions: - - "*" - resources: - - namespaces - failurePolicy: Fail ----- -<1> Name for the `ValidatingWebhookConfiguration` object. 
This example uses the `namespacereservations` resource. -<2> Name of the webhook to call. This example uses the `namespacereservations` resource. -<3> Enables access to the webhook server through the aggregated API. -<4> The webhook URL used for admission requests. This example uses the `namespacereservation` resource. -<5> A PEM-encoded CA certificate that signs the server certificate that is used by the webhook server. Replace `<ca_signing_certificate>` with the appropriate certificate in base64 format. - -. Deploy the webhook: -+ -[source,terminal] ----- -$ oc apply -f webhook-config.yaml ----- - -. Verify that the webhook is functioning as expected. For example, if you have configured dynamic admission to reserve specific namespaces, confirm that requests to create those namespaces are rejected and that requests to create non-reserved namespaces succeed. diff --git a/modules/configuring-egress-proxy-edns-operator.adoc b/modules/configuring-egress-proxy-edns-operator.adoc deleted file mode 100644 index 98b318759b6b..000000000000 --- a/modules/configuring-egress-proxy-edns-operator.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/external_dns_operator/nw-configuring-cluster-wide-egress-proxy.adoc - -:_content-type: PROCEDURE -[id="nw-configuring-cluster-wide-proxy_{context}"] -= Configuring the External DNS Operator to trust the certificate authority of the cluster-wide proxy - -You can configure the External DNS Operator to trust the certificate authority of the cluster-wide proxy. - -.Procedure - -. Create the config map to contain the CA bundle in the `external-dns-operator` namespace by running the following command: -+ -[source,terminal] ----- -$ oc -n external-dns-operator create configmap trusted-ca ----- - -. To inject the trusted CA bundle into the config map, add the `config.openshift.io/inject-trusted-cabundle=true` label to the config map by running the following command: -+ -[source,terminal] ----- -$ oc -n external-dns-operator label cm trusted-ca config.openshift.io/inject-trusted-cabundle=true ----- - -. Update the subscription of the External DNS Operator by running the following command: -+ -[source,terminal] ----- -$ oc -n external-dns-operator patch subscription external-dns-operator --type='json' -p='[{"op": "add", "path": "/spec/config", "value":{"env":[{"name":"TRUSTED_CA_CONFIGMAP_NAME","value":"trusted-ca"}]}}]' ----- - -.Verification - -* After the deployment of the External DNS Operator is completed, verify that the trusted CA environment variable is added to the `external-dns-operator` deployment by running the following command: -+ -[source,terminal] ----- -$ oc -n external-dns-operator exec deploy/external-dns-operator -c external-dns-operator -- printenv TRUSTED_CA_CONFIGMAP_NAME ----- -+ -.Example output -[source,terminal] ----- -trusted-ca ----- \ No newline at end of file diff --git a/modules/configuring-egress-proxy.adoc b/modules/configuring-egress-proxy.adoc deleted file mode 100644 index b0b9f0f678ce..000000000000 --- a/modules/configuring-egress-proxy.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/aws_load_balancer_operator/configure-egress-proxy-aws-load-balancer-operator.adoc - -:_content-type: PROCEDURE -[id="nw-configuring-cluster-wide-proxy_{context}"] -= Configuring the AWS Load Balancer Operator to trust the certificate authority of the cluster-wide proxy - -. 
Create the config map to contain the certificate authority (CA) bundle in the `aws-load-balancer-operator` namespace and inject a CA bundle that is trusted by {product-title} into a config map by running the following command: -+ -[source,terminal] ----- -$ oc -n aws-load-balancer-operator create configmap trusted-ca ----- - -. To inject the trusted CA bundle into the config map, add the `config.openshift.io/inject-trusted-cabundle=true` label to the config map by running the following command: -+ -[source,terminal] ----- -$ oc -n aws-load-balancer-operator label cm trusted-ca config.openshift.io/inject-trusted-cabundle=true ----- - -. Update the subscription of the AWS Load Balancer Operator to access the config map in the deployment of the AWS Load Balancer Operator by running the following command: -+ -[source,terminal] ----- -$ oc -n aws-load-balancer-operator patch subscription aws-load-balancer-operator --type='merge' -p '{"spec":{"config":{"env":[{"name":"TRUSTED_CA_CONFIGMAP_NAME","value":"trusted-ca"}],"volumes":[{"name":"trusted-ca","configMap":{"name":"trusted-ca"}}],"volumeMounts":[{"name":"trusted-ca","mountPath":"/etc/pki/tls/certs/albo-tls-ca-bundle.crt","subPath":"ca-bundle.crt"}]}}}' ----- - -. After the deployment of the AWS Load Balancer Operator is completed, verify that the CA bundle is added to the `aws-load-balancer-operator-controller-manager` deployment by running the following command: -+ -[source,terminal] ----- -$ oc -n aws-load-balancer-operator exec deploy/aws-load-balancer-operator-controller-manager -c manager -- bash -c "ls -l /etc/pki/tls/certs/albo-tls-ca-bundle.crt; printenv TRUSTED_CA_CONFIGMAP_NAME" ----- -+ -.Example output -[source,terminal] ----- --rw-r--r--. 1 root 1000690000 5875 Jan 11 12:25 /etc/pki/tls/certs/albo-tls-ca-bundle.crt -trusted-ca ----- - -. Optional: Restart deployment of the AWS Load Balancer Operator every time the config map changes by running the following command: -+ -[source,terminal] ----- -$ oc -n aws-load-balancer-operator rollout restart deployment/aws-load-balancer-operator-controller-manager ----- \ No newline at end of file diff --git a/modules/configuring-firewall.adoc b/modules/configuring-firewall.adoc deleted file mode 100644 index 7f770da794ac..000000000000 --- a/modules/configuring-firewall.adoc +++ /dev/null @@ -1,265 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/install_config/configuring-firewall.adoc - -:_content-type: PROCEDURE -[id="configuring-firewall_{context}"] -= Configuring your firewall for {product-title} - -Before you install {product-title}, you must configure your firewall to grant access to the sites that {product-title} requires. - -There are no special configuration considerations for services running on only controller nodes compared to worker nodes. - -[NOTE] -==== -If your environment has a dedicated load balancer in front of your {product-title} cluster, review the allowlists between your firewall and load balancer to prevent unwanted network restrictions to your cluster. -==== - -.Procedure - -. 
Allowlist the following registry URLs: -+ -[cols="3,2,4",options="header"] -|=== -|URL | Port | Function - -|`registry.redhat.io` -|443, 80 -|Provides core container images - -|`access.redhat.com` -|443, 80 -|Provides core container images - -|`quay.io` -|443, 80 -|Provides core container images - -|`cdn.quay.io` -|443, 80 -|Provides core container images - -|`cdn01.quay.io` -|443, 80 -|Provides core container images - -|`cdn02.quay.io` -|443, 80 -|Provides core container images - -|`cdn03.quay.io` -|443, 80 -|Provides core container images - -|`sso.redhat.com` -|443, 80 -|The `https://console.redhat.com/openshift` site uses authentication from `sso.redhat.com` - -|=== -+ -You can use the wildcards `\*.quay.io` and `*.openshiftapps.com` instead of `cdn0[1-3].quay.io` in your allowlist. When you add a site, such as `quay.io`, to your allowlist, do not add a wildcard entry, such as `*.quay.io`, to your denylist. In most cases, image registries use a content delivery network (CDN) to serve images. If a firewall blocks access, image downloads are denied when the initial download request redirects to a hostname such as `cdn01.quay.io`. - -. Allowlist any site that provides resources for a language or framework that your builds require. - -. If you do not disable Telemetry, you must grant access to the following URLs to access Red Hat Insights: -+ -[cols="3,2,4",options="header"] -|=== -|URL | Port | Function - -|`cert-api.access.redhat.com` -|443, 80 -|Required for Telemetry - -|`api.access.redhat.com` -|443, 80 -|Required for Telemetry - -|`infogw.api.openshift.com` -|443, 80 -|Required for Telemetry - -|`console.redhat.com/api/ingress` -|443, 80 -|Required for Telemetry and for `insights-operator` -|=== - -. If you use Alibaba Cloud, Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) to host your cluster, you must grant access to the URLs that provide the cloud provider API and DNS for that cloud: -+ -[cols="2a,8a,2a,8a",options="header"] -|=== -|Cloud |URL | Port |Function - -|Alibaba -|`*.aliyuncs.com` -|443, 80 -|Required to access Alibaba Cloud services and resources. Review the link:https://github.com/aliyun/alibaba-cloud-sdk-go/blob/master/sdk/endpoints/endpoints_config.go?spm=a2c4g.11186623.0.0.47875873ciGnC8&file=endpoints_config.go[Alibaba endpoints_config.go file] to determine the exact endpoints to allow for the regions that you use. - -.15+|AWS -|`*.amazonaws.com` - -Alternatively, if you choose to not use a wildcard for AWS APIs, you must allowlist the following URLs: -|443, 80 -|Required to access AWS services and resources. Review the link:https://docs.aws.amazon.com/general/latest/gr/rande.html[AWS Service Endpoints] in the AWS documentation to determine the exact endpoints to allow for the regions that you use. - -|`ec2.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`events.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`iam.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`route53.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`s3.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`s3.<aws_region>.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`s3.dualstack.<aws_region>.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. 
- -|`sts.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`sts.<aws_region>.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`tagging.us-east-1.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. This endpoint is always `us-east-1`, regardless of the region the cluster is deployed in. - -|`ec2.<aws_region>.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`elasticloadbalancing.<aws_region>.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`servicequotas.<aws_region>.amazonaws.com` -|443, 80 -|Required. Used to confirm quotas for deploying the service. - -|`tagging.<aws_region>.amazonaws.com` -|443, 80 -|Allows the assignment of metadata about AWS resources in the form of tags. - -.2+|GCP -|`*.googleapis.com` -|443, 80 -|Required to access GCP services and resources. Review link:https://cloud.google.com/endpoints/[Cloud Endpoints] in the GCP documentation to determine the endpoints to allow for your APIs. - -|`accounts.google.com` -|443, 80 -| Required to access your GCP account. - -.4+|Azure -|`management.azure.com` -|443, 80 -|Required to access Azure services and resources. Review the link:https://docs.microsoft.com/en-us/rest/api/azure/[Azure REST API reference] in the Azure documentation to determine the endpoints to allow for your APIs. - -|`*.blob.core.windows.net` -|443, 80 -|Required to download Ignition files. - -|`login.microsoftonline.com` -|443, 80 -|Required to access Azure services and resources. Review the link:https://docs.microsoft.com/en-us/rest/api/azure/[Azure REST API reference] in the Azure documentation to determine the endpoints to allow for your APIs. - -|=== - -. Allowlist the following URLs: -+ -[cols="3,2,4",options="header"] -|=== -|URL | Port | Function - -|`mirror.openshift.com` -|443, 80 -|Required to access mirrored installation content and images. This site is also a source of release image signatures, although the Cluster Version Operator needs only a single functioning source. - -|`storage.googleapis.com/openshift-release` -|443, 80 -|A source of release image signatures, although the Cluster Version Operator needs only a single functioning source. - -|`*.apps.<cluster_name>.<base_domain>` -|443, 80 -|Required to access the default cluster routes unless you set an ingress wildcard during installation. - -|`quayio-production-s3.s3.amazonaws.com` -|443, 80 -|Required to access Quay image content in AWS. - -|`api.openshift.com` -|443, 80 -|Required both for your cluster token and to check if updates are available for the cluster. - -|`rhcos.mirror.openshift.com` -|443, 80 -|Required to download {op-system-first} images. - -|`console.redhat.com/openshift` -|443, 80 -|Required for your cluster token. - -// |`registry.access.redhat.com` -// |443, 80 -// |Required for `odo` CLI. - -|`sso.redhat.com` -|443, 80 -|The `https://console.redhat.com/openshift` site uses authentication from `sso.redhat.com` - -|=== -Operators require route access to perform health checks. Specifically, the -authentication and web console Operators connect to two routes to verify that -the routes work. 
If you are the cluster administrator and do not want to allow -`*.apps.<cluster_name>.<base_domain>`, then allow these routes: -+ -* `oauth-openshift.apps.<cluster_name>.<base_domain>` -* `console-openshift-console.apps.<cluster_name>.<base_domain>`, or the hostname -that is specified in the `spec.route.hostname` field of the -`consoles.operator/cluster` object if the field is not empty. - -. Allowlist the following URLs for optional third-party content: -+ -[cols="3,2,4",options="header"] -|=== -|URL | Port | Function - -|`registry.connect.redhat.com` -|443, 80 -|Required for all third-party images and certified operators. - -|`rhc4tp-prod-z8cxf-image-registry-us-east-1-evenkyleffocxqvofrk.s3.dualstack.us-east-1.amazonaws.com` -|443, 80 -|Provides access to container images hosted on `registry.connect.redhat.com` - -|`oso-rhc4tp-docker-registry.s3-us-west-2.amazonaws.com` -|443, 80 -|Required for Sonatype Nexus, F5 Big IP operators. -|=== -+ -. If you use a default Red Hat Network Time Protocol (NTP) server allow the following URLs: -* `1.rhel.pool.ntp.org` -* `2.rhel.pool.ntp.org` -* `3.rhel.pool.ntp.org` - -[NOTE] -==== -If you do not use a default Red Hat NTP server, verify the NTP server for your platform and allow it in your firewall. -==== diff --git a/modules/configuring-haproxy-interval.adoc b/modules/configuring-haproxy-interval.adoc deleted file mode 100644 index f2cc8051051f..000000000000 --- a/modules/configuring-haproxy-interval.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// * scalability_and_performance/optimization/routing-optimization.adoc -// * post_installation_configuration/network-configuration.adoc - -:_content-type: PROCEDURE -[id="configuring-haproxy-interval_{context}"] -= Configuring HAProxy reload interval - -When you update a route or an endpoint associated with a route, {product-title} router updates the configuration for HAProxy. Then, HAProxy reloads the updated configuration for those changes to take effect. When HAProxy reloads, it generates a new process that handles new connections using the updated configuration. - -HAProxy keeps the old process running to handle existing connections until those connections are all closed. When old processes have long-lived connections, these processes can accumulate and consume resources. - -The default minimum HAProxy reload interval is five seconds. You can configure an Ingress Controller using its `spec.tuningOptions.reloadInterval` field to set a longer minimum reload interval. - -[WARNING] -==== -Setting a large value for the minimum HAProxy reload interval can cause latency in observing updates to routes and their endpoints. To lessen the risk, avoid setting a value larger than the tolerable latency for updates. 
-==== - -.Procedure - -* Change the minimum HAProxy reload interval of the default Ingress Controller to 15 seconds by running the following command: -+ -[source, terminal] ----- -$ oc -n openshift-ingress-operator patch ingresscontrollers/default --type=merge --patch='{"spec":{"tuningOptions":{"reloadInterval":"15s"}}}' ----- diff --git a/modules/configuring-hpa-based-on-application-metrics.adoc b/modules/configuring-hpa-based-on-application-metrics.adoc deleted file mode 100644 index e907e6d862e4..000000000000 --- a/modules/configuring-hpa-based-on-application-metrics.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/configuring-hpa-for-an-application.adoc - -[id="configuring-hpa-based-on-application-metrics_{context}"] -= Configuring HPA based on application metrics - -If you configure an application to export metrics, you can set up Horizontal Pod Autoscaling (HPA) based on these metrics. - -.Procedure - -. Create a YAML file for your configuration. In this example, it is called `deploy.yaml`. - -. Add configuration for deploying the horizontal pod autoscaler for the application. This example configures and deploys HPA based on the application `http_requests_per_second` metric for the sample application configured in the "Application monitoring" section: -+ -[source,yaml] ----- -apiVersion: autoscaling/v2 -kind: HorizontalPodAutoscaler -metadata: - name: example-app-scaler - namespace: default -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: example-app <1> - minReplicas: 3 <2> - maxReplicas: 10 <3> - metrics: - - type: Pods - pods: - metricName: http_requests_per_second <4> - targetAverageValue: 10 <5> ----- -<1> `name` specifies the application. -<2> `minReplicas` specifies the minimum number of replicas for the HPA to maintain for the application. -<3> `maxReplicas` specifies the maximum number of replicas for the HPA to maintain for the application. -<4> `metricName` specifies the metric upon which HPA is based. Here, specify the metric you previously exposed for your application. -<5> `targetAverageValue` specifies the value of the metric for the HPA to try to maintain by increasing or decreasing the number of replicas. - -. Apply the configuration file to the cluster: -+ -[source,terminal] ----- -$ oc apply -f deploy.yaml ----- diff --git a/modules/configuring-huge-pages.adoc b/modules/configuring-huge-pages.adoc deleted file mode 100644 index 91fa8949c382..000000000000 --- a/modules/configuring-huge-pages.adoc +++ /dev/null @@ -1,166 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="configuring-huge-pages_{context}"] -= Configuring huge pages at boot time - -Nodes must pre-allocate huge pages used in an {product-title} cluster. There are two ways of reserving huge pages: at boot time and at run time. Reserving at boot time increases the possibility of success because the memory has not yet been significantly fragmented. The Node Tuning Operator currently supports boot time allocation of huge pages on specific nodes. - -.Procedure - -To minimize node reboots, the order of the steps below needs to be followed: - -. Label all nodes that need the same huge pages setting by a label. -+ -[source,terminal] ----- -$ oc label node <node_using_hugepages> node-role.kubernetes.io/worker-hp= ----- - -. 
Create a file with the following content and name it `hugepages-tuned-boottime.yaml`: -+ -[source,yaml] ----- -apiVersion: tuned.openshift.io/v1 -kind: Tuned -metadata: - name: hugepages <1> - namespace: openshift-cluster-node-tuning-operator -spec: - profile: <2> - - data: | - [main] - summary=Boot time configuration for hugepages - include=openshift-node - [bootloader] - cmdline_openshift_node_hugepages=hugepagesz=2M hugepages=50 <3> - name: openshift-node-hugepages - - recommend: - - machineConfigLabels: <4> - machineconfiguration.openshift.io/role: "worker-hp" - priority: 30 - profile: openshift-node-hugepages ----- -<1> Set the `name` of the Tuned resource to `hugepages`. -<2> Set the `profile` section to allocate huge pages. -<3> Note the order of parameters is important as some platforms support huge pages of various sizes. -<4> Enable machine config pool based matching. - -. Create the Tuned `hugepages` object -+ -[source,terminal] ----- -$ oc create -f hugepages-tuned-boottime.yaml ----- - -. Create a file with the following content and name it `hugepages-mcp.yaml`: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - name: worker-hp - labels: - worker-hp: "" -spec: - machineConfigSelector: - matchExpressions: - - {key: machineconfiguration.openshift.io/role, operator: In, values: [worker,worker-hp]} - nodeSelector: - matchLabels: - node-role.kubernetes.io/worker-hp: "" ----- - -. Create the machine config pool: -+ -[source,terminal] ----- -$ oc create -f hugepages-mcp.yaml ----- - -Given enough non-fragmented memory, all the nodes in the `worker-hp` machine config pool should now have 50 2Mi huge pages allocated. - -[source,terminal] ----- -$ oc get node <node_using_hugepages> -o jsonpath="{.status.allocatable.hugepages-2Mi}" -100Mi ----- - -ifndef::openshift-origin[] -[WARNING] -==== -The TuneD bootloader plugin is currently supported on {op-system-first} 8.x worker nodes. For {op-system-base-full} 7.x worker nodes, the TuneD bootloader plugin is currently not supported. -==== -endif::openshift-origin[] - -//// -For run-time allocation, kubelet changes are needed, see BZ1819719. -== At run time - -.Procedure - -. Label the node so that the Node Tuning Operator knows on which node to apply the tuned profile, which describes how many huge pages should be allocated: -+ -[source,terminal] ----- -$ oc label node <node_using_hugepages> hugepages=true ----- - -. Create a file with the following content and name it `hugepages-tuned-runtime.yaml`: -+ -[source,yaml] ----- -apiVersion: tuned.openshift.io/v1 -kind: Tuned -metadata: - name: hugepages <1> - namespace: openshift-cluster-node-tuning-operator -spec: - profile: <2> - - data: | - [main] - summary=Run time configuration for hugepages - include=openshift-node - [vm] - transparent_hugepages=never - [sysfs] - /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages=50 - name: node-hugepages - - recommend: - - match: <3> - - label: hugepages - priority: 30 - profile: node-hugepages ----- -<1> Set the `name` of the Tuned resource to `hugepages`. -<2> Set the `profile` section to allocate huge pages. -<3> Set the `match` section to associate the profile to nodes with the `hugepages` label. - -. Create the custom `hugepages` tuned profile by using the `hugepages-tuned-runtime.yaml` file: -+ -[source,terminal] ----- -$ oc create -f hugepages-tuned-runtime.yaml ----- - -. 
After creating the profile, the Operator applies the new profile to the correct -node and allocates huge pages. Check the logs of a tuned pod on a node using -huge pages to verify: -+ -[source,terminal] ----- -$ oc logs <tuned_pod_on_node_using_hugepages> \ - -n openshift-cluster-node-tuning-operator | grep 'applied$' | tail -n1 ----- -+ ----- -2019-08-08 07:20:41,286 INFO tuned.daemon.daemon: static tuning from profile 'node-hugepages' applied ----- - -//// diff --git a/modules/configuring-hybrid-ovnkubernetes.adoc b/modules/configuring-hybrid-ovnkubernetes.adoc deleted file mode 100644 index 2c8e01fac4b4..000000000000 --- a/modules/configuring-hybrid-ovnkubernetes.adoc +++ /dev/null @@ -1,158 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_azure/installing-azure-network-customizations.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-network-customizations.adoc -// * networking/ovn_kubernetes_network_provider/configuring-hybrid-networking.adoc - -ifeval::["{context}" == "configuring-hybrid-networking"] -:post-install: -endif::[] - -:_content-type: PROCEDURE -[id="configuring-hybrid-ovnkubernetes_{context}"] -= Configuring hybrid networking with OVN-Kubernetes - -You can configure your cluster to use hybrid networking with the OVN-Kubernetes network plugin. This allows a hybrid cluster that supports different node networking configurations. - -[NOTE] -==== -This configuration is necessary to run both Linux and Windows nodes in the same cluster. -==== - -ifndef::post-install[] -.Prerequisites - -* You defined `OVNKubernetes` for the `networking.networkType` parameter in the `install-config.yaml` file. See the installation documentation for configuring {product-title} network customizations on your chosen cloud provider for more information. - -.Procedure - -. Change to the directory that contains the installation program and create the manifests: -+ -[source,terminal] ----- -$ ./openshift-install create manifests --dir <installation_directory> ----- -+ --- -where: - -`<installation_directory>`:: Specifies the name of the directory that contains the `install-config.yaml` file for your cluster. --- - -. Create a stub manifest file for the advanced network configuration that is named `cluster-network-03-config.yml` in the `<installation_directory>/manifests/` directory: -+ -[source,terminal] ----- -$ cat <<EOF > <installation_directory>/manifests/cluster-network-03-config.yml -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: -EOF ----- -+ --- -where: - -`<installation_directory>`:: Specifies the directory name that contains the -`manifests/` directory for your cluster. --- - -. Open the `cluster-network-03-config.yml` file in an editor and configure OVN-Kubernetes with hybrid networking, such as in the following example: -+ --- -.Specify a hybrid networking configuration -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - defaultNetwork: - ovnKubernetesConfig: - hybridOverlayConfig: - hybridClusterNetwork: <1> - - cidr: 10.132.0.0/14 - hostPrefix: 23 - hybridOverlayVXLANPort: 9898 <2> ----- -<1> Specify the CIDR configuration used for nodes on the additional overlay network. The `hybridClusterNetwork` CIDR cannot overlap with the `clusterNetwork` CIDR. -<2> Specify a custom VXLAN port for the additional overlay network. 
This is required for running Windows nodes in a cluster installed on vSphere, and must not be configured for any other cloud provider. The custom port can be any open port excluding the default `4789` port. For more information on this requirement, see the Microsoft documentation on link:https://docs.microsoft.com/en-us/virtualization/windowscontainers/kubernetes/common-problems#pod-to-pod-connectivity-between-hosts-is-broken-on-my-kubernetes-cluster-running-on-vsphere[Pod-to-pod connectivity between hosts is broken]. --- -+ -[NOTE] -==== -Windows Server Long-Term Servicing Channel (LTSC): Windows Server 2019 is not supported on clusters with a custom `hybridOverlayVXLANPort` value because this Windows server version does not support selecting a custom VXLAN port. -==== - -. Save the `cluster-network-03-config.yml` file and quit the text editor. -. Optional: Back up the `manifests/cluster-network-03-config.yml` file. The -installation program deletes the `manifests/` directory when creating the -cluster. -endif::post-install[] -ifdef::post-install[] -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in to the cluster with a user with `cluster-admin` privileges. -* Ensure that the cluster uses the OVN-Kubernetes network plugin. - -.Procedure - -. To configure the OVN-Kubernetes hybrid network overlay, enter the following command: -+ -[source,terminal] ----- -$ oc patch networks.operator.openshift.io cluster --type=merge \ - -p '{ - "spec":{ - "defaultNetwork":{ - "ovnKubernetesConfig":{ - "hybridOverlayConfig":{ - "hybridClusterNetwork":[ - { - "cidr": "<cidr>", - "hostPrefix": <prefix> - } - ], - "hybridOverlayVXLANPort": <overlay_port> - } - } - } - } - }' ----- -+ --- -where: - -`cidr`:: Specify the CIDR configuration used for nodes on the additional overlay network. This CIDR cannot overlap with the cluster network CIDR. -`hostPrefix`:: Specifies the subnet prefix length to assign to each individual node. For example, if `hostPrefix` is set to `23`, then each node is assigned a `/23` subnet out of the given `cidr`, which allows for 510 (2^(32 - 23) - 2) pod IP addresses. If you are required to provide access to nodes from an external network, configure load balancers and routers to manage the traffic. -`hybridOverlayVXLANPort`:: Specify a custom VXLAN port for the additional overlay network. This is required for running Windows nodes in a cluster installed on vSphere, and must not be configured for any other cloud provider. The custom port can be any open port excluding the default `4789` port. For more information on this requirement, see the Microsoft documentation on link:https://docs.microsoft.com/en-us/virtualization/windowscontainers/kubernetes/common-problems#pod-to-pod-connectivity-between-hosts-is-broken-on-my-kubernetes-cluster-running-on-vsphere[Pod-to-pod connectivity between hosts is broken]. - -[NOTE] -==== -Windows Server Long-Term Servicing Channel (LTSC): Windows Server 2019 is not supported on clusters with a custom `hybridOverlayVXLANPort` value because this Windows server version does not support selecting a custom VXLAN port. -==== --- -+ -.Example output -[source,text] ----- -network.operator.openshift.io/cluster patched ----- - -. To confirm that the configuration is active, enter the following command. It can take several minutes for the update to apply. 
-+ -[source,terminal] ----- -$ oc get network.operator.openshift.io -o jsonpath="{.items[0].spec.defaultNetwork.ovnKubernetesConfig}" ----- -endif::post-install[] - -ifdef::post-install[] -:!post-install: -endif::[] diff --git a/modules/configuring-layer-three-routed-topology.adoc deleted file mode 100644 index 9ad6d548be75..000000000000 --- a/modules/configuring-layer-three-routed-topology.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-additional-network.adoc - -:_content-type: CONCEPT -[id="configuration-layer-three-routed-topology_{context}"] -= Configuration for a routed topology - -The routed (layer 3) topology network is a simplified topology for the cluster default network without egress or ingress. In this topology, there is one logical switch per node, each with a different subnet, and a router interconnecting all logical switches. - -This configuration can be used for IPv6 and dual-stack deployments. - -[NOTE] -==== -* Layer 3 routed topology networks only allow for the transfer of data packets between pods within a cluster. -* Creating a secondary network with an IPv6 subnet or dual-stack subnets fails on a single-stack {product-title} cluster. This is a known limitation and will be fixed in a future version of {product-title}. -==== - -The following `NetworkAttachmentDefinition` custom resource definition (CRD) YAML describes the fields needed to configure a routed secondary network. - -[source,yaml] ----- - { - "cniVersion": "0.3.1", - "name": "ns1-l3-network", - "type": "ovn-k8s-cni-overlay", - "topology":"layer3", - "subnets": "10.128.0.0/16/24", - "mtu": 1300, - "netAttachDefName": "ns1/l3-network" - } ----- \ No newline at end of file diff --git a/modules/configuring-layer-two-switched-topology.adoc deleted file mode 100644 index 90af949198a3..000000000000 --- a/modules/configuring-layer-two-switched-topology.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-additional-network.adoc - -:_content-type: CONCEPT -[id="configuration-layer-two-switched-topology_{context}"] -= Configuration for a switched topology - -The switched (layer 2) topology networks interconnect the workloads through a cluster-wide logical switch. This configuration can be used for IPv6 and dual-stack deployments. - -[NOTE] -==== -Layer 2 switched topology networks only allow for the transfer of data packets between pods within a cluster. -==== - -The following `NetworkAttachmentDefinition` custom resource definition (CRD) YAML describes the fields needed to configure a switched secondary network. 
- -[source,yaml] ----- - { - "cniVersion": "0.3.1", - "name": "l2-network", - "type": "ovn-k8s-cni-overlay", - "topology":"layer2", - "subnets": "10.100.200.0/24", - "mtu": 1300, - "netAttachDefName": "ns1/l2-network", - "excludeSubnets": "10.100.200.0/29" - } ----- \ No newline at end of file diff --git a/modules/configuring-localnet-switched-topology.adoc deleted file mode 100644 index 3db11b0bb4a2..000000000000 --- a/modules/configuring-localnet-switched-topology.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/configuring-secondary-networks.adoc - -:_content-type: REFERENCE -[id="configuration-localnet-switched-topology_{context}"] -= Configuration for a localnet switched topology - -The switched (localnet) topology interconnects the workloads through a cluster-wide logical switch to a physical network. - -The following `NetworkAttachmentDefinition` custom resource definition (CRD) YAML describes the fields needed to configure a localnet secondary network. - -[source,yaml] ----- - { - "cniVersion": "0.3.1", - "name": "ns1-localnet-network", - "type": "ovn-k8s-cni-overlay", - "topology":"localnet", - "subnets": "202.10.130.112/28", - "vlanID": 33, - "mtu": 1500, - "netAttachDefName": "ns1/localnet-network", - "excludeSubnets": "10.100.200.0/29" - } ----- \ No newline at end of file diff --git a/modules/configuring-node-pools-for-hcp.adoc deleted file mode 100644 index 2719819c77cd..000000000000 --- a/modules/configuring-node-pools-for-hcp.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * updates/updating_a_cluster/updating-hosted-control-planes.adoc -// * hosted_control_planes/hcp-managing.adoc - -:_content-type: PROCEDURE -[id="configuring-node-pools-for-hcp_{context}"] -= Configuring node pools for hosted control planes - -On hosted control planes, you can configure node pools by creating a `MachineConfig` object inside of a config map in the management cluster. - -//.Prerequisites - -//Are any prerequisites needed for this procedure? i.e., does the customer need to perform an update first? - -.Procedure - -. To create a `MachineConfig` object inside of a config map in the management cluster, enter the following information: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: <configmap-name> - namespace: clusters -data: - config: | - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - metadata: - labels: - machineconfiguration.openshift.io/role: worker - name: <machineconfig-name> - spec: - config: - ignition: - version: 3.2.0 - storage: - files: - - contents: - source: data:... - mode: 420 - overwrite: true - path: ${PATH} <1> ----- -<1> Sets the path on the node where the `MachineConfig` object is stored. - -. After you add the object to the config map, you can apply the config map to the node pool as follows: -+ -[source,yaml] ----- -spec: - config: - - name: ${CONFIGMAP_NAME} ----- - -//.Verification - -// Does the user need to do anything to verify that the procedure was successful? 
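- -One possible verification, assuming the config map and the node pools for your hosted cluster live in the `clusters` namespace used in the preceding example, is to confirm that the config map exists and that the node pool references it: - -[source,terminal] ----- -$ oc get configmap <configmap-name> -n clusters ----- - -[source,terminal] ----- -$ oc get nodepool -n clusters -o yaml ----- - -In the node pool output, the config map name that you added should appear under `spec.config`.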
- - - - diff --git a/modules/configuring-ovnk-additional-networks.adoc b/modules/configuring-ovnk-additional-networks.adoc deleted file mode 100644 index fbc930409e57..000000000000 --- a/modules/configuring-ovnk-additional-networks.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-additional-network.adoc - -:_content-type: CONCEPT -[id="configuration-ovnk-additional-networks_{context}"] -= Configuration for an OVN-Kubernetes additional network - -The {openshift-networking} OVN-Kubernetes network plugin allows the configuration of secondary network interfaces for pods. To configure secondary network interfaces, you must define the configurations in the `NetworkAttachmentDefinition` custom resource definition (CRD). - -:FeatureName: Configuration for an OVN-Kubernetes additional network -include::snippets/technology-preview.adoc[] - -The following sections provide example configurations for each of the topologies that OVN-Kubernetes currently allows for secondary networks. - -[NOTE] -==== -Networks names must be unique. For example, creating multiple `NetworkAttachmentDefinition` CRDs with different configurations that reference the same network is unsupported. -==== \ No newline at end of file diff --git a/modules/configuring-ovs-log-level-permanently.adoc b/modules/configuring-ovs-log-level-permanently.adoc deleted file mode 100644 index d3a6a5ea1154..000000000000 --- a/modules/configuring-ovs-log-level-permanently.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-network-issues.adoc - -:_content-type: PROCEDURE -[id="configuring-ovs-log-level-permanently_{context}"] -= Configuring the Open vSwitch log level permanently - -For long-term changes to the Open vSwitch (OVS) log level, you can change the log level permanently. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Create a file, such as `99-change-ovs-loglevel.yaml`, with a `MachineConfig` object like the following example: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: master <1> - name: 99-change-ovs-loglevel -spec: - config: - ignition: - version: 3.2.0 - systemd: - units: - - dropins: - - contents: | - [Service] - ExecStartPost=-/usr/bin/ovs-appctl vlog/set syslog:dbg <2> - ExecReload=-/usr/bin/ovs-appctl vlog/set syslog:dbg - name: 20-ovs-vswitchd-restart.conf - name: ovs-vswitchd.service ----- -<1> After you perform this procedure to configure control plane nodes, repeat the procedure and set the role to `worker` to configure worker nodes. -<2> Set the `syslog:<log_level>` value. Log levels are `off`, `emer`, `err`, `warn`, `info`, or `dbg`. Setting the value to `off` filters out all log messages. - -. 
Apply the machine config: -+ -[source,terminal] ----- -$ oc apply -f 99-change-ovs-loglevel.yaml ----- - -ifdef::ign-config-version[] -:!ign-config-version: -endif::[] - diff --git a/modules/configuring-ovs-log-level-temp.adoc b/modules/configuring-ovs-log-level-temp.adoc deleted file mode 100644 index a91b89b10c13..000000000000 --- a/modules/configuring-ovs-log-level-temp.adoc +++ /dev/null @@ -1,95 +0,0 @@ -:_content-type: PROCEDURE -[id="configuring-ovs-log-level-temp_{context}"] -= Configuring the Open vSwitch log level temporarily - -For short-term troubleshooting, you can configure the Open vSwitch (OVS) log level temporarily. -The following procedure does not require rebooting the node. -In addition, the configuration change does not persist whenever you reboot the node. - -After you perform this procedure to change the log level, you can receive log messages from the machine config daemon that indicate a content mismatch for the `ovs-vswitchd.service`. -To avoid the log messages, repeat this procedure and set the log level to the original value. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Start a debug pod for a node: -+ -[source,terminal] ----- -$ oc debug node/<node_name> ----- - -. Set `/host` as the root directory within the debug shell. The debug pod mounts the root file system from the host in `/host` within the pod. By changing the root directory to `/host`, you can run binaries from the host file system: -+ -[source,terminal] ----- -# chroot /host ----- - -. View the current syslog level for OVS modules: -+ -[source,terminal] ----- -# ovs-appctl vlog/list ----- -+ -The following example output shows the log level for syslog set to `info`. -+ -.Example output -[source,terminal] ----- - console syslog file - ------- ------ ------ -backtrace OFF INFO INFO -bfd OFF INFO INFO -bond OFF INFO INFO -bridge OFF INFO INFO -bundle OFF INFO INFO -bundles OFF INFO INFO -cfm OFF INFO INFO -collectors OFF INFO INFO -command_line OFF INFO INFO -connmgr OFF INFO INFO -conntrack OFF INFO INFO -conntrack_tp OFF INFO INFO -coverage OFF INFO INFO -ct_dpif OFF INFO INFO -daemon OFF INFO INFO -daemon_unix OFF INFO INFO -dns_resolve OFF INFO INFO -dpdk OFF INFO INFO -... ----- - -. Specify the log level in the `/etc/systemd/system/ovs-vswitchd.service.d/10-ovs-vswitchd-restart.conf` file: -+ -[source,text] ----- -Restart=always -ExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /var/lib/openvswitch' -ExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /etc/openvswitch' -ExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /run/openvswitch' -ExecStartPost=-/usr/bin/ovs-appctl vlog/set syslog:dbg -ExecReload=-/usr/bin/ovs-appctl vlog/set syslog:dbg ----- -+ -In the preceding example, the log level is set to `dbg`. -Change the last two lines by setting `syslog:<log_level>` to `off`, `emer`, `err`, `warn`, `info`, or `dbg`. The `off` log level filters out all log messages. - -. 
Restart the service: -+ -[source,terminal] ----- -# systemctl daemon-reload ----- -+ -[source,terminal] ----- -# systemctl restart ovs-vswitchd ----- - diff --git a/modules/configuring-pods-secondary-network.adoc b/modules/configuring-pods-secondary-network.adoc deleted file mode 100644 index ff0c5becc876..000000000000 --- a/modules/configuring-pods-secondary-network.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-additional-network.adoc - -:_content-type: REFERENCE -[id="configuring-pods-secondary-network_{context}"] -= Configuring pods for additional networks - -You must specify the secondary network attachments through the `k8s.v1.cni.cncf.io/networks` annotation. - -The following example provisions a pod with two secondary attachments, one for each of the attachment configurations presented in this guide. - -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - annotations: - k8s.v1.cni.cncf.io/networks: l2-network - name: tinypod - namespace: ns1 -spec: - containers: - - args: - - pause - image: k8s.gcr.io/e2e-test-images/agnhost:2.36 - imagePullPolicy: IfNotPresent - name: agnhost-container ----- \ No newline at end of file diff --git a/modules/configuring-pods-static-ip.adoc b/modules/configuring-pods-static-ip.adoc deleted file mode 100644 index 7c463ef9e61d..000000000000 --- a/modules/configuring-pods-static-ip.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-additional-network.adoc - -:_content-type: CONCEPT -[id="configuring-pods-static-ip_{context}"] -= Configuring pods with a static IP address - -The following example provisions a pod with a static IP address. - -[NOTE] -==== -* You can only specify the IP address for a pod's secondary network attachment for layer 2 attachments. -* Specifying a static IP address for the pod is only possible when the attachment configuration does not feature subnets. -==== - -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - annotations: - k8s.v1.cni.cncf.io/networks: '[ - { - "name": "l2-network", <1> - "mac": "02:03:04:05:06:07", <2> - "interface": "myiface1", <3> - "ips": [ - "192.0.2.20/24" - ] <4> - } - ]' - name: tinypod - namespace: ns1 -spec: - containers: - - args: - - pause - image: k8s.gcr.io/e2e-test-images/agnhost:2.36 - imagePullPolicy: IfNotPresent - name: agnhost-container ----- -<1> The name of the network. This value must be unique across all `NetworkAttachmentDefinitions`. -<2> The MAC address to be assigned for the interface. -<3> The name of the network interface to be created for the pod. -<4> The IP addresses to be assigned to the network interface. diff --git a/modules/configuring-secret-for-wmco.adoc b/modules/configuring-secret-for-wmco.adoc deleted file mode 100644 index ac87fcdf07f5..000000000000 --- a/modules/configuring-secret-for-wmco.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * windows_containers/enabling-windows-container-workloads.adoc - -:_content-type: PROCEDURE -[id="configuring-secret-for-wmco_{context}"] -= Configuring a secret for the Windows Machine Config Operator - -To run the Windows Machine Config Operator (WMCO), you must create a secret in the WMCO namespace containing a private key. This is required to allow the WMCO to communicate with the Windows virtual machine (VM). 
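- -If you do not already have a PEM-encoded RSA private key for this purpose, one way to generate one is sketched in the following command, where `<key>` is a file name of your choice that you reference again when you create the secret: - -[source,terminal] ----- -$ ssh-keygen -t rsa -b 4096 -m PEM -f ${HOME}/.ssh/<key> -N '' ----- 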
- -.Prerequisites - -* You installed the Windows Machine Config Operator (WMCO) using Operator Lifecycle Manager (OLM). -* You created a PEM-encoded file containing an RSA key. - -.Procedure - -* Define the secret required to access the Windows VMs: -+ -[source,terminal] ----- -$ oc create secret generic cloud-private-key --from-file=private-key.pem=${HOME}/.ssh/<key> \ - -n openshift-windows-machine-config-operator <1> ----- - -<1> You must create the private key in the WMCO namespace, like `openshift-windows-machine-config-operator`. - -It is recommended to use a different private key than the one used when installing the cluster. diff --git a/modules/configuring-user-level-access.adoc b/modules/configuring-user-level-access.adoc deleted file mode 100644 index 0309ba573a69..000000000000 --- a/modules/configuring-user-level-access.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module is included in the following assemblies: -// -// * installing-red-hat-openshift-gitops - -:_content-type: PROCEDURE -[id="configuring-user-level-access_{context}"] -= Configuring user level access - -[role="_abstract"] -To manage and modify the user level access, configure the RBAC section in Argo CD custom resource. - -.Procedure - -* Edit the `argocd` Custom Resource: -+ -[source,terminal] ----- -$ oc edit argocd [argocd-instance-name] -n [namespace] ----- -.Output -+ -[source,yaml] ----- -metadata -... -... - rbac: - policy: 'g, rbacsystem:cluster-admins, role:admin' - scopes: '[groups]' ----- -+ -* Add the `policy` configuration to the `rbac` section and add the `name`, `email` and the `role` of the user: -+ -[source,yaml] ----- -metadata -... -... -rbac: - policy: <name>, <email>, role:<admin> - scopes: '[groups]' ----- - -[NOTE] -==== -Currently, RHSSO cannot read the group information of {gitops-title} users. Therefore, configure the RBAC at the user level. -==== diff --git a/modules/configuring-vsphere-connection-settings.adoc b/modules/configuring-vsphere-connection-settings.adoc deleted file mode 100644 index 2d4940e61ef8..000000000000 --- a/modules/configuring-vsphere-connection-settings.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_vsphere/installing-vsphere-post-installation-configuration.adoc - -:_content-type: PROCEDURE -[id="configuring-vSphere-connection-settings_{context}"] -= Configuring the vSphere connection settings - -[role="_abstract"] -Modify the following vSphere configuration settings as required: - -* vCenter address -* vCenter cluster -* vCenter username -* vCenter password -* vCenter address -* vSphere data center -* vSphere datastore -* Virtual machine folder - -.Prerequisites -* The {ai-full} has finished installing the cluster successfully. -* The cluster is connected to `https://console.redhat.com`. - -.Procedure -. In the Administrator perspective, navigate to *Home -> Overview*. -. Under *Status*, click *vSphere connection* to open the *vSphere connection configuration* wizard. -. In the *vCenter* field, enter the network address of the vSphere vCenter server. This can be either a domain name or an IP address. It appears in the vSphere web client URL; for example `https://[your_vCenter_address]/ui`. -. In the *vCenter cluster* field, enter the name of the vSphere vCenter cluster where {product-title} is installed. -+ -[IMPORTANT] -==== -This step is mandatory if you installed {product-title} 4.13 or later. -==== - -. In the *Username* field, enter your vSphere vCenter username. -. 
In the *Password* field, enter your vSphere vCenter password. -+ -[WARNING] -==== -The system stores the username and password in the `vsphere-creds` secret in the `kube-system` namespace of the cluster. An incorrect vCenter username or password makes the cluster nodes unschedulable. -==== -+ -. In the *Datacenter* field, enter the name of the vSphere data center that contains the virtual machines used to host the cluster; for example, `SDDC-Datacenter`. -. In the *Default data store* field, enter the path and name of the vSphere data store that stores the persistent data volumes; for example, `/SDDC-Datacenter/datastore/datastorename`. -+ -[WARNING] -==== -Updating the vSphere data center or default data store after the configuration has been saved detaches any active vSphere `PersistentVolumes`. -==== -+ -. In the *Virtual Machine Folder* field, enter the data center folder that contains the virtual machine of the cluster; for example, `/SDDC-Datacenter/vm/ci-ln-hjg4vg2-c61657-t2gzr`. For the {product-title} installation to succeed, all virtual machines comprising the cluster must be located in a single data center folder. -. Click *Save Configuration*. This updates the `cloud-provider-config` ConfigMap resource in the `openshift-config` namespace, and starts the configuration process. -. Reopen the *vSphere connection configuration* wizard and expand the *Monitored operators* panel. Check that the status of the operators is either *Progressing* or *Healthy*. diff --git a/modules/configuring-vsphere-regions-zones.adoc b/modules/configuring-vsphere-regions-zones.adoc deleted file mode 100644 index 47ad37085ed1..000000000000 --- a/modules/configuring-vsphere-regions-zones.adoc +++ /dev/null @@ -1,131 +0,0 @@ -// Module included in the following assemblies: -// -//* installing/Installing-vsphere-installer-provisioned-customizations.adoc [IPI] -//* installing/installing-vsphere-installer-provisioned-network-customizations.adoc [IPI] -//* installing/installing-vsphere.adoc [UPI] -//* installing/installing-vsphere-network-customizations.adoc [UPI] -//* installing/installing-restricted-networks-installer-provisioned-vsphere.adoc [IPI] -//* installing/installing-restricted-networks-vsphere.adoc [IPI] - -:_content-type: PROCEDURE -[id="configuring-vsphere-regions-zones_{context}"] -= Configuring regions and zones for a VMware vCenter -You can modify the default installation configuration file, so that you can deploy an {product-title} cluster to multiple vSphere datacenters that run in a single VMware vCenter. - -The default `install-config.yaml` file configuration from the previous release of {product-title} is deprecated. You can continue to use the deprecated default configuration, but the `openshift-installer` will prompt you with a warning message that indicates the use of deprecated fields in the configuration file. - -[IMPORTANT] -==== -The example uses the `govc` command. The `govc` command is an open source command available from VMware; it is not available from Red Hat. The Red Hat support team does not maintain the `govc` command. Instructions for downloading and installing `govc` are found on the VMware documentation website -==== - -.Prerequisites -* You have an existing `install-config.yaml` installation configuration file. -+ -[IMPORTANT] -==== -You must specify at least one failure domain for your {product-title} cluster, so that you can provision datacenter objects for your VMware vCenter server. 
Consider specifying multiple failure domains if you need to provision virtual machine nodes in different datacenters, clusters, datastores, and other components. -==== - -.Procedure - -. Enter the following `govc` command-line tool commands to create the `openshift-region` and `openshift-zone` vCenter tag categories: -+ -[IMPORTANT] -==== -If you specify different names for the `openshift-region` and `openshift-zone` vCenter tag categories, the installation of the {product-title} cluster fails. -==== -+ -[source,terminal] ----- -$ govc tags.category.create -d "OpenShift region" openshift-region ----- -+ -[source,terminal] ----- -$ govc tags.category.create -d "OpenShift zone" openshift-zone ----- - -. To create a region tag for each region vSphere datacenter where you want to deploy your cluster, enter the following command in your terminal: -+ -[source,terminal] ----- -$ govc tags.create -c <region_tag_category> <region_tag> ----- - -. To create a zone tag for each vSphere cluster where you want to deploy your cluster, enter the following command: -+ -[source,terminal] ----- -$ govc tags.create -c <zone_tag_category> <zone_tag> ----- - -. Attach region tags to each vCenter datacenter object by entering the following command: -+ -[source,terminal] ----- -$ govc tags.attach -c <region_tag_category> <region_tag_1> /<datacenter_1> ----- - -. Attach the zone tags to each vCenter datacenter object by entering the following command: -+ -[source,terminal] ----- -$ govc tags.attach -c <zone_tag_category> <zone_tag_1> /<datacenter_1>/host/vcs-mdcnc-workload-1 ----- - -. Change to the directory that contains the installation program and initialize the cluster deployment according to your chosen installation requirements. - -.Sample `install-config.yaml` file with multiple datacenters defined in a vSphere center - -[source,yaml] ----- ---- -compute: ---- - vsphere: - zones: - - "<machine_pool_zone_1>" - - "<machine_pool_zone_2>" ---- -controlPlane: ---- -vsphere: - zones: - - "<machine_pool_zone_1>" - - "<machine_pool_zone_2>" ---- -platform: - vsphere: - vcenters: ---- - datacenters: - - <datacenter1_name> - - <datacenter2_name> - failureDomains: - - name: <machine_pool_zone_1> - region: <region_tag_1> - zone: <zone_tag_1> - server: <fully_qualified_domain_name> - topology: - datacenter: <datacenter1> - computeCluster: "/<datacenter1>/host/<cluster1>" - networks: - - <VM_Network1_name> - datastore: "/<datacenter1>/datastore/<datastore1>" - resourcePool: "/<datacenter1>/host/<cluster1>/Resources/<resourcePool1>" - folder: "/<datacenter1>/vm/<folder1>" - - name: <machine_pool_zone_2> - region: <region_tag_2> - zone: <zone_tag_2> - server: <fully_qualified_domain_name> - topology: - datacenter: <datacenter2> - computeCluster: "/<datacenter2>/host/<cluster2>" - networks: - - <VM_Network2_name> - datastore: "/<datacenter2>/datastore/<datastore2>" - resourcePool: "/<datacenter2>/host/<cluster2>/Resources/<resourcePool2>" - folder: "/<datacenter2>/vm/<folder2>" ---- ----- diff --git a/modules/configuring-vsphere-verifying-configuration.adoc b/modules/configuring-vsphere-verifying-configuration.adoc deleted file mode 100644 index c8cfa6cd1c44..000000000000 --- a/modules/configuring-vsphere-verifying-configuration.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_vsphere/installing-vsphere-post-installation-configuration.adoc - -:_content-type: PROCEDURE -[id="configuring-vSphere-monitoring-configuration-completion{context}"] -= Verifying the 
configuration - -The connection configuration process updates operator statuses and control plane nodes. It takes approximately an hour to complete. During the configuration process, the nodes will reboot. Previously bound `PersistentVolumeClaims` objects might become disconnected. - -.Prerequisites -* You have saved the configuration settings in the *vSphere connection configuration* wizard. - -.Procedure - -. Check that the configuration process completed successfully: -+ --- -.. In the OpenShift Container Platform Administrator perspective, navigate to *Home -> Overview*. -.. Under *Status*, click *Operators*. Wait for all operator statuses to change from *Progressing* to *All succeeded*. A *Failed* status indicates that the configuration failed. -.. Under *Status*, click *Control Plane*. Wait for the response rate of all Control Plane components to return to 100%. A *Failed* control plane component indicates that the configuration failed. --- -A failure indicates that at least one of the connection settings is incorrect. Change the settings in the *vSphere connection configuration* wizard and save the configuration again. - -. Check that you are able to bind `PersistentVolumeClaims` objects by performing the following steps: - -.. Create a `StorageClass` object using the following YAML: -+ -[source,yaml] ----- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: vsphere-sc -provisioner: kubernetes.io/vsphere-volume -parameters: - datastore: YOURVCENTERDATASTORE - diskformat: thin -reclaimPolicy: Delete -volumeBindingMode: Immediate ----- -.. Create a `PersistentVolumeClaims` object using the following YAML: -+ -[source,yaml] ----- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: test-pvc - namespace: openshift-config - annotations: - volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/vsphere-volume - finalizers: - - kubernetes.io/pvc-protection -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 10Gi - storageClassName: vsphere-sc - volumeMode: Filesystem ----- -+ -If you are unable to create a `PersistentVolumeClaims` object, you can troubleshoot by navigating to *Storage* -> *PersistentVolumeClaims* in the *Administrator* perspective of the {product-title} web console. \ No newline at end of file diff --git a/modules/configuring_hyperthreading_for_a_cluster.adoc deleted file mode 100644 index fe86e7445ada..000000000000 --- a/modules/configuring_hyperthreading_for_a_cluster.adoc +++ /dev/null @@ -1,130 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/cnf-low-latency-tuning.adoc - -:_content-type: PROCEDURE -[id="configuring_hyperthreading_for_a_cluster_{context}"] -= Configuring hyperthreading for a cluster - -To configure hyperthreading for an {product-title} cluster, set the CPU threads in the performance profile to the same cores that are configured for the reserved or isolated CPU pools. - -[NOTE] -==== -If you configure a performance profile, and subsequently change the hyperthreading configuration for the host, ensure that you update the CPU `isolated` and `reserved` fields in the `PerformanceProfile` YAML to match the new configuration. -==== - -[WARNING] -==== -Disabling a previously enabled host hyperthreading configuration can cause the CPU core IDs listed in the `PerformanceProfile` YAML to be incorrect. 
This incorrect configuration can cause the node to become unavailable because the listed CPUs can no longer be found. -==== - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. -* Install the OpenShift CLI (oc). - -.Procedure - -. Ascertain which threads are running on what CPUs for the host you want to configure. -+ -You can view which threads are running on the host CPUs by logging in to the cluster and running the following command: -+ -[source,terminal] ----- -$ lscpu --all --extended ----- -+ -.Example output -+ -[source,terminal] ----- -CPU NODE SOCKET CORE L1d:L1i:L2:L3 ONLINE MAXMHZ MINMHZ -0 0 0 0 0:0:0:0 yes 4800.0000 400.0000 -1 0 0 1 1:1:1:0 yes 4800.0000 400.0000 -2 0 0 2 2:2:2:0 yes 4800.0000 400.0000 -3 0 0 3 3:3:3:0 yes 4800.0000 400.0000 -4 0 0 0 0:0:0:0 yes 4800.0000 400.0000 -5 0 0 1 1:1:1:0 yes 4800.0000 400.0000 -6 0 0 2 2:2:2:0 yes 4800.0000 400.0000 -7 0 0 3 3:3:3:0 yes 4800.0000 400.0000 ----- -+ -In this example, there are eight logical CPU cores running on four physical CPU cores. CPU0 and CPU4 are running on physical Core0, CPU1 and CPU5 are running on physical Core 1, and so on. -+ -Alternatively, to view the threads that are set for a particular physical CPU core (`cpu0` in the example below), open a command prompt and run the following: -+ -[source,terminal] ----- -$ cat /sys/devices/system/cpu/cpu0/topology/thread_siblings_list ----- -+ -.Example output -+ -[source,terminal] ----- -0-4 ----- - -. Apply the isolated and reserved CPUs in the `PerformanceProfile` YAML. For example, you can set logical cores CPU0 and CPU4 as `isolated`, and logical cores CPU1 to CPU3 and CPU5 to CPU7 as `reserved`. When you configure reserved and isolated CPUs, the infra containers in pods use the reserved CPUs and the application containers use the isolated CPUs. -+ -[source,yaml] ----- -... - cpu: - isolated: 0,4 - reserved: 1-3,5-7 -... ----- -+ -[NOTE] -==== -The reserved and isolated CPU pools must not overlap and together must span all available cores in the worker node. -==== - -[IMPORTANT] -==== -Hyperthreading is enabled by default on most Intel processors. If you enable hyperthreading, all threads processed by a particular core must be isolated or processed on the same core. -==== - -[id="disabling_hyperthreading_for_low_latency_applications_{context}"] -== Disabling hyperthreading for low latency applications - -When configuring clusters for low latency processing, consider whether you want to disable hyperthreading before you deploy the cluster. To disable hyperthreading, do the following: - -. Create a performance profile that is appropriate for your hardware and topology. -. Set `nosmt` as an additional kernel argument. The following example performance profile illustrates this setting: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: example-performanceprofile -spec: - additionalKernelArgs: - - nmi_watchdog=0 - - audit=0 - - mce=off - - processor.max_cstate=1 - - idle=poll - - intel_idle.max_cstate=0 - - nosmt - cpu: - isolated: 2-3 - reserved: 0-1 - hugepages: - defaultHugepagesSize: 1G - pages: - - count: 2 - node: 0 - size: 1G - nodeSelector: - node-role.kubernetes.io/performance: '' - realTimeKernel: - enabled: true ----- -+ -[NOTE] -==== -When you configure reserved and isolated CPUs, the infra containers in pods use the reserved CPUs and the application containers use the isolated CPUs. 
-==== diff --git a/modules/connected-to-disconnected-config-registry.adoc deleted file mode 100644 index bed513748336..000000000000 --- a/modules/connected-to-disconnected-config-registry.adoc +++ /dev/null @@ -1,190 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/connected-to-disconnected.adoc - -[id="connected-to-disconnected-config-registry_{context}"] -= Configuring the cluster for the mirror registry - -After creating and mirroring the images to the mirror registry, you must modify your cluster so that pods can pull images from the mirror registry. - -You must: - -* Add the mirror registry credentials to the global pull secret. -* Add the mirror registry server certificate to the cluster. -* Create an `ImageContentSourcePolicy` custom resource (ICSP), which associates the mirror registry with the source registry. - - - -. Add the mirror registry credentials to the cluster global pull secret: -+ -[source,terminal] ----- -$ oc set data secret/pull-secret -n openshift-config --from-file=.dockerconfigjson=<pull_secret_location> <1> ----- -<1> Provide the path to the new pull secret file. -+ -For example: -+ -[source,terminal] ----- -$ oc set data secret/pull-secret -n openshift-config --from-file=.dockerconfigjson=.mirrorsecretconfigjson ----- - -. Add the CA-signed mirror registry server certificate to the nodes in the cluster: - -.. Create a config map that includes the server certificate for the mirror registry: -+ -[source,terminal] ----- -$ oc create configmap <config_map_name> --from-file=<mirror_address_host>..<port>=$path/ca.crt -n openshift-config ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create configmap registry-config --from-file=mirror.registry.com..443=/root/certs/ca-chain.cert.pem -n openshift-config ----- - -.. Use the config map to update the `image.config.openshift.io/cluster` custom resource (CR). {product-title} applies the changes to this CR to all nodes in the cluster: -+ -[source,terminal] ----- -$ oc patch image.config.openshift.io/cluster --patch '{"spec":{"additionalTrustedCA":{"name":"<config_map_name>"}}}' --type=merge ----- -+ -For example: -+ -[source,terminal] ----- -$ oc patch image.config.openshift.io/cluster --patch '{"spec":{"additionalTrustedCA":{"name":"registry-config"}}}' --type=merge ----- - -. Create an ICSP to redirect container pull requests from the online registries to the mirror registry: - -.. Create the `ImageContentSourcePolicy` custom resource: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1alpha1 -kind: ImageContentSourcePolicy -metadata: - name: mirror-ocp -spec: - repositoryDigestMirrors: - - mirrors: - - mirror.registry.com:443/ocp/release <1> - source: quay.io/openshift-release-dev/ocp-release <2> - - mirrors: - - mirror.registry.com:443/ocp/release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev ----- -<1> Specifies the name of the mirror image registry and repository. -<2> Specifies the online registry and repository containing the content that is mirrored. - -.. Create the ICSP object: -+ -[source,terminal] ----- -$ oc create -f registryrepomirror.yaml ----- -+ -.Example output -[source,terminal] ----- -imagecontentsourcepolicy.operator.openshift.io/mirror-ocp created ----- -+ -{product-title} applies the changes to this CR to all nodes in the cluster. - -. Verify that the credentials, CA, and ICSP for the mirror registry were added: - -.. 
Log into a node: -+ -[source,terminal] ----- -$ oc debug node/<node_name> ----- - -.. Set `/host` as the root directory within the debug shell: -+ -[source,terminal] ----- -sh-4.4# chroot /host ----- - -.. Check the `config.json` file for the credentials: -+ -[source,terminal] ----- -sh-4.4# cat /var/lib/kubelet/config.json ----- -+ -.Example output -[source,terminal] ----- -{"auths":{"brew.registry.redhat.io":{"xx=="},"brewregistry.stage.redhat.io":{"auth":"xxx=="},"mirror.registry.com:443":{"auth":"xx="}}} <1> ----- -<1> Ensure that the mirror registry and credentials are present. - -.. Change to the `certs.d` directory -+ -[source,terminal] ----- -sh-4.4# cd /etc/docker/certs.d/ ----- - -.. List the certificates in the `certs.d` directory: -+ -[source,terminal] ----- -sh-4.4# ls ----- -+ -.Example output ----- -image-registry.openshift-image-registry.svc.cluster.local:5000 -image-registry.openshift-image-registry.svc:5000 -mirror.registry.com:443 <1> ----- -<1> Ensure that the mirror registry is in the list. - -.. Check that the ICSP added the mirror registry to the `registries.conf` file: -+ -[source,terminal] ----- -sh-4.4# cat /etc/containers/registries.conf ----- -+ -.Example output -+ -[source,terminal] ----- -unqualified-search-registries = ["registry.access.redhat.com", "docker.io"] - -[[registry]] - prefix = "" - location = "quay.io/openshift-release-dev/ocp-release" - mirror-by-digest-only = true - - [[registry.mirror]] - location = "mirror.registry.com:443/ocp/release" - -[[registry]] - prefix = "" - location = "quay.io/openshift-release-dev/ocp-v4.0-art-dev" - mirror-by-digest-only = true - - [[registry.mirror]] - location = "mirror.registry.com:443/ocp/release" ----- -+ -The `registry.mirror` parameters indicate that the mirror registry is searched before the original registry. - -.. Exit the node. -+ -[source,terminal] ----- -sh-4.4# exit ----- - diff --git a/modules/connected-to-disconnected-disconnect.adoc b/modules/connected-to-disconnected-disconnect.adoc deleted file mode 100644 index cdc2f8d77d58..000000000000 --- a/modules/connected-to-disconnected-disconnect.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/connected-to-disconnected.adoc - -[id="connected-to-disconnected-disconnect_{context}"] -= Disconnect the cluster from the network - -After mirroring all the required repositories and configuring your cluster to work as a disconnected cluster, you can disconnect the cluster from the network. - - - diff --git a/modules/connected-to-disconnected-mirror-images.adoc b/modules/connected-to-disconnected-mirror-images.adoc deleted file mode 100644 index 0eef46b81423..000000000000 --- a/modules/connected-to-disconnected-mirror-images.adoc +++ /dev/null @@ -1,111 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/connected-to-disconnected.adoc - -:_content-type: PROCEDURE -[id="connected-to-disconnected-mirror-images_{context}"] -= Mirroring the images - -After the cluster is properly configured, you can mirror the images from your external repositories to the mirror repository. - -.Procedure - -. 
Mirror the Operator Lifecycle Manager (OLM) images: -// copied from olm-mirroring-catalog.adoc -+ -[source,terminal] ----- -$ oc adm catalog mirror registry.redhat.io/redhat/redhat-operator-index:v{product-version} <mirror_registry>:<port>/olm -a <reg_creds> ----- -+ --- -where: - -`product-version`:: Specifies the tag that corresponds to the version of {product-title} to install, such as `4.8`. -`mirror_registry`:: Specifies the fully qualified domain name (FQDN) for the target registry and namespace to mirror the Operator content to, where `<namespace>` is any existing namespace on the registry. -`reg_creds`:: Specifies the location of your modified `.dockerconfigjson` file. --- -+ -For example: -+ -[source,terminal] ----- -$ oc adm catalog mirror registry.redhat.io/redhat/redhat-operator-index:v4.8 mirror.registry.com:443/olm -a ./.dockerconfigjson --index-filter-by-os='.*' ----- - -. Mirror the content for any other Red Hat-provided Operator: -+ -[source,terminal] ----- -$ oc adm catalog mirror <index_image> <mirror_registry>:<port>/<namespace> -a <reg_creds> ----- -+ --- -where: - -`index_image`:: Specifies the index image for the catalog that you want to mirror. -`mirror_registry`:: Specifies the FQDN for the target registry and namespace to mirror the Operator content to, where `<namespace>` is any existing namespace on the registry. -`reg_creds`:: Optional: Specifies the location of your registry credentials file, if required. --- -+ -For example: -+ -[source,terminal] ----- -$ oc adm catalog mirror registry.redhat.io/redhat/community-operator-index:v4.8 mirror.registry.com:443/olm -a ./.dockerconfigjson --index-filter-by-os='.*' ----- - -. Mirror the {product-title} image repository: -+ -[source,terminal] ----- -$ oc adm release mirror -a .dockerconfigjson --from=quay.io/openshift-release-dev/ocp-release:v<product-version>-<architecture> --to=<local_registry>/<local_repository> --to-release-image=<local_registry>/<local_repository>:v<product-version>-<architecture> ----- -+ --- -where: - -`product-version`:: Specifies the tag that corresponds to the version of {product-title} to install, such as `4.8.15-x86_64`. -`architecture`:: Specifies the type of architecture for your server, such as `x86_64`. -`local_registry`:: Specifies the registry domain name for your mirror repository. -`local_repository`:: Specifies the name of the repository to create in your registry, such as `ocp4/openshift4`. --- -+ -For example: -+ -[source,terminal] ----- -$ oc adm release mirror -a .dockerconfigjson --from=quay.io/openshift-release-dev/ocp-release:4.8.15-x86_64 --to=mirror.registry.com:443/ocp/release --to-release-image=mirror.registry.com:443/ocp/release:4.8.15-x86_64 ----- -+ -.Example output -+ -[source,terminal] -+ ----- -info: Mirroring 109 images to mirror.registry.com/ocp/release ... -mirror.registry.com:443/ - ocp/release - manifests: - sha256:086224cadce475029065a0efc5244923f43fb9bb3bb47637e0aaf1f32b9cad47 -> 4.8.15-x86_64-thanos - sha256:0a214f12737cb1cfbec473cc301aa2c289d4837224c9603e99d1e90fc00328db -> 4.8.15-x86_64-kuryr-controller - sha256:0cf5fd36ac4b95f9de506623b902118a90ff17a07b663aad5d57c425ca44038c -> 4.8.15-x86_64-pod - sha256:0d1c356c26d6e5945a488ab2b050b75a8b838fc948a75c0fa13a9084974680cb -> 4.8.15-x86_64-kube-client-agent - -….. 
-sha256:66e37d2532607e6c91eedf23b9600b4db904ce68e92b43c43d5b417ca6c8e63c mirror.registry.com:443/ocp/release:4.5.41-multus-admission-controller -sha256:d36efdbf8d5b2cbc4dcdbd64297107d88a31ef6b0ec4a39695915c10db4973f1 mirror.registry.com:443/ocp/release:4.5.41-cluster-kube-scheduler-operator -sha256:bd1baa5c8239b23ecdf76819ddb63cd1cd6091119fecdbf1a0db1fb3760321a2 mirror.registry.com:443/ocp/release:4.5.41-aws-machine-controllers -info: Mirroring completed in 2.02s (0B/s) - -Success -Update image: mirror.registry.com:443/ocp/release:4.5.41-x86_64 -Mirror prefix: mirror.registry.com:443/ocp/release ----- - -. Mirror any other registries, as needed: -+ -[source,terminal] ----- -$ oc image mirror <online_registry>/my/image:latest <mirror_registry> ----- diff --git a/modules/connected-to-disconnected-prepare-mirror.adoc deleted file mode 100644 index ff61826f6bba..000000000000 --- a/modules/connected-to-disconnected-prepare-mirror.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/connected-to-disconnected.adoc - -:_content-type: PROCEDURE -[id="connected-to-disconnected-prepare-mirror_{context}"] -= Preparing the cluster for mirroring - -Before disconnecting your cluster, you must mirror, or copy, the images to a mirror registry that is reachable by every node in your disconnected cluster. In order to mirror the images, you must prepare your cluster by: - -* Adding the mirror registry certificates to the list of trusted CAs on your host. -* Creating a `.dockerconfigjson` file that contains your image pull secret, which is from the `cloud.openshift.com` token. - -.Procedure - -. Configure credentials that allow image mirroring: - -.. Add the CA certificate for the mirror registry, in the simple PEM or DER file formats, to the list of trusted CAs. For example: -+ -[source,terminal] ----- -$ cp </path/to/cert.crt> /usr/share/pki/ca-trust-source/anchors/ ----- -+ --- -where: -+ -`</path/to/cert.crt>`:: Specifies the path to the certificate on your local file system. --- - -.. Update the CA trust. For example, in Linux: -+ -[source,terminal] ----- -$ update-ca-trust ----- - -.. Extract the `.dockerconfigjson` file from the global pull secret: -+ -[source,terminal] ----- -$ oc extract secret/pull-secret -n openshift-config --confirm --to=. ----- -+ -.Example output -[source,terminal] ----- -.dockerconfigjson ----- - -.. Edit the `.dockerconfigjson` file to add your mirror registry and authentication credentials and save it as a new file: -// copied from olm-accessing-images-private-registries -+ -[source,terminal] ----- -{"auths":{"<local_registry>": {"auth": "<credentials>","email": "you@example.com"},"<registry>:<port>/<namespace>/":{"auth":"<token>"}}} ----- -+ -where: -+ -`<local_registry>`:: Specifies the registry domain name, and optionally the port, that your mirror registry uses to serve content. -`auth`:: Specifies the base64-encoded user name and password for your mirror registry. -`<registry>:<port>/<namespace>`:: Specifies the mirror registry details. -`<token>`:: Specifies the base64-encoded `username:password` for your mirror registry. 
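- -One way to produce the base64-encoded value for an `auth` field from a `<username>:<password>` pair is sketched in the following command; the `-w0` option assumes GNU coreutils and only disables line wrapping: - -[source,terminal] ----- -$ echo -n '<username>:<password>' | base64 -w0 ----- - -Paste the output into the `auth` field of the corresponding registry entry.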
-+ -For example: -+ -[source,terminal] ----- -{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0Y3UjhGOVZPT0lOMEFaUjdPUzRGTA==","email":"user@example.com"}, -"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGOVZPT0lOMEFaUGSTd4VGVGVUjdPUzRGTA==","email":"user@example.com"}, -"registry.connect.redhat.com":{"auth":"NTE3MTMwNDB8dWhjLTFEZlN3VHkxOSTd4VGVGVU1MdTpleUpoYkdjaUailA==","email":"user@example.com"}, -"registry.redhat.io":{"auth":"NTE3MTMwNDB8dWhjLTFEZlN3VH3BGSTd4VGVGVU1MdTpleUpoYkdjaU9fZw==","email":"user@example.com"}, -"registry.svc.ci.openshift.org":{"auth":"dXNlcjpyWjAwWVFjSEJiT2RKVW1pSmg4dW92dGp1SXRxQ3RGN1pwajJhN1ZXeTRV"},"my-registry:5000/my-namespace/":{"auth":"dXNlcm5hbWU6cGFzc3dvcmQ="}}} ----- diff --git a/modules/connected-to-disconnected-restore-insights.adoc deleted file mode 100644 index 51bc38646bfd..000000000000 --- a/modules/connected-to-disconnected-restore-insights.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/connected-to-disconnected.adoc - -:_content-type: PROCEDURE -[id="connected-to-disconnected-restore-insights_{context}"] -= Restoring a degraded Insights Operator - -Disconnecting the cluster from the network causes the cluster to lose its internet connection. The Insights Operator becomes degraded because it requires access to link:https://console.redhat.com[Red Hat Insights]. - -This topic describes how to recover from a degraded Insights Operator. - -.Procedure - -. Edit your `.dockerconfigjson` file to remove the `cloud.openshift.com` entry, for example: -+ -[source,terminal] ----- -"cloud.openshift.com":{"auth":"<hash>","email":"user@example.com"} ----- - -. Save the file. - -. Update the cluster secret with the edited `.dockerconfigjson` file: -+ -[source,terminal] ----- -$ oc set data secret/pull-secret -n openshift-config --from-file=.dockerconfigjson=./.dockerconfigjson ----- - -. Verify that the Insights Operator is no longer degraded: -+ -[source,terminal] ----- -$ oc get co insights ----- -+ -.Example output -[source,terminal] ----- -NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE -insights 4.5.41 True False False 3d ----- - - diff --git a/modules/connected-to-disconnected-restore.adoc deleted file mode 100644 index b670b45bcc09..000000000000 --- a/modules/connected-to-disconnected-restore.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/connected-to-disconnected.adoc - -:_content-type: PROCEDURE -[id="connected-to-disconnected-restore_{context}"] -= Restoring the network - -If you want to reconnect a disconnected cluster and pull images from online registries, delete the cluster's ImageContentSourcePolicy (ICSP) objects. Without the ICSP, pull requests to external registries are no longer redirected to the mirror registry. - -.Procedure - -. View the ICSP objects in your cluster: -+ -[source,terminal] ----- -$ oc get imagecontentsourcepolicy ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -mirror-ocp 6d20h -ocp4-index-0 6d18h -qe45-index-0 6d15h ----- - -. 
Delete all the ICSP objects you created when disconnecting your cluster: -+ -[source,terminal] ----- -$ oc delete imagecontentsourcepolicy <icsp_name> <icsp_name> <icsp_name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc delete imagecontentsourcepolicy mirror-ocp ocp4-index-0 qe45-index-0 ----- -+ -.Example output -[source,terminal] ----- -imagecontentsourcepolicy.operator.openshift.io "mirror-ocp" deleted -imagecontentsourcepolicy.operator.openshift.io "ocp4-index-0" deleted -imagecontentsourcepolicy.operator.openshift.io "qe45-index-0" deleted ----- - -. Wait for all the nodes to restart and return to the READY status, and verify that the `registries.conf` file is pointing to the original registries and not the mirror registries: - -.. Log into a node: -+ -[source,terminal] ----- -$ oc debug node/<node_name> ----- - -.. Set `/host` as the root directory within the debug shell: -+ -[source,terminal] ----- -sh-4.4# chroot /host ----- - -.. Examine the `registries.conf` file: -+ -[source,terminal] ----- -sh-4.4# cat /etc/containers/registries.conf ----- -+ -.Example output -[source,terminal] ----- -unqualified-search-registries = ["registry.access.redhat.com", "docker.io"] <1> ----- -<1> The `registry` and `registry.mirror` entries created by the ICSPs you deleted are removed. diff --git a/modules/connected-to-disconnected-verify.adoc deleted file mode 100644 index c0f6cceb6332..000000000000 --- a/modules/connected-to-disconnected-verify.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/connected-to-disconnected.adoc - -:_content-type: PROCEDURE -[id="connected-to-disconnected-verify_{context}"] -= Ensure applications continue to work - -Before disconnecting the cluster from the network, ensure that your cluster and all of your applications are working as expected. - -.Procedure - -Use the following commands to check the status of your cluster: - -* Ensure your pods are running: -+ -[source,terminal] ----- -$ oc get pods --all-namespaces ----- -+ -.Example output -[source,terminal] ----- -NAMESPACE NAME READY STATUS RESTARTS AGE -kube-system apiserver-watcher-ci-ln-47ltxtb-f76d1-mrffg-master-0 1/1 Running 0 39m -kube-system apiserver-watcher-ci-ln-47ltxtb-f76d1-mrffg-master-1 1/1 Running 0 39m -kube-system apiserver-watcher-ci-ln-47ltxtb-f76d1-mrffg-master-2 1/1 Running 0 39m -openshift-apiserver-operator openshift-apiserver-operator-79c7c646fd-5rvr5 1/1 Running 3 45m -openshift-apiserver apiserver-b944c4645-q694g 2/2 Running 0 29m -openshift-apiserver apiserver-b944c4645-shdxb 2/2 Running 0 31m -openshift-apiserver apiserver-b944c4645-x7rf2 2/2 Running 0 33m - ... 
----- - -* Ensure your nodes are in the READY status: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ci-ln-47ltxtb-f76d1-mrffg-master-0 Ready master 42m v1.27.3 -ci-ln-47ltxtb-f76d1-mrffg-master-1 Ready master 42m v1.27.3 -ci-ln-47ltxtb-f76d1-mrffg-master-2 Ready master 42m v1.27.3 -ci-ln-47ltxtb-f76d1-mrffg-worker-a-gsxbz Ready worker 35m v1.27.3 -ci-ln-47ltxtb-f76d1-mrffg-worker-b-5qqdx Ready worker 35m v1.27.3 -ci-ln-47ltxtb-f76d1-mrffg-worker-c-rjkpq Ready worker 34m v1.27.3 ----- diff --git a/modules/console-operator.adoc b/modules/console-operator.adoc deleted file mode 100644 index 6937c3e17d2d..000000000000 --- a/modules/console-operator.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc -// * installing/cluster-capabilities.adoc - -// operators/operator-reference.adoc -ifeval::["{context}" == "cluster-operators-ref"] -:operator-ref: -endif::[] - -ifeval::["{context}" == "cluster-capabilities"] -:cluster-caps: -endif::[] - -:_content-type: REFERENCE -[id="console-operator_{context}"] -ifdef::operator-ref[= Console Operator] -ifdef::cluster-caps[= Console capability] - -ifdef::operator-ref[] - -[NOTE] -==== -The Console Operator is an optional cluster capability that can be disabled by cluster administrators during installation. If you disable the Console Operator at installation, your cluster is still supported and upgradable. For more information about optional cluster capabilities, see "Cluster capabilities" in _Installing_. -==== - -endif::operator-ref[] - -[discrete] -== Purpose - -ifdef::cluster-caps[] - -The Console Operator provides the features for the `Console` capability. - -endif::cluster-caps[] - -The Console Operator installs and maintains the {product-title} web console on a cluster. The Console Operator is installed by default and automatically maintains a console. - -ifdef::operator-ref[] - -[discrete] -== Project - -link:https://github.com/openshift/console-operator[console-operator] - -endif::operator-ref[] - -ifeval::["{context}" == "cluster-operators-ref"] -:!operator-ref: -endif::[] -ifeval::["{context}" == "cluster-capabilities"] -:!cluster-caps: -endif::[] diff --git a/modules/consuming-huge-pages-resource-using-the-downward-api.adoc b/modules/consuming-huge-pages-resource-using-the-downward-api.adoc deleted file mode 100644 index 37b68a02f97b..000000000000 --- a/modules/consuming-huge-pages-resource-using-the-downward-api.adoc +++ /dev/null @@ -1,108 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.adoc - -:file-name: hugepages-volume-pod.yaml - -:_content-type: PROCEDURE -[id="consuming-huge-pages-resource-using-the-downward-api_{context}"] -= Consuming huge pages resources using the Downward API - -You can use the Downward API to inject information about the huge pages resources that are consumed by a container. - -You can inject the resource allocation as environment variables, a volume plugin, or both. Applications that you develop and run in the container can determine the resources that are available by reading the environment variables or files in the specified volumes. - -.Procedure - -. 
Create a `{file-name}` file that is similar to the following example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - generateName: hugepages-volume- - labels: - app: hugepages-example -spec: - containers: - - securityContext: - capabilities: - add: [ "IPC_LOCK" ] - image: rhel7:latest - command: - - sleep - - inf - name: example - volumeMounts: - - mountPath: /dev/hugepages - name: hugepage - - mountPath: /etc/podinfo - name: podinfo - resources: - limits: - hugepages-1Gi: 2Gi - memory: "1Gi" - cpu: "1" - requests: - hugepages-1Gi: 2Gi - env: - - name: REQUESTS_HUGEPAGES_1GI <.> - valueFrom: - resourceFieldRef: - containerName: example - resource: requests.hugepages-1Gi - volumes: - - name: hugepage - emptyDir: - medium: HugePages - - name: podinfo - downwardAPI: - items: - - path: "hugepages_1G_request" <.> - resourceFieldRef: - containerName: example - resource: requests.hugepages-1Gi - divisor: 1Gi ----- -<.> Specifies to read the resource use from `requests.hugepages-1Gi` and expose the value as the `REQUESTS_HUGEPAGES_1GI` environment variable. -<.> Specifies to read the resource use from `requests.hugepages-1Gi` and expose the value as the file `/etc/podinfo/hugepages_1G_request`. - -. Create the pod from the `{file-name}` file: -+ -[source,terminal,subs="attributes+"] ----- -$ oc create -f {file-name} ----- - -.Verification - -. Check the value of the `REQUESTS_HUGEPAGES_1GI` environment variable: -+ -[source,terminal] ----- -$ oc exec -it $(oc get pods -l app=hugepages-example -o jsonpath='{.items[0].metadata.name}') \ - -- env | grep REQUESTS_HUGEPAGES_1GI ----- -+ -.Example output -[source,terminal] ----- -REQUESTS_HUGEPAGES_1GI=2147483648 ----- - -. Check the value of the `/etc/podinfo/hugepages_1G_request` file: -+ -[source,terminal] ----- -$ oc exec -it $(oc get pods -l app=hugepages-example -o jsonpath='{.items[0].metadata.name}') \ - -- cat /etc/podinfo/hugepages_1G_request ----- -+ -.Example output -[source,terminal] ----- -2 ----- - -:!file-name: diff --git a/modules/container-benefits.adoc b/modules/container-benefits.adoc deleted file mode 100644 index 769dca351a08..000000000000 --- a/modules/container-benefits.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_architecture/osd-architecture.adoc - -[id="container-benefits_{context}"] -= The benefits of containerized applications - - -Applications were once expected to be installed on operating systems that included all of the dependencies for the application. However, containers provide a standard way to package your application code, configurations, and dependencies into a single unit that can run as a resource-isolated process on a compute server. To run your app in Kubernetes on {product-title}, you must first containerize your app by creating a container image that you store in a container registry. - -[id="operating-system-benefits_{context}"] -== Operating system benefits - -Containers use small, dedicated Linux operating systems without a kernel. The file system, networking, cgroups, process tables, and namespaces are separate from the host Linux system, but the containers can integrate with the -hosts seamlessly when necessary. Being based on Linux allows containers to use all the advantages that come with the open source development model of rapid innovation. - -Because each container uses a dedicated operating system, you can deploy applications that require conflicting software dependencies on the same host. 
Each container carries its own dependent software and manages its own interfaces, such as networking and file systems, so applications never need to compete for those assets. - -[id="deployment-scaling-benefits_{context}"] -== Deployment benefits - -If you employ rolling upgrades between major releases of your application, you can continuously improve your applications without downtime and still maintain compatibility with the current release. - -You can also deploy and test a new version of an application alongside the existing version. If the new version passes your tests, deploy more containers of the new version and remove the old ones. - -Because all the software dependencies for an application are resolved within the container itself, you can use a generic operating system on each host in your data center. You do not need to configure a specific operating system for each application host. When your data center needs more capacity, you can deploy another generic host system. diff --git a/modules/containers-about.adoc b/modules/containers-about.adoc deleted file mode 100644 index a6523095c368..000000000000 --- a/modules/containers-about.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/images-understand.adoc - -[id="containers-about_{context}"] -= Containers - -The basic units of {product-title} applications are called containers. link:https://access.redhat.com/articles/1353593[Linux container technologies] are lightweight mechanisms for isolating running processes so that they are limited to interacting with only their designated resources. The word container is defined as a specific running or paused instance of a container image. - -Many application instances can be running in containers on a single host without visibility into each other's processes, files, network, and so on. Typically, each container provides a single service, often called a micro-service, such as a web server or a database, though containers can be used for arbitrary workloads. - -The Linux kernel has been incorporating capabilities for container technologies for years. The Docker project developed a convenient management interface for Linux containers on a host. More recently, the link:https://github.com/opencontainers/[Open Container Initiative] has developed open standards for container formats and container runtimes. {product-title} and Kubernetes add the ability to orchestrate OCI- and Docker-formatted containers across multi-host installations. - -Though you do not directly interact with container runtimes when using {product-title}, understanding their capabilities and terminology is important for understanding their role in {product-title} and how your applications function inside of containers. - -Tools such as link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/managing_containers/#using_podman_to_work_with_containers[podman] can be used to replace `docker` command-line tools for running and managing containers directly. Using `podman`, you can experiment with containers separately from {product-title}. 
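The following commands are a minimal sketch of that kind of standalone experiment with `podman`; the image name is only an example, and any small image that your workstation can pull works equally well.

[source,terminal]
----
$ podman pull registry.access.redhat.com/ubi9/ubi-minimal   # pull an example image
$ podman run --rm registry.access.redhat.com/ubi9/ubi-minimal cat /etc/os-release   # run a short-lived container from it
$ podman images   # list the images that are now stored locally
----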
diff --git a/modules/containers-signature-verify-application.adoc b/modules/containers-signature-verify-application.adoc deleted file mode 100644 index 664538bb45f5..000000000000 --- a/modules/containers-signature-verify-application.adoc +++ /dev/null @@ -1,259 +0,0 @@ -// Module included in the following assemblies: -// -// * security/container_security/security-container-signature.adoc - -:_content-type: PROCEDURE -[id="containers-signature-verify-application_{context}"] -= Verifying the signature verification configuration -After you apply the machine configs to the cluster, the Machine Config Controller detects the new `MachineConfig` object and generates a new `rendered-worker-<hash>` version. - -.Prerequisites -* You enabled signature verification by using a machine config file. - -.Procedure - -. On the command line, run the following command to display information about a desired worker: -+ -[source,terminal] ----- -$ oc describe machineconfigpool/worker ----- -+ -.Example output of initial worker monitoring -+ -[source,terminal] ----- -Name: worker -Namespace: -Labels: machineconfiguration.openshift.io/mco-built-in= -Annotations: <none> -API Version: machineconfiguration.openshift.io/v1 -Kind: MachineConfigPool -Metadata: - Creation Timestamp: 2019-12-19T02:02:12Z - Generation: 3 - Resource Version: 16229 - Self Link: /apis/machineconfiguration.openshift.io/v1/machineconfigpools/worker - UID: 92697796-2203-11ea-b48c-fa163e3940e5 -Spec: - Configuration: - Name: rendered-worker-f6819366eb455a401c42f8d96ab25c02 - Source: - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 00-worker - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 01-worker-container-runtime - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 01-worker-kubelet - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 51-worker-rh-registry-trust - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 99-worker-92697796-2203-11ea-b48c-fa163e3940e5-registries - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 99-worker-ssh - Machine Config Selector: - Match Labels: - machineconfiguration.openshift.io/role: worker - Node Selector: - Match Labels: - node-role.kubernetes.io/worker: - Paused: false -Status: - Conditions: - Last Transition Time: 2019-12-19T02:03:27Z - Message: - Reason: - Status: False - Type: RenderDegraded - Last Transition Time: 2019-12-19T02:03:43Z - Message: - Reason: - Status: False - Type: NodeDegraded - Last Transition Time: 2019-12-19T02:03:43Z - Message: - Reason: - Status: False - Type: Degraded - Last Transition Time: 2019-12-19T02:28:23Z - Message: - Reason: - Status: False - Type: Updated - Last Transition Time: 2019-12-19T02:28:23Z - Message: All nodes are updating to rendered-worker-f6819366eb455a401c42f8d96ab25c02 - Reason: - Status: True - Type: Updating - Configuration: - Name: rendered-worker-d9b3f4ffcfd65c30dcf591a0e8cf9b2e - Source: - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 00-worker - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 01-worker-container-runtime - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 01-worker-kubelet - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 99-worker-92697796-2203-11ea-b48c-fa163e3940e5-registries - API Version: machineconfiguration.openshift.io/v1 
- Kind: MachineConfig - Name: 99-worker-ssh - Degraded Machine Count: 0 - Machine Count: 1 - Observed Generation: 3 - Ready Machine Count: 0 - Unavailable Machine Count: 1 - Updated Machine Count: 0 -Events: <none> ----- - -. Run the `oc describe` command again: -+ -[source,terminal] ----- -$ oc describe machineconfigpool/worker ----- -+ -.Example output after the worker is updated -+ -[source,terminal] ----- -... - Last Transition Time: 2019-12-19T04:53:09Z - Message: All nodes are updated with rendered-worker-f6819366eb455a401c42f8d96ab25c02 - Reason: - Status: True - Type: Updated - Last Transition Time: 2019-12-19T04:53:09Z - Message: - Reason: - Status: False - Type: Updating - Configuration: - Name: rendered-worker-f6819366eb455a401c42f8d96ab25c02 - Source: - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 00-worker - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 01-worker-container-runtime - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 01-worker-kubelet - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 51-worker-rh-registry-trust - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 99-worker-92697796-2203-11ea-b48c-fa163e3940e5-registries - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 99-worker-ssh - Degraded Machine Count: 0 - Machine Count: 3 - Observed Generation: 4 - Ready Machine Count: 3 - Unavailable Machine Count: 0 - Updated Machine Count: 3 -... ----- -+ -[NOTE] -==== -The `Observed Generation` parameter shows an increased count based on the generation of the controller-produced configuration. This controller updates this value even if it fails to process the specification and generate a revision. The `Configuration Source` value points to the `51-worker-rh-registry-trust` configuration. -==== - -. Confirm that the `policy.json` file exists with the following command: -+ -[source,terminal] ----- -$ oc debug node/<node> -- chroot /host cat /etc/containers/policy.json ----- -+ -.Example output -+ -[source,terminal] ----- -Starting pod/<node>-debug ... -To use host binaries, run `chroot /host` -{ - "default": [ - { - "type": "insecureAcceptAnything" - } - ], - "transports": { - "docker": { - "registry.access.redhat.com": [ - { - "type": "signedBy", - "keyType": "GPGKeys", - "keyPath": "/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release" - } - ], - "registry.redhat.io": [ - { - "type": "signedBy", - "keyType": "GPGKeys", - "keyPath": "/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release" - } - ] - }, - "docker-daemon": { - "": [ - { - "type": "insecureAcceptAnything" - } - ] - } - } -} ----- - -. Confirm that the `registry.redhat.io.yaml` file exists with the following command: -+ -[source,terminal] ----- -$ oc debug node/<node> -- chroot /host cat /etc/containers/registries.d/registry.redhat.io.yaml ----- -+ -.Example output -+ -[source,terminal] ----- -Starting pod/<node>-debug ... -To use host binaries, run `chroot /host` -docker: - registry.redhat.io: - sigstore: https://registry.redhat.io/containers/sigstore ----- - -. Confirm that the `registry.access.redhat.com.yaml` file exists with the following command: -+ -[source,terminal] ----- -$ oc debug node/<node> -- chroot /host cat /etc/containers/registries.d/registry.access.redhat.com.yaml ----- -+ -.Example output -+ -[source,terminal] ----- -Starting pod/<node>-debug ... 
-To use host binaries, run `chroot /host` -docker: - registry.access.redhat.com: - sigstore: https://access.redhat.com/webassets/docker/content/sigstore ----- diff --git a/modules/containers-signature-verify-enable.adoc b/modules/containers-signature-verify-enable.adoc deleted file mode 100644 index 213108e5586d..000000000000 --- a/modules/containers-signature-verify-enable.adoc +++ /dev/null @@ -1,167 +0,0 @@ -// Module included in the following assemblies: -// -// * security/container_security/security-container-signature.adoc - -:_content-type: PROCEDURE -[id="containers-signature-verify-enable_{context}"] -= Enabling signature verification for Red Hat Container Registries -Enabling container signature validation for Red Hat Container Registries requires writing a signature verification policy file specifying the keys to verify images from these registries. For RHEL8 nodes, the registries are already defined in `/etc/containers/registries.d` by default. - -.Procedure - -. Create a Butane config file, `51-worker-rh-registry-trust.bu`, containing the necessary configuration for the worker nodes. -+ -[NOTE] -==== -See "Creating machine configs with Butane" for information about Butane. -==== -+ -[source,yaml,subs="attributes+"] ----- -variant: openshift -version: {product-version}.0 -metadata: - name: 51-worker-rh-registry-trust - labels: - machineconfiguration.openshift.io/role: worker -storage: - files: - - path: /etc/containers/policy.json - mode: 0644 - overwrite: true - contents: - inline: | - { - "default": [ - { - "type": "insecureAcceptAnything" - } - ], - "transports": { - "docker": { - "registry.access.redhat.com": [ - { - "type": "signedBy", - "keyType": "GPGKeys", - "keyPath": "/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release" - } - ], - "registry.redhat.io": [ - { - "type": "signedBy", - "keyType": "GPGKeys", - "keyPath": "/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release" - } - ] - }, - "docker-daemon": { - "": [ - { - "type": "insecureAcceptAnything" - } - ] - } - } - } ----- - -. Use Butane to generate a machine config YAML file, `51-worker-rh-registry-trust.yaml`, containing the file to be written to disk on the worker nodes: -+ -[source,terminal] ----- -$ butane 51-worker-rh-registry-trust.bu -o 51-worker-rh-registry-trust.yaml ----- - -. Apply the created machine config: -+ -[source,terminal] ----- -$ oc apply -f 51-worker-rh-registry-trust.yaml ----- - -. Check that the worker machine config pool has rolled out with the new machine config: - -.. 
Check that the new machine config was created: -+ -[source,terminal] ----- -$ oc get mc ----- -+ -.Sample output -[source,terminal] ----- -NAME GENERATEDBYCONTROLLER IGNITIONVERSION AGE -00-master a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 25m -00-worker a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 25m -01-master-container-runtime a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 25m -01-master-kubelet a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 25m -01-worker-container-runtime a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 25m -01-worker-kubelet a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 25m -51-master-rh-registry-trust 3.2.0 13s -51-worker-rh-registry-trust 3.2.0 53s <1> -99-master-generated-crio-seccomp-use-default 3.2.0 25m -99-master-generated-registries a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 25m -99-master-ssh 3.2.0 28m -99-worker-generated-crio-seccomp-use-default 3.2.0 25m -99-worker-generated-registries a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 25m -99-worker-ssh 3.2.0 28m -rendered-master-af1e7ff78da0a9c851bab4be2777773b a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 8s -rendered-master-cd51fd0c47e91812bfef2765c52ec7e6 a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 24m -rendered-worker-2b52f75684fbc711bd1652dd86fd0b82 a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 24m -rendered-worker-be3b3bce4f4aa52a62902304bac9da3c a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 48s <2> ----- -<1> New machine config -<2> New rendered machine config - -.. Check that the worker machine config pool is updating with the new machine config: -+ -[source,terminal] ----- -$ oc get mcp ----- -+ -.Sample output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-af1e7ff78da0a9c851bab4be2777773b True False False 3 3 3 0 30m -worker rendered-worker-be3b3bce4f4aa52a62902304bac9da3c False True False 3 0 0 0 30m <1> ----- -<1> When the `UPDATING` field is `True`, the machine config pool is updating with the new machine config. When the field becomes `False`, the worker machine config pool has rolled out to the new machine config. - -. If your cluster uses any RHEL7 worker nodes, when the worker machine config pool is updated, create YAML files on those nodes in the `/etc/containers/registries.d` directory, which specify the location of the detached signatures for a given registry server. The following example works only for images hosted in `registry.access.redhat.com` and `registry.redhat.io`. - -.. Start a debug session to each RHEL7 worker node: -+ -[source,terminal] ----- -$ oc debug node/<node_name> ----- - -.. Change your root directory to `/host`: -+ -[source,terminal] ----- -sh-4.2# chroot /host ----- - -.. Create a `/etc/containers/registries.d/registry.redhat.io.yaml` file that contains the following: -+ -[source,terminal] ----- -docker: - registry.redhat.io: - sigstore: https://registry.redhat.io/containers/sigstore ----- - -.. Create a `/etc/containers/registries.d/registry.access.redhat.com.yaml` file that contains the following: -+ -[source,terminal] ----- -docker: - registry.access.redhat.com: - sigstore: https://access.redhat.com/webassets/docker/content/sigstore ----- - -.. Exit the debug session. 
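The sample `oc get mc` output in the earlier step also lists a `51-master-rh-registry-trust` machine config, but the procedure only shows how to create the worker variant. One possible way to produce the equivalent configuration for control plane nodes, assuming the `51-worker-rh-registry-trust.bu` file from the earlier step is still available, is sketched below; review the generated files before applying them.

[source,terminal]
----
$ sed 's/worker/master/g' 51-worker-rh-registry-trust.bu > 51-master-rh-registry-trust.bu   # reuse the worker Butane config for the master role
$ butane 51-master-rh-registry-trust.bu -o 51-master-rh-registry-trust.yaml   # render the Butane config to a machine config
$ oc apply -f 51-master-rh-registry-trust.yaml   # roll the policy out to the control plane machine config pool
----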
diff --git a/modules/contributing-quick-starts.adoc b/modules/contributing-quick-starts.adoc deleted file mode 100644 index aa0c7b0a2523..000000000000 --- a/modules/contributing-quick-starts.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/creating-quick-start-tutorials.adoc - -:_content-type: PROCEDURE -[id="contributing-quick-starts_{context}"] -= Contributing quick starts - -{product-title} introduces the quick start custom resource, which is defined by a `ConsoleQuickStart` object. Operators and administrators can use this resource to contribute quick starts to the cluster. - -.Prerequisites - -* You must have cluster administrator privileges. - -.Procedure - -. To create a new quick start, run: -+ -[source,terminal] ----- -$ oc get -o yaml consolequickstart spring-with-s2i > my-quick-start.yaml ----- - -. Create the quick start by running: -+ -[source,terminal] ----- -$ oc create -f my-quick-start.yaml ----- - -. Update the YAML file using the guidance outlined in this documentation. - -. Save your edits. - -[id="viewing-quick-start-api-documentation_{context}"] -== Viewing the quick start API documentation - -.Procedure - -* To see the quick start API documentation, run: -+ -[source,terminal] ----- -$ oc explain consolequickstarts ----- - -Run `oc explain -h` for more information about `oc explain` usage. diff --git a/modules/control-plane-machine-set-operator.adoc b/modules/control-plane-machine-set-operator.adoc deleted file mode 100644 index 23fe485c8489..000000000000 --- a/modules/control-plane-machine-set-operator.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="control-plane-machine-set-operator_{context}"] -= Control Plane Machine Set Operator - -[NOTE] -==== -This Operator is available for Amazon Web Services (AWS), Google Cloud Platform (GCP), Microsoft Azure, Nutanix, and VMware vSphere. -==== - -[discrete] -== Purpose - -The Control Plane Machine Set Operator automates the management of control plane machine resources within an {product-title} cluster. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-control-plane-machine-set-operator[cluster-control-plane-machine-set-operator] - -[discrete] -== CRDs - -* `controlplanemachineset.machine.openshift.io` -** Scope: Namespaced -** CR: `ControlPlaneMachineSet` -** Validation: Yes \ No newline at end of file diff --git a/modules/copying-files-pods-and-containers.adoc b/modules/copying-files-pods-and-containers.adoc deleted file mode 100644 index 42c855304989..000000000000 --- a/modules/copying-files-pods-and-containers.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/investigating-pod-issues.adoc - -:_content-type: PROCEDURE -[id="copying-files-pods-and-containers_{context}"] -= Copying files to and from pods and containers - -You can copy files to and from a pod to test configuration changes or gather diagnostic information. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* Your API service is still functional. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Copy a file to a pod: -+ -[source,terminal] ----- -$ oc cp <local_path> <pod_name>:/<path> -c <container_name> <1> ----- -<1> The first container in a pod is selected if the `-c` option is not specified. - -. 
Copy a file from a pod: -+ -[source,terminal] ----- -$ oc cp <pod_name>:/<path> -c <container_name> <local_path> <1> ----- -<1> The first container in a pod is selected if the `-c` option is not specified. -+ -[NOTE] -==== -For `oc cp` to function, the `tar` binary must be available within the container. -==== diff --git a/modules/core-user-password.adoc b/modules/core-user-password.adoc deleted file mode 100644 index b96cff38cb1c..000000000000 --- a/modules/core-user-password.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/machine-configuration-tasks.adoc - -:_content-type: PROCEDURE -[id="core-user-password_{context}"] -= Changing the core user password for node access - -By default, {op-system-first} creates a user named `core` on the nodes in your cluster. You can use the `core` user to access the node through a cloud provider serial console or a bare metal baseboard management controller (BMC). This can be helpful, for example, if a node is down and you cannot access that node by using SSH or the `oc debug node` command. However, by default, there is no password for this user, so you cannot log in without creating one. - -You can create a password for the `core` user by using a machine config. The Machine Config Operator (MCO) assigns the password and injects the password into the `/etc/shadow` file, allowing you to log in with the `core` user. The MCO does not examine the password hash. As such, the MCO cannot report if there is a problem with the password. - -[NOTE] -==== -* The password works only through a cloud provider serial console or a BMC. It does not work with SSH. - -* If you have a machine config that includes an `/etc/shadow` file or a systemd unit that sets a password, it takes precedence over the password hash. -==== - -You can change the password, if needed, by editing the machine config you used to create the password. Also, you can remove the password by deleting the machine config. Deleting the machine config does not remove the user account. - -.Prerequisites - -* Create a hashed password by using a tool that is supported by your operating system. - -.Procedure - -. Create a machine config file that contains the `core` username and the hashed password: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker - name: set-core-user-password -spec: - config: - ignition: - version: 3.2.0 - passwd: - users: - - name: core <1> - passwordHash: <password> <2> ----- -<1> This must be `core`. -<2> The hashed password to use with the `core` account. - -. Create the machine config by running the following command: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -The nodes do not reboot and should become available in a few moments. You can use the `oc get mcp` command to watch for the machine config pools to be updated, as shown in the following example: -+ -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-d686a3ffc8fdec47280afec446fce8dd True False False 3 3 3 0 64m -worker rendered-worker-4605605a5b1f9de1d061e9d350f251e5 False True False 3 0 0 0 64m ----- - -.Verification - -. After the nodes return to the `UPDATED=True` state, start a debug session for a node by running the following command: -+ -[source,terminal] ----- -$ oc debug node/<node_name> ----- - -. 
Set `/host` as the root directory within the debug shell by running the following command: -+ -[source,terminal] ----- -sh-4.4# chroot /host ----- - -. Check the contents of the `/etc/shadow` file: -+ -.Example output -[source,terminal] ----- -... -core:$6$2sE/010goDuRSxxv$o18K52wor.wIwZp:19418:0:99999:7::: -... ----- -+ -The hashed password is assigned to the `core` user. - diff --git a/modules/coreos-layering-configuring.adoc b/modules/coreos-layering-configuring.adoc deleted file mode 100644 index 30c40753ac01..000000000000 --- a/modules/coreos-layering-configuring.adoc +++ /dev/null @@ -1,205 +0,0 @@ -// Module included in the following assemblies: -// -// * post-installation_configuration/coreos-layering.adoc - -:_content-type: PROCEDURE -[id="coreos-layering-configuring_{context}"] -= Applying a {op-system} custom layered image - -You can easily configure {op-system-first} image layering on the nodes in specific machine config pools. The Machine Config Operator (MCO) reboots those nodes with the new custom layered image, overriding the base {op-system-first} image. - -To apply a custom layered image to your cluster, you must have the custom layered image in a repository that your cluster can access. Then, create a `MachineConfig` object that points to the custom layered image. You need a separate `MachineConfig` object for each machine config pool that you want to configure. - -[IMPORTANT] -==== -When you configure a custom layered image, {product-title} no longer automatically updates any node that uses the custom layered image. You become responsible for manually updating your nodes as appropriate. If you roll back the custom layer, {product-title} will again automatically update the node. See the Additional resources section that follows for important information about updating nodes that use a custom layered image. -==== - -.Prerequisites - -* You must create a custom layered image that is based on an {product-title} image digest, not a tag. -+ -[NOTE] -==== -You should use the same base {op-system} image that is installed on the rest of your cluster. Use the `oc adm release info --image-for rhel-coreos` command to obtain the base image being used in your cluster. -==== -+ -For example, the following Containerfile creates a custom layered image from an {product-title} {product-version} image and a Hotfix package: -+ -.Example Containerfile for a custom layer image -[source,yaml] ----- -# Using a 4.12.0 image -FROM quay.io/openshift-release/ocp-release@sha256... <1> -#Install hotfix rpm -RUN rpm-ostree override replace https://example.com/hotfixes/haproxy-1.0.16-5.el8.src.rpm && \ <2> - rpm-ostree cleanup -m && \ - ostree container commit ----- -<1> Specifies the {op-system} base image of your cluster. -<2> Specifies the path to the Hotfix package. -+ -[NOTE] -==== -Instructions on how to create a Containerfile are beyond the scope of this documentation. -==== - -* Because the process for building a custom layered image is performed outside of the cluster, you must use the `--authfile /path/to/pull-secret` option with Podman or Buildah. Alternatively, to have the pull secret read by these tools automatically, you can add it to one of the default file locations: `~/.docker/config.json`, `$XDG_RUNTIME_DIR/containers/auth.json`, `~/.docker/config.json`, or `~/.dockercfg`. Refer to the `containers-auth.json` man page for more information. - -* You must push the custom layered image to a repository that your cluster can access. - -.Procedure - -. Create a machine config file. - -.. 
Create a YAML file similar to the following: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker <1> - name: os-layer-custom -spec: - osImageURL: quay.io/my-registry/custom-image@sha256... <2> ----- -<1> Specifies the machine config pool to apply the custom layered image. -<2> Specifies the path to the custom layered image in the repository. - -.. Create the `MachineConfig` object: -+ -[source,terminal] ----- -$ oc create -f <file_name>.yaml ----- -+ -[IMPORTANT] -==== -It is strongly recommended that you test your images outside of your production environment before rolling out to your cluster. -==== - -.Verification - -You can verify that the custom layered image is applied by performing any of the following checks: - -. Check that the worker machine config pool has rolled out with the new machine config: - -.. Check that the new machine config is created: -+ -[source,terminal] ----- -$ oc get mc ----- -+ -.Sample output -[source,terminal] ----- -NAME GENERATEDBYCONTROLLER IGNITIONVERSION AGE -00-master 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 95m -00-worker 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 95m -01-master-container-runtime 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 95m -01-master-kubelet 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 95m -01-worker-container-runtime 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 95m -01-worker-kubelet 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 95m -99-master-generated-registries 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 95m -99-master-ssh 3.2.0 98m -99-worker-generated-registries 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 95m -99-worker-ssh 3.2.0 98m -os-layer-custom 10s <1> -rendered-master-15961f1da260f7be141006404d17d39b 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 95m -rendered-worker-5aff604cb1381a4fe07feaf1595a797e 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 95m -rendered-worker-5de4837625b1cbc237de6b22bc0bc873 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 4s <2> ----- -<1> New machine config -<2> New rendered machine config - -.. Check that the `osImageURL` value in the new machine config points to the expected image: -+ -[source,terminal] ----- -$ oc describe mc rendered-master-4e8be63aef68b843b546827b6ebe0913 ----- -+ -.Example output -[source,terminal,subs="attributes+"] ----- -Name: rendered-master-4e8be63aef68b843b546827b6ebe0913 -Namespace: -Labels: <none> -Annotations: machineconfiguration.openshift.io/generated-by-controller-version: 8276d9c1f574481043d3661a1ace1f36cd8c3b62 - machineconfiguration.openshift.io/release-image-version: {product-version}.0-ec.3 -API Version: machineconfiguration.openshift.io/v1 -Kind: MachineConfig -... - Os Image URL: quay.io/my-registry/custom-image@sha256... ----- - -.. Check that the associated machine config pool is updating with the new machine config: -+ -[source,terminal] ----- -$ oc get mcp ----- -+ -.Sample output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-6faecdfa1b25c114a58cf178fbaa45e2 True False False 3 3 3 0 39m -worker rendered-worker-6b000dbc31aaee63c6a2d56d04cd4c1b False True False 3 0 0 0 39m <1> ----- -<1> When the `UPDATING` field is `True`, the machine config pool is updating with the new machine config. When the field becomes `False`, the worker machine config pool has rolled out to the new machine config. - -.. 
Check the nodes to see that scheduling on the nodes is disabled. This indicates that the change is being applied: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-148-79.us-west-1.compute.internal Ready worker 32m v1.27.3 -ip-10-0-155-125.us-west-1.compute.internal Ready,SchedulingDisabled worker 35m v1.27.3 -ip-10-0-170-47.us-west-1.compute.internal Ready control-plane,master 42m v1.27.3 -ip-10-0-174-77.us-west-1.compute.internal Ready control-plane,master 42m v1.27.3 -ip-10-0-211-49.us-west-1.compute.internal Ready control-plane,master 42m v1.27.3 -ip-10-0-218-151.us-west-1.compute.internal Ready worker 31m v1.27.3 ----- - -. When the node is back in the `Ready` state, check that the node is using the custom layered image: - -.. Open an `oc debug` session to the node. For example: -+ -[source,terminal] ----- -$ oc debug node/ip-10-0-155-125.us-west-1.compute.internal ----- - -.. Set `/host` as the root directory within the debug shell: -+ -[source,terminal] ----- -sh-4.4# chroot /host ----- - -.. Run the `rpm-ostree status` command to view that the custom layered image is in use: -+ -[source,terminal] ----- -sh-4.4# sudo rpm-ostree status ----- -+ -.Example output -+ ----- -State: idle -Deployments: -* ostree-unverified-registry:quay.io/my-registry/... - Digest: sha256:... ----- - diff --git a/modules/coreos-layering-removing.adoc b/modules/coreos-layering-removing.adoc deleted file mode 100644 index f5ca8aac3e5a..000000000000 --- a/modules/coreos-layering-removing.adoc +++ /dev/null @@ -1,93 +0,0 @@ -// Module included in the following assemblies: -// -// * post-installation_configuration/coreos-layering.adoc - -:_content-type: PROCEDURE -[id="coreos-layering-removing_{context}"] -= Removing a {op-system} custom layered image - -You can easily revert {op-system-first} image layering from the nodes in specific machine config pools. The Machine Config Operator (MCO) reboots those nodes with the cluster base {op-system-first} image, overriding the custom layered image. - -To remove a {op-system-first} custom layered image from your cluster, you need to delete the machine config that applied the image. - -.Procedure - -. Delete the machine config that applied the custom layered image. -+ -[source,terminal] ----- -$ oc delete mc os-layer-custom ----- -+ -After deleting the machine config, the nodes reboot. - -.Verification - -You can verify that the custom layered image is removed by performing any of the following checks: - -. Check that the worker machine config pool is updating with the previous machine config: -+ -[source,terminal] ----- -$ oc get mcp ----- -+ -.Sample output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-6faecdfa1b25c114a58cf178fbaa45e2 True False False 3 3 3 0 39m -worker rendered-worker-6b000dbc31aaee63c6a2d56d04cd4c1b False True False 3 0 0 0 39m <1> ----- -<1> When the `UPDATING` field is `True`, the machine config pool is updating with the previous machine config. When the field becomes `False`, the worker machine config pool has rolled out to the previous machine config. - -. Check the nodes to see that scheduling on the nodes is disabled. 
This indicates that the change is being applied: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-148-79.us-west-1.compute.internal Ready worker 32m v1.27.3 -ip-10-0-155-125.us-west-1.compute.internal Ready,SchedulingDisabled worker 35m v1.27.3 -ip-10-0-170-47.us-west-1.compute.internal Ready control-plane,master 42m v1.27.3 -ip-10-0-174-77.us-west-1.compute.internal Ready control-plane,master 42m v1.27.3 -ip-10-0-211-49.us-west-1.compute.internal Ready control-plane,master 42m v1.27.3 -ip-10-0-218-151.us-west-1.compute.internal Ready worker 31m v1.27.3 ----- - -. When the node is back in the `Ready` state, check that the node is using the base image: - -.. Open an `oc debug` session to the node. For example: -+ -[source,terminal] ----- -$ oc debug node/ip-10-0-155-125.us-west-1.compute.internal ----- - -.. Set `/host` as the root directory within the debug shell: -+ -[source,terminal] ----- -sh-4.4# chroot /host ----- - -.. Run the `rpm-ostree status` command to verify that the node is using the base image: -+ -[source,terminal] ----- -sh-4.4# sudo rpm-ostree status ----- -+ -.Example output -+ ----- -State: idle -Deployments: -* ostree-unverified-registry:quay.io/openshift-release-dev/ocp-release@sha256:e2044c3cfebe0ff3a99fc207ac5efe6e07878ad59fd4ad5e41f88cb016dacd73 - Digest: sha256:e2044c3cfebe0ff3a99fc207ac5efe6e07878ad59fd4ad5e41f88cb016dacd73 ----- diff --git a/modules/coreos-layering-updating.adoc b/modules/coreos-layering-updating.adoc deleted file mode 100644 index d8b07552bbe7..000000000000 --- a/modules/coreos-layering-updating.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * post-installation_configuration/coreos-layering.adoc - -:_content-type: REFERENCE -[id="coreos-layering-updating_{context}"] -= Updating with a {op-system} custom layered image - -When you configure {op-system-first} image layering, {product-title} no longer automatically updates the node pool that uses the custom layered image. You become responsible for manually updating your nodes as appropriate. - -To update a node that uses a custom layered image, follow these general steps: - -. The cluster automatically upgrades to version x.y.z+1, except for the nodes that use the custom layered image. - -. Create a new Containerfile that references the updated {product-title} image and the RPM that you previously applied. - -. Create a new machine config that points to the updated custom layered image. - -Updating a node with a custom layered image is not required. However, if that node gets too far behind the current {product-title} version, you could experience unexpected results. 
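As a rough illustration of those general steps, the following sequence shows one way the rebuild-and-reapply flow might look. It assumes the `quay.io/my-registry/custom-image` repository, the pull secret path, and the `os-layer-custom` machine config name used in the earlier examples; adjust the names and paths for your environment.

[source,terminal]
----
$ oc adm release info --image-for rhel-coreos   # digest of the updated base image for the FROM line of the new Containerfile
$ podman build --authfile /path/to/pull-secret -t quay.io/my-registry/custom-image:update .   # rebuild the custom layered image on the new base
$ podman push --authfile /path/to/pull-secret quay.io/my-registry/custom-image:update   # push the image and note the digest that is reported
$ oc edit mc os-layer-custom   # point osImageURL at the new image digest
----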
- diff --git a/modules/cpms-changing-aws-instance-type.adoc b/modules/cpms-changing-aws-instance-type.adoc deleted file mode 100644 index ceebcbbb0aeb..000000000000 --- a/modules/cpms-changing-aws-instance-type.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -ifeval::["{context}" == "recommended-control-plane-practices"] -:scale-host: -endif::[] -ifeval::["{context}" == "cpmso-using"] -:cpmso-using: -endif::[] - -:_content-type: PROCEDURE -[id="cpms-changing-aws-instance-type_{context}"] -= Changing the Amazon Web Services instance type by using a control plane machine set - -You can change the Amazon Web Services (AWS) instance type that your control plane machines use by updating the specification in the control plane machine set custom resource (CR). - -.Prerequisites - -* Your AWS cluster uses a control plane machine set. - -.Procedure - -ifdef::scale-host[] -. Edit your control plane machine set CR by running the following command: -+ -[source,terminal] ----- -$ oc --namespace openshift-machine-api edit controlplanemachineset.machine.openshift.io cluster ----- -endif::scale-host[] - -. Edit the following line under the `providerSpec` field: -+ -[source,yaml] ----- -providerSpec: - value: - ... - instanceType: <compatible_aws_instance_type> <1> ----- -<1> Specify a larger AWS instance type with the same base as the previous selection. For example, you can change `m6i.xlarge` to `m6i.2xlarge` or `m6i.4xlarge`. - -. Save your changes. - -ifdef::scale-host[] -** For clusters that use the default `RollingUpdate` update strategy, the Operator automatically propagates the changes to your control plane configuration. - -** For clusters that are configured to use the `OnDelete` update strategy, you must replace your control plane machines manually. -endif::scale-host[] - -ifeval::["{context}" == "recommended-control-plane-practices"] -:!scale-host: -endif::[] -ifeval::["{context}" == "cpmso-using"] -:!cpmso-using: -endif::[] diff --git a/modules/cpmso-activating.adoc b/modules/cpmso-activating.adoc deleted file mode 100644 index b58bbb82120e..000000000000 --- a/modules/cpmso-activating.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-getting-started.adoc - -:_content-type: PROCEDURE -[id="cpmso-activating_{context}"] -= Activating the control plane machine set custom resource - -To use the control plane machine set, you must ensure that a `ControlPlaneMachineSet` custom resource (CR) with the correct settings for your cluster exists. On a cluster with a generated CR, you must verify that the configuration in the CR is correct for your cluster and activate it. - -[NOTE] -==== -For more information about the parameters in the CR, see "Control plane machine set configuration". -==== - -.Procedure - -. View the configuration of the CR by running the following command: -+ -[source,terminal] ----- -$ oc --namespace openshift-machine-api edit controlplanemachineset.machine.openshift.io cluster ----- - -. Change the values of any fields that are incorrect for your cluster configuration. - -. When the configuration is correct, activate the CR by setting the `.spec.state` field to `Active` and saving your changes. 
-+ -[IMPORTANT] -==== -To activate the CR, you must change the `.spec.state` field to `Active` in the same `oc edit` session that you use to update the CR configuration. If the CR is saved with the state left as `Inactive`, the control plane machine set generator resets the CR to its original settings. -==== \ No newline at end of file diff --git a/modules/cpmso-checking-status.adoc b/modules/cpmso-checking-status.adoc deleted file mode 100644 index c89547619dcf..000000000000 --- a/modules/cpmso-checking-status.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-getting-started.adoc -// * machine_management/cpmso-troubleshooting.adoc -// * machine_management/cpmso-disabling.adoc - -ifeval::["{context}" == "cpmso-disabling"] -:cpmso-disabling: -endif::[] - -:_content-type: PROCEDURE -[id="cpmso-checking-status_{context}"] -= Checking the control plane machine set custom resource state - -You can verify the existence and state of the `ControlPlaneMachineSet` custom resource (CR). - -.Procedure - -* Determine the state of the CR by running the following command: -+ -[source,terminal] ----- -$ oc get controlplanemachineset.machine.openshift.io cluster \ - --namespace openshift-machine-api ----- - -** A result of `Active` indicates that the `ControlPlaneMachineSet` CR exists and is activated. No administrator action is required. - -** A result of `Inactive` indicates that a `ControlPlaneMachineSet` CR exists but is not activated. - -** A result of `NotFound` indicates that there is no existing `ControlPlaneMachineSet` CR. - -ifndef::cpmso-disabling[] -.Next steps - -To use the control plane machine set, you must ensure that a `ControlPlaneMachineSet` CR with the correct settings for your cluster exists. - -* If your cluster has an existing CR, you must verify that the configuration in the CR is correct for your cluster. - -* If your cluster does not have an existing CR, you must create one with the correct configuration for your cluster. -endif::[] - -ifeval::["{context}" == "cpmso-disabling"] -:!cpmso-disabling: -endif::[] \ No newline at end of file diff --git a/modules/cpmso-control-plane-recovery.adoc b/modules/cpmso-control-plane-recovery.adoc deleted file mode 100644 index 4660ca0dfdc9..000000000000 --- a/modules/cpmso-control-plane-recovery.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-resiliency.adoc - -:_content-type: CONCEPT -[id="cpmso-control-plane-recovery_{context}"] -= Recovery of failed control plane machines - -The Control Plane Machine Set Operator automates the recovery of control plane machines. When a control plane machine is deleted, the Operator creates a replacement with the configuration that is specified in the `ControlPlaneMachineSet` custom resource (CR). - -For clusters that use control plane machine sets, you can configure a machine health check. The machine health check deletes unhealthy control plane machines so that they are replaced. - -[IMPORTANT] -==== -If you configure a `MachineHealthCheck` resource for the control plane, set the value of `maxUnhealthy` to `1`. - -This configuration ensures that the machine health check takes no action when multiple control plane machines appear to be unhealthy. Multiple unhealthy control plane machines can indicate that the etcd cluster is degraded or that a scaling operation to replace a failed machine is in progress. 
- -If the etcd cluster is degraded, manual intervention might be required. If a scaling operation is in progress, the machine health check should allow it to finish. -==== \ No newline at end of file diff --git a/modules/cpmso-creating-cr.adoc b/modules/cpmso-creating-cr.adoc deleted file mode 100644 index 1fbc26b04545..000000000000 --- a/modules/cpmso-creating-cr.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-getting-started.adoc - -:_content-type: PROCEDURE -[id="cpmso-creating-cr_{context}"] -= Creating a control plane machine set custom resource - -To use the control plane machine set, you must ensure that a `ControlPlaneMachineSet` custom resource (CR) with the correct settings for your cluster exists. On a cluster without a generated CR, you must create the CR manually and activate it. - -[NOTE] -==== -For more information about the structure and parameters of the CR, see "Control plane machine set configuration". -==== - -.Procedure - -. Create a YAML file using the following template: -+ --- -.Control plane machine set CR YAML file template -[source,yaml] ----- -apiVersion: machine.openshift.io/v1 -kind: ControlPlaneMachineSet -metadata: - name: cluster - namespace: openshift-machine-api -spec: - replicas: 3 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <cluster_id> <1> - machine.openshift.io/cluster-api-machine-role: master - machine.openshift.io/cluster-api-machine-type: master - state: Active <2> - strategy: - type: RollingUpdate <3> - template: - machineType: machines_v1beta1_machine_openshift_io - machines_v1beta1_machine_openshift_io: - failureDomains: - platform: <platform> <4> - <platform_failure_domains> <5> - metadata: - labels: - machine.openshift.io/cluster-api-cluster: <cluster_id> <6> - machine.openshift.io/cluster-api-machine-role: master - machine.openshift.io/cluster-api-machine-type: master - spec: - providerSpec: - value: - <platform_provider_spec> <7> ----- -<1> Specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. You must specify this value when you create a `ControlPlaneMachineSet` CR. If you have the OpenShift CLI (`oc`) installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- -<2> Specify the state of the Operator. When the state is `Inactive`, the Operator is not operational. You can activate the Operator by setting the value to `Active`. -+ -[IMPORTANT] -==== -Before you activate the CR, you must ensure that its configuration is correct for your cluster requirements. -==== -<3> Specify the update strategy for the cluster. Valid values are `OnDelete` and `RollingUpdate`. The default value is `RollingUpdate`. For more information about update strategies, see "Updating the control plane configuration". -<4> Specify your cloud provider platform name. Valid values are `AWS`, `Azure`, `GCP`, `Nutanix`, and `VSphere`. -<5> Add the `<platform_failure_domains>` configuration for the cluster. The format and values of this section are provider-specific. For more information, see the sample failure domain configuration for your cloud provider. -+ -[NOTE] -==== -VMware vSphere does not support failure domains. For vSphere clusters, replace `<platform_failure_domains>` with an empty `failureDomains:` parameter. -==== -<6> Specify the infrastructure ID. 
-<7> Add the `<platform_provider_spec>` configuration for the cluster. The format and values of this section are provider-specific. For more information, see the sample provider specification for your cloud provider. --- - -. Refer to the sample YAML for a control plane machine set CR and populate your file with values that are appropriate for your cluster configuration. - -. Refer to the sample failure domain configuration and sample provider specification for your cloud provider and update those sections of your file with the appropriate values. - -. When the configuration is correct, activate the CR by setting the `.spec.state` field to `Active` and saving your changes. - -. Create the CR from your YAML file by running the following command: -+ -[source,terminal] ----- -$ oc create -f <control_plane_machine_set>.yaml ----- -+ -where `<control_plane_machine_set>` is the name of the YAML file that contains the CR configuration. \ No newline at end of file diff --git a/modules/cpmso-deleting.adoc b/modules/cpmso-deleting.adoc deleted file mode 100644 index 305e305792b5..000000000000 --- a/modules/cpmso-deleting.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-disabling.adoc - -:_content-type: PROCEDURE -[id="cpmso-deleting_{context}"] -= Deleting the control plane machine set - -To stop managing control plane machines with the control plane machine set on your cluster, you must delete the `ControlPlaneMachineSet` custom resource (CR). - -.Procedure - -* Delete the control plane machine set CR by running the following command: -+ -[source,terminal] ----- -$ oc delete controlplanemachineset.machine.openshift.io cluster \ - -n openshift-machine-api ----- - -.Verification - -* Check the control plane machine set custom resource state. A result of `Inactive` indicates that the removal and replacement process is successful. A `ControlPlaneMachineSet` CR exists but is not activated. diff --git a/modules/cpmso-failure-domains-balancing.adoc b/modules/cpmso-failure-domains-balancing.adoc deleted file mode 100644 index ae9a19f4831a..000000000000 --- a/modules/cpmso-failure-domains-balancing.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-resiliency.adoc - -:_content-type: CONCEPT -[id="cpmso-failure-domains-balancing_{context}"] -= Balancing control plane machines - -The control plane machine set balances control plane machines across the failure domains that are specified in the custom resource (CR). - -//If failure domains must be reused, they are selected alphabetically by name. -When possible, the control plane machine set uses each failure domain equally to ensure appropriate fault tolerance. If there are fewer failure domains than control plane machines, failure domains are selected for reuse alphabetically by name. For clusters with no failure domains specified, all control plane machines are placed within a single failure domain. - -Some changes to the failure domain configuration cause the control plane machine set to rebalance the control plane machines. For example, if you add failure domains to a cluster with fewer failure domains than control plane machines, the control plane machine set rebalances the machines across all available failure domains. 
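One way to see how the control plane machines are currently spread across failure domains, for example after a rebalance, is to list them and inspect the zone column. This sketch assumes the standard control plane machine labels used elsewhere in this documentation.

[source,terminal]
----
$ oc get machines \
  -l machine.openshift.io/cluster-api-machine-role==master \
  -n openshift-machine-api -o wide   # the ZONE column shows the failure domain of each machine
----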
\ No newline at end of file diff --git a/modules/cpmso-failure-domains-provider.adoc b/modules/cpmso-failure-domains-provider.adoc deleted file mode 100644 index fcf5aa850443..000000000000 --- a/modules/cpmso-failure-domains-provider.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-resiliency.adoc - -:_content-type: REFERENCE -[id="cpmso-failure-domains-provider_{context}"] -= Failure domain platform support and configuration - -The control plane machine set concept of a failure domain is analogous to existing concepts on cloud providers. Not all platforms support the use of failure domains. - -.Failure domain support matrix -[cols="<.^,^.^,^.^"] -|==== -|Cloud provider |Support for failure domains |Provider nomenclature - -|Amazon Web Services (AWS) -|X -|link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones[Availability Zone (AZ)] - -|Google Cloud Platform (GCP) -|X -|link:https://cloud.google.com/compute/docs/regions-zones[zone] - -|Nutanix -//link:https://portal.nutanix.com/page/documents/details?targetId=Web-Console-Guide-Prism-v6_1:arc-failure-modes-c.html[Availability domain] -| -|Not applicable ^[1]^ - -|Microsoft Azure -|X -|link:https://learn.microsoft.com/en-us/azure/azure-web-pubsub/concept-availability-zones[Azure availability zone] - -|VMware vSphere -| -|Not applicable -|==== -[.small] --- -1. Nutanix has a failure domain concept, but {product-title} {product-version} does not include support for this feature. --- - -The failure domain configuration in the control plane machine set custom resource (CR) is platform-specific. For more information about failure domain parameters in the CR, see the sample failure domain configuration for your provider. \ No newline at end of file diff --git a/modules/cpmso-feat-auto-update.adoc b/modules/cpmso-feat-auto-update.adoc deleted file mode 100644 index 562ebf30780f..000000000000 --- a/modules/cpmso-feat-auto-update.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -:_content-type: CONCEPT -[id="cpmso-feat-auto-update_{context}"] -= Automatically updating the control plane configuration - -You can use the `RollingUpdate` update strategy to automatically propagate changes to your control plane configuration. - -For clusters that use the default `RollingUpdate` update strategy, the Operator creates a replacement control plane machine with the configuration that is specified in the CR. When the replacement control plane machine is ready, the Operator deletes the control plane machine that is marked for replacement. The replacement machine then joins the control plane. - -If multiple control plane machines are marked for replacement, the Operator repeats this replacement process one machine at a time until each machine is replaced. 
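If you want to follow a rolling replacement while the Operator works through the machines, one simple approach is to watch the control plane machine set; the command below assumes the resource names used elsewhere in this documentation.

[source,terminal]
----
$ oc get controlplanemachineset.machine.openshift.io cluster \
  -n openshift-machine-api -w   # watch the replica counts change as machines are replaced one at a time
----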
diff --git a/modules/cpmso-feat-config-update.adoc b/modules/cpmso-feat-config-update.adoc deleted file mode 100644 index be0e8bc919e8..000000000000 --- a/modules/cpmso-feat-config-update.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -:_content-type: PROCEDURE -[id="cpmso-feat-config-update_{context}"] -= Updating the control plane configuration - -You can make changes to the configuration of the machines in the control plane by updating the specification in the control plane machine set custom resource (CR). - -The Control Plane Machine Set Operator monitors the control plane machines and compares their configuration with the specification in the control plane machine set CR. When there is a discrepancy between the specification in the CR and the configuration of a control plane machine, the Operator marks that control plane machine for replacement. - -[NOTE] -==== -For more information about the parameters in the CR, see "Control plane machine set configuration". -==== - -.Prerequisites - -* Your cluster has an activated and functioning Control Plane Machine Set Operator. - -.Procedure - -. Edit your control plane machine set CR by running the following command: -+ -[source,terminal] ----- -$ oc edit controlplanemachineset.machine.openshift.io cluster \ - -n openshift-machine-api ----- - -. Change the values of any fields that you want to update in your cluster configuration. - -. Save your changes. - -.Next steps - -* For clusters that use the default `RollingUpdate` update strategy, the changes to your control plane configuration are propagated automatically. - -* For clusters that are configured to use the `OnDelete` update strategy, you must replace your control plane machines manually. \ No newline at end of file diff --git a/modules/cpmso-feat-test-changes.adoc b/modules/cpmso-feat-test-changes.adoc deleted file mode 100644 index d5513cf610e9..000000000000 --- a/modules/cpmso-feat-test-changes.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -:_content-type: CONCEPT -[id="cpmso-feat-test-changes_{context}"] -= Testing changes to the control plane configuration - -You can use the `OnDelete` update strategy to test changes to your control plane configuration. With this update strategy, you replace control plane machines manually. Manually replacing machines allows you to test changes to your configuration on a single machine before applying the changes more broadly. - -For clusters that are configured to use the `OnDelete` update strategy, the Operator creates a replacement control plane machine when you delete an existing machine. When the replacement control plane machine is ready, the etcd Operator allows the existing machine to be deleted. The replacement machine then joins the control plane. - -If multiple control plane machines are deleted, the Operator creates all of the required replacement machines simultaneously. 
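As a minimal sketch of that manual replacement under the `OnDelete` strategy, deleting a single control plane machine is enough to make the Operator create its replacement; the machine name is a placeholder.

[source,terminal]
----
$ oc delete machine <control_plane_machine_name> \
  -n openshift-machine-api   # the Operator creates a replacement machine with the configuration in the CR
----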
\ No newline at end of file diff --git a/modules/cpmso-feat-vertical-resize.adoc b/modules/cpmso-feat-vertical-resize.adoc deleted file mode 100644 index b9f08a8bda72..000000000000 --- a/modules/cpmso-feat-vertical-resize.adoc +++ /dev/null @@ -1,7 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-about.adoc - -:_content-type: CONCEPT -[id="cpmso-feat-vertical-resize_{context}"] -= Vertical resizing of the control plane \ No newline at end of file diff --git a/modules/cpmso-overview.adoc b/modules/cpmso-overview.adoc deleted file mode 100644 index 20f2978e9997..000000000000 --- a/modules/cpmso-overview.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-about.adoc - -:_content-type: CONCEPT -[id="cpmso-overview_{context}"] -= Control Plane Machine Set Operator overview - -The Control Plane Machine Set Operator uses the `ControlPlaneMachineSet` custom resource (CR) to automate management of the control plane machine resources within your {product-title} cluster. - -When the state of the cluster control plane machine set is set to `Active`, the Operator ensures that the cluster has the correct number of control plane machines with the specified configuration. This allows the automated replacement of degraded control plane machines and rollout of changes to the control plane. - -A cluster has only one control plane machine set, and the Operator only manages objects in the `openshift-machine-api` namespace. \ No newline at end of file diff --git a/modules/cpmso-ts-ilb-missing.adoc b/modules/cpmso-ts-ilb-missing.adoc deleted file mode 100644 index 5e43319a9129..000000000000 --- a/modules/cpmso-ts-ilb-missing.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="cpmso-ts-ilb-missing_{context}"] -= Adding a missing Azure internal load balancer - -The `internalLoadBalancer` parameter is required in both the `ControlPlaneMachineSet` and control plane `Machine` custom resources (CRs) for Azure. If this parameter is not preconfigured on your cluster, you must add it to both CRs. - -For more information about where this parameter is located in the Azure provider specification, see the sample Azure provider specification. The placement in the control plane `Machine` CR is similar. - -.Procedure - -. List the control plane machines in your cluster by running the following command: -+ -[source,terminal] ----- -$ oc get machines \ - -l machine.openshift.io/cluster-api-machine-role==master \ - -n openshift-machine-api ----- - -. For each control plane machine, edit the CR by running the following command: -+ -[source,terminal] ----- -$ oc edit machine <control_plane_machine_name> ----- - -. Add the `internalLoadBalancer` parameter with the correct details for your cluster and save your changes. - -. Edit your control plane machine set CR by running the following command: -+ -[source,terminal] ----- -$ oc edit controlplanemachineset.machine.openshift.io cluster \ - -n openshift-machine-api ----- - -. Add the `internalLoadBalancer` parameter with the correct details for your cluster and save your changes. - -.Next steps - -* For clusters that use the default `RollingUpdate` update strategy, the Operator automatically propagates the changes to your control plane configuration. 
- -* For clusters that are configured to use the `OnDelete` update strategy, you must replace your control plane machines manually. \ No newline at end of file diff --git a/modules/cpmso-ts-mhc-etcd-degraded.adoc b/modules/cpmso-ts-mhc-etcd-degraded.adoc deleted file mode 100644 index 7ccad84470cb..000000000000 --- a/modules/cpmso-ts-mhc-etcd-degraded.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="cpmso-ts-etcd-degraded_{context}"] -= Recovering a degraded etcd Operator - -Certain situations can cause the etcd Operator to become degraded. - -For example, while performing remediation, the machine health check might delete a control plane machine that is hosting etcd. If the etcd member is not reachable at that time, the etcd Operator becomes degraded. - -When the etcd Operator is degraded, manual intervention is required to force the Operator to remove the failed member and restore the cluster state. - -.Procedure - -. List the control plane machines in your cluster by running the following command: -+ -[source,terminal] ----- -$ oc get machines \ - -l machine.openshift.io/cluster-api-machine-role==master \ - -n openshift-machine-api \ - -o wide ----- -+ -Any of the following conditions might indicate a failed control plane machine: -+ --- -** The `STATE` value is `stopped`. -** The `PHASE` value is `Failed`. -** The `PHASE` value is `Deleting` for more than ten minutes. --- -+ -[IMPORTANT] -==== -Before continuing, ensure that your cluster has two healthy control plane machines. Performing the actions in this procedure on more than one control plane machine risks losing etcd quorum and can cause data loss. - -If you have lost the majority of your control plane hosts, leading to etcd quorum loss, then you must follow the disaster recovery procedure "Restoring to a previous cluster state" instead of this procedure. -==== - -. Edit the machine CR for the failed control plane machine by running the following command: -+ -[source,terminal] ----- -$ oc edit machine <control_plane_machine_name> ----- - -. Remove the contents of the `lifecycleHooks` parameter from the failed control plane machine and save your changes. -+ -The etcd Operator removes the failed machine from the cluster and can then safely add new etcd members. \ No newline at end of file diff --git a/modules/cpmso-yaml-failure-domain-aws.adoc b/modules/cpmso-yaml-failure-domain-aws.adoc deleted file mode 100644 index c84f33886658..000000000000 --- a/modules/cpmso-yaml-failure-domain-aws.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-configuration.adoc - -:_content-type: REFERENCE -[id="cpmso-yaml-failure-domain-aws_{context}"] -= Sample AWS failure domain configuration - -The control plane machine set concept of a failure domain is analogous to existing AWS concept of an link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones[_Availability Zone (AZ)_]. The `ControlPlaneMachineSet` CR spreads control plane machines across multiple failure domains when possible. - -When configuring AWS failure domains in the control plane machine set, you must specify the availability zone name and the subnet to use. 
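-
-If you are not sure which availability zone names exist in your region, you can list them before you edit the configuration. The following command is an optional check and assumes that the AWS CLI (`aws`) is installed and configured:
-
-[source,terminal]
-----
-$ aws ec2 describe-availability-zones --region <region>
-----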
- -.Sample AWS failure domain values -[source,yaml] ----- -failureDomains: - aws: - - placement: - availabilityZone: <aws_zone_a> <1> - subnet: <2> - filters: - - name: tag:Name - values: - - <cluster_id>-private-<aws_zone_a> <3> - type: Filters <4> - - placement: - availabilityZone: <aws_zone_b> <5> - subnet: - filters: - - name: tag:Name - values: - - <cluster_id>-private-<aws_zone_b> <6> - type: Filters - platform: AWS <7> ----- -<1> Specifies an AWS availability zone for the first failure domain. -<2> Specifies a subnet configuration. In this example, the subnet type is `Filters`, so there is a `filters` stanza. -<3> Specifies the subnet name for the first failure domain, using the infrastructure ID and the AWS availability zone. -<4> Specifies the subnet type. The allowed values are: `ARN`, `Filters` and `ID`. The default value is `Filters`. -<5> Specifies the subnet name for an additional failure domain, using the infrastructure ID and the AWS availability zone. -<6> Specifies the cluster's infrastructure ID and the AWS availability zone for the additional failure domain. -<7> Specifies the cloud provider platform name. Do not change this value. \ No newline at end of file diff --git a/modules/cpmso-yaml-failure-domain-azure.adoc b/modules/cpmso-yaml-failure-domain-azure.adoc deleted file mode 100644 index 32b0efeeb309..000000000000 --- a/modules/cpmso-yaml-failure-domain-azure.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-configuration.adoc - -:_content-type: REFERENCE -[id="cpmso-yaml-failure-domain-azure_{context}"] -= Sample Azure failure domain configuration - -The control plane machine set concept of a failure domain is analogous to existing Azure concept of an link:https://learn.microsoft.com/en-us/azure/azure-web-pubsub/concept-availability-zones[_Azure availability zone_]. The `ControlPlaneMachineSet` CR spreads control plane machines across multiple failure domains when possible. - -When configuring Azure failure domains in the control plane machine set, you must specify the availability zone name. - -.Sample Azure failure domain values -[source,yaml] ----- -failureDomains: - azure: <1> - - zone: "1" - - zone: "2" - - zone: "3" - platform: Azure <2> ----- -<1> Each instance of `zone` specifies an Azure availability zone for a failure domain. -<2> Specifies the cloud provider platform name. Do not change this value. \ No newline at end of file diff --git a/modules/cpmso-yaml-failure-domain-gcp.adoc b/modules/cpmso-yaml-failure-domain-gcp.adoc deleted file mode 100644 index fcaa407f0c15..000000000000 --- a/modules/cpmso-yaml-failure-domain-gcp.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-configuration.adoc - -:_content-type: REFERENCE -[id="cpmso-yaml-failure-domain-gcp_{context}"] -= Sample GCP failure domain configuration - -The control plane machine set concept of a failure domain is analogous to the existing GCP concept of a link:https://cloud.google.com/compute/docs/regions-zones[_zone_]. The `ControlPlaneMachineSet` CR spreads control plane machines across multiple failure domains when possible. - -When configuring GCP failure domains in the control plane machine set, you must specify the zone name to use. 
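-
-If you are not sure which zone names are available in your region, you can list them before you edit the configuration. The following command is an optional check and assumes that the Google Cloud CLI (`gcloud`) is installed and authenticated:
-
-[source,terminal]
-----
-$ gcloud compute zones list
-----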
- -.Sample GCP failure domain values -[source,yaml] ----- -failureDomains: - gcp: - - zone: <gcp_zone_a> <1> - - zone: <gcp_zone_b> <2> - - zone: <gcp_zone_c> - - zone: <gcp_zone_d> - platform: GCP <3> ----- -<1> Specifies a GCP zone for the first failure domain. -<2> Specifies an additional failure domain. Further failure domains are added the same way. -<3> Specifies the cloud provider platform name. Do not change this value. \ No newline at end of file diff --git a/modules/cpmso-yaml-provider-spec-aws.adoc b/modules/cpmso-yaml-provider-spec-aws.adoc deleted file mode 100644 index 257276d42306..000000000000 --- a/modules/cpmso-yaml-provider-spec-aws.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-configuration.adoc - -:_content-type: REFERENCE -[id="cpmso-yaml-provider-spec-aws_{context}"] -= Sample AWS provider specification - -When you create a control plane machine set for an existing cluster, the provider specification must match the `providerSpec` configuration in the control plane machine custom resource (CR) that is created by the installation program. You can omit any field that is set in the failure domain section of the CR. - -In the following example, `<cluster_id>` is the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: - -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- - -.Sample AWS `providerSpec` values -[source,yaml] ----- -providerSpec: - value: - ami: - id: ami-<ami_id_string> <1> - apiVersion: machine.openshift.io/v1beta1 - blockDevices: - - ebs: <2> - encrypted: true - iops: 0 - kmsKey: - arn: "" - volumeSize: 120 - volumeType: gp3 - credentialsSecret: - name: aws-cloud-credentials <3> - deviceIndex: 0 - iamInstanceProfile: - id: <cluster_id>-master-profile <4> - instanceType: m6i.xlarge <5> - kind: AWSMachineProviderConfig <6> - loadBalancers: <7> - - name: <cluster_id>-int - type: network - - name: <cluster_id>-ext - type: network - metadata: - creationTimestamp: null - metadataServiceOptions: {} - placement: <8> - region: <region> <9> - securityGroups: - - filters: - - name: tag:Name - values: - - <cluster_id>-master-sg <10> - subnet: {} <11> - userDataSecret: - name: master-user-data <12> ----- -<1> Specifies the {op-system-first} Amazon Machine Images (AMI) ID for the cluster. The AMI must belong to the same region as the cluster. If you want to use an AWS Marketplace image, you must complete the {product-title} subscription from the link:https://aws.amazon.com/marketplace/fulfillment?productId=59ead7de-2540-4653-a8b0-fa7926d5c845[AWS Marketplace] to obtain an AMI ID for your region. -<2> Specifies the configuration of an encrypted EBS volume. -<3> Specifies the secret name for the cluster. Do not change this value. -<4> Specifies the AWS Identity and Access Management (IAM) instance profile. Do not change this value. -<5> Specifies the AWS instance type for the control plane. -<6> Specifies the cloud provider platform type. Do not change this value. -<7> Specifies the internal (`int`) and external (`ext`) load balancers for the cluster. -<8> This parameter is configured in the failure domain, and is shown with an empty value here. 
If a value specified for this parameter differs from the value in the failure domain, the Operator overwrites it with the value in the failure domain. -<9> Specifies the AWS region for the cluster. -<10> Specifies the control plane machines security group. -<11> This parameter is configured in the failure domain, and is shown with an empty value here. If a value specified for this parameter differs from the value in the failure domain, the Operator overwrites it with the value in the failure domain. -<12> Specifies the control plane user data secret. Do not change this value. \ No newline at end of file diff --git a/modules/cpmso-yaml-provider-spec-azure.adoc b/modules/cpmso-yaml-provider-spec-azure.adoc deleted file mode 100644 index f2d6576efc61..000000000000 --- a/modules/cpmso-yaml-provider-spec-azure.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-configuration.adoc - -:_content-type: REFERENCE -[id="cpmso-yaml-provider-spec-azure_{context}"] -= Sample Azure provider specification - -When you create a control plane machine set for an existing cluster, the provider specification must match the `providerSpec` configuration in the control plane `Machine` CR that is created by the installation program. You can omit any field that is set in the failure domain section of the CR. - -In the following example, `<cluster_id>` is the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: - -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- - -.Sample Azure `providerSpec` values -[source,yaml] ----- -providerSpec: - value: - acceleratedNetworking: true - apiVersion: machine.openshift.io/v1beta1 - credentialsSecret: - name: azure-cloud-credentials <1> - namespace: openshift-machine-api - diagnostics: {} - image: <2> - offer: "" - publisher: "" - resourceID: /resourceGroups/<cluster_id>-rg/providers/Microsoft.Compute/galleries/gallery_<cluster_id>/images/<cluster_id>-gen2/versions/412.86.20220930 <3> - sku: "" - version: "" - internalLoadBalancer: <cluster_id>-internal <4> - kind: AzureMachineProviderSpec <5> - location: <region> <6> - managedIdentity: <cluster_id>-identity - metadata: - creationTimestamp: null - name: <cluster_id> - networkResourceGroup: <cluster_id>-rg - osDisk: <7> - diskSettings: {} - diskSizeGB: 1024 - managedDisk: - storageAccountType: Premium_LRS - osType: Linux - publicIP: false - publicLoadBalancer: <cluster_id> <8> - resourceGroup: <cluster_id>-rg - subnet: <cluster_id>-master-subnet <9> - userDataSecret: - name: master-user-data <10> - vmSize: Standard_D8s_v3 - vnet: <cluster_id>-vnet - zone: "" <11> ----- -<1> Specifies the secret name for the cluster. Do not change this value. -<2> Specifies the image details for your control plane machine set. -<3> Specifies an image that is compatible with your instance type. The Hyper-V generation V2 images created by the installation program have a `-gen2` suffix, while V1 images have the same name without the suffix. -<4> Specifies the internal load balancer for the control plane. This field might not be preconfigured but is required in both the `ControlPlaneMachineSet` and control plane `Machine` CRs. -<5> Specifies the cloud provider platform type. Do not change this value. -<6> Specifies the region to place control plane machines on. 
-<7> Specifies the disk configuration for the control plane. -<8> Specifies the public load balancer for the control plane. -<9> Specifies the subnet for the control plane. -<10> Specifies the control plane user data secret. Do not change this value. -<11> This parameter is configured in the failure domain, and is shown with an empty value here. If a value specified for this parameter differs from the value in the failure domain, the Operator overwrites it with the value in the failure domain. \ No newline at end of file diff --git a/modules/cpmso-yaml-provider-spec-gcp.adoc b/modules/cpmso-yaml-provider-spec-gcp.adoc deleted file mode 100644 index 3b1f3e29fbad..000000000000 --- a/modules/cpmso-yaml-provider-spec-gcp.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-configuration.adoc - -:_content-type: REFERENCE -[id="cpmso-yaml-provider-spec-gcp_{context}"] -= Sample GCP provider specification - -When you create a control plane machine set for an existing cluster, the provider specification must match the `providerSpec` configuration in the control plane machine custom resource (CR) that is created by the installation program. You can omit any field that is set in the failure domain section of the CR. - -[discrete] -[id="cpmso-yaml-provider-spec-gcp-oc_{context}"] -== Values obtained by using the OpenShift CLI - -In the following example, you can obtain some of the values for your cluster by using the OpenShift CLI. - -Infrastructure ID:: The `<cluster_id>` string is the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- - -Image path:: The `<path_to_image>` string is the path to the image that was used to create the disk. If you have the OpenShift CLI installed, you can obtain the path to the image by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-machine-api \ - -o jsonpath='{.spec.template.machines_v1beta1_machine_openshift_io.spec.providerSpec.value.disks[0].image}{"\n"}' \ - get ControlPlaneMachineSet/cluster ----- - -.Sample GCP `providerSpec` values -[source,yaml] ----- -providerSpec: - value: - apiVersion: machine.openshift.io/v1beta1 - canIPForward: false - credentialsSecret: - name: gcp-cloud-credentials <1> - deletionProtection: false - disks: - - autoDelete: true - boot: true - image: <path_to_image> <2> - labels: null - sizeGb: 200 - type: pd-ssd - kind: GCPMachineProviderSpec <3> - machineType: e2-standard-4 - metadata: - creationTimestamp: null - metadataServiceOptions: {} - networkInterfaces: - - network: <cluster_id>-network - subnetwork: <cluster_id>-master-subnet - projectID: <project_name> <4> - region: <region> <5> - serviceAccounts: - - email: <cluster_id>-m@<project_name>.iam.gserviceaccount.com - scopes: - - https://www.googleapis.com/auth/cloud-platform - shieldedInstanceConfig: {} - tags: - - <cluster_id>-master - targetPools: - - <cluster_id>-api - userDataSecret: - name: master-user-data <6> - zone: "" <7> ----- -<1> Specifies the secret name for the cluster. Do not change this value. -<2> Specifies the path to the image that was used to create the disk. 
-+ -To use a GCP Marketplace image, specify the offer to use: -+ --- -* {product-title}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-ocp-48-x86-64-202210040145` -* {opp}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-opp-48-x86-64-202206140145` -* {oke}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-oke-48-x86-64-202206140145` --- -<3> Specifies the cloud provider platform type. Do not change this value. -<4> Specifies the name of the GCP project that you use for your cluster. -<5> Specifies the GCP region for the cluster. -<6> Specifies the control plane user data secret. Do not change this value. -<7> This parameter is configured in the failure domain, and is shown with an empty value here. If a value specified for this parameter differs from the value in the failure domain, the Operator overwrites it with the value in the failure domain. \ No newline at end of file diff --git a/modules/cpmso-yaml-provider-spec-nutanix.adoc b/modules/cpmso-yaml-provider-spec-nutanix.adoc deleted file mode 100644 index 9d02f6ec0367..000000000000 --- a/modules/cpmso-yaml-provider-spec-nutanix.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-configuration.adoc - -:_content-type: REFERENCE -[id="cpmso-yaml-provider-spec-nutanix_{context}"] -= Sample Nutanix provider specification - -When you create a control plane machine set for an existing cluster, the provider specification must match the `providerSpec` configuration in the control plane machine custom resource (CR) that is created by the installation program. - -[discrete] -[id="cpmso-yaml-provider-spec-nutanix-oc_{context}"] -== Values obtained by using the OpenShift CLI - -In the following example, you can obtain some of the values for your cluster by using the OpenShift CLI. - -Infrastructure ID:: The `<cluster_id>` string is the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- - -.Sample Nutanix `providerSpec` values -[source,yaml] ----- -providerSpec: - value: - apiVersion: machine.openshift.io/v1 - bootType: "" <1> - categories: <2> - - key: <category_name> - value: <category_value> - cluster: <3> - type: uuid - uuid: <cluster_uuid> - credentialsSecret: - name: nutanix-credentials <4> - image: <5> - name: <cluster_id>-rhcos - type: name - kind: NutanixMachineProviderConfig <6> - memorySize: 16Gi <7> - metadata: - creationTimestamp: null - project: <8> - type: name - name: <project_name> - subnets: <9> - - type: uuid - uuid: <subnet_uuid> - systemDiskSize: 120Gi <10> - userDataSecret: - name: master-user-data <11> - vcpuSockets: 8 <12> - vcpusPerSocket: 1 <13> ----- -<1> Specifies the boot type that the control plane machines use. For more information about boot types, see link:https://portal.nutanix.com/page/documents/kbs/details?targetId=kA07V000000H3K9SAK[Understanding UEFI, Secure Boot, and TPM in the Virtualized Environment]. Valid values are `Legacy`, `SecureBoot`, or `UEFI`. The default is `Legacy`. -+ -[NOTE] -==== -You must use the `Legacy` boot type in {product-title} {product-version}. 
-==== -<2> Specifies one or more Nutanix Prism categories to apply to control plane machines. This stanza requires `key` and `value` parameters for a category key-value pair that exists in Prism Central. For more information about categories, see link:https://portal.nutanix.com/page/documents/details?targetId=Prism-Central-Guide-vpc_2022_6:ssp-ssp-categories-manage-pc-c.html[Category management]. -<3> Specifies a Nutanix Prism Element cluster configuration. In this example, the cluster type is `uuid`, so there is a `uuid` stanza. -<4> Specifies the secret name for the cluster. Do not change this value. -<5> Specifies the image that was used to create the disk. -<6> Specifies the cloud provider platform type. Do not change this value. -<7> Specifies the memory allocated for the control plane machines. -<8> Specifies the Nutanix project that you use for your cluster. In this example, the project type is `name`, so there is a `name` stanza. -<9> Specifies a subnet configuration. In this example, the subnet type is `uuid`, so there is a `uuid` stanza. -<10> Specifies the VM disk size for the control plane machines. -<11> Specifies the control plane user data secret. Do not change this value. -<12> Specifies the number of vCPU sockets allocated for the control plane machines. -<13> Specifies the number of vCPUs for each control plane vCPU socket. \ No newline at end of file diff --git a/modules/cpmso-yaml-provider-spec-vsphere.adoc b/modules/cpmso-yaml-provider-spec-vsphere.adoc deleted file mode 100644 index 7dfe933f4cba..000000000000 --- a/modules/cpmso-yaml-provider-spec-vsphere.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-configuration.adoc - -:_content-type: REFERENCE -[id="cpmso-yaml-provider-spec-vsphere_{context}"] -= Sample vSphere provider specification - -When you create a control plane machine set for an existing cluster, the provider specification must match the `providerSpec` configuration in the control plane machine custom resource (CR) that is created by the installation program. - -.Sample vSphere `providerSpec` values -[source,yaml] ----- -providerSpec: - value: - apiVersion: machine.openshift.io/v1beta1 - credentialsSecret: - name: vsphere-cloud-credentials <1> - diskGiB: 120 <2> - kind: VSphereMachineProviderSpec <3> - memoryMiB: 16384 <4> - metadata: - creationTimestamp: null - network: <5> - devices: - - networkName: <vm_network_name> - numCPUs: 4 <6> - numCoresPerSocket: 4 <7> - snapshot: "" - template: <vm_template_name> <8> - userDataSecret: - name: master-user-data <9> - workspace: - datacenter: <vcenter_datacenter_name> <10> - datastore: <vcenter_datastore_name> <11> - folder: <path_to_vcenter_vm_folder> <12> - resourcePool: <vsphere_resource_pool> <13> - server: <vcenter_server_ip> <14> ----- -<1> Specifies the secret name for the cluster. Do not change this value. -<2> Specifies the VM disk size for the control plane machines. -<3> Specifies the cloud provider platform type. Do not change this value. -<4> Specifies the memory allocated for the control plane machines. -<5> Specifies the network on which the control plane is deployed. -<6> Specifies the number of CPUs allocated for the control plane machines. -<7> Specifies the number of cores for each control plane CPU. -<8> Specifies the vSphere VM template to use, such as `user-5ddjd-rhcos`. -<9> Specifies the control plane user data secret. Do not change this value. -<10> Specifies the vCenter Datacenter for the control plane. 
-<11> Specifies the vCenter Datastore for the control plane. -<12> Specifies the path to the vSphere VM folder in vCenter, such as `/dc1/vm/user-inst-5ddjd`. -<13> Specifies the vSphere resource pool for your VMs. -<14> Specifies the vCenter server IP or fully qualified domain name. \ No newline at end of file diff --git a/modules/cpmso-yaml-sample-cr.adoc b/modules/cpmso-yaml-sample-cr.adoc deleted file mode 100644 index e8008b36e217..000000000000 --- a/modules/cpmso-yaml-sample-cr.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-configuration.adoc - -:_content-type: REFERENCE -[id="cpmso-yaml-sample-cr_{context}"] -= Sample YAML for a control plane machine set custom resource - -The base of the `ControlPlaneMachineSet` CR is structured the same way for all platforms. - -.Sample `ControlPlaneMachineSet` CR YAML file -[source,yaml] ----- -apiVersion: machine.openshift.io/v1 -kind: ControlPlaneMachineSet -metadata: - name: cluster <1> - namespace: openshift-machine-api -spec: - replicas: 3 <2> - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <cluster_id> <3> - machine.openshift.io/cluster-api-machine-role: master - machine.openshift.io/cluster-api-machine-type: master - state: Active <4> - strategy: - type: RollingUpdate <5> - template: - machineType: machines_v1beta1_machine_openshift_io - machines_v1beta1_machine_openshift_io: - failureDomains: - platform: <platform> <6> - <platform_failure_domains> <7> - metadata: - labels: - machine.openshift.io/cluster-api-cluster: <cluster_id> - machine.openshift.io/cluster-api-machine-role: master - machine.openshift.io/cluster-api-machine-type: master - spec: - providerSpec: - value: - <platform_provider_spec> <8> ----- -<1> Specifies the name of the `ControlPlaneMachineSet` CR, which is `cluster`. Do not change this value. -<2> Specifies the number of control plane machines. Only clusters with three control plane machines are supported, so the `replicas` value is `3`. Horizontal scaling is not supported. Do not change this value. -<3> Specifies the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. You must specify this value when you create a `ControlPlaneMachineSet` CR. If you have the OpenShift CLI (`oc`) installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- -<4> Specifies the state of the Operator. When the state is `Inactive`, the Operator is not operational. You can activate the Operator by setting the value to `Active`. -+ -[IMPORTANT] -==== -Before you activate the Operator, you must ensure that the `ControlPlaneMachineSet` CR configuration is correct for your cluster requirements. For more information about activating the Control Plane Machine Set Operator, see "Getting started with control plane machine sets". -==== -<5> Specifies the update strategy for the cluster. The allowed values are `OnDelete` and `RollingUpdate`. The default value is `RollingUpdate`. For more information about update strategies, see "Updating the control plane configuration". -<6> Specifies the cloud provider platform name. Do not change this value. -<7> Specifies the `<platform_failure_domains>` configuration for the cluster. The format and values of this section are provider-specific. For more information, see the sample failure domain configuration for your cloud provider. 
-+ -[NOTE] -==== -VMware vSphere does not support failure domains. -==== -<8> Specifies the `<platform_provider_spec>` configuration for the cluster. The format and values of this section are provider-specific. For more information, see the sample provider specification for your cloud provider. \ No newline at end of file diff --git a/modules/crd-creating-aggregated-cluster-roles.adoc b/modules/crd-creating-aggregated-cluster-roles.adoc deleted file mode 100644 index a7c7c1b9726e..000000000000 --- a/modules/crd-creating-aggregated-cluster-roles.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/understanding/crds/extending-api-with-crds.adoc - -:_content-type: PROCEDURE -[id="crd-creating-aggregated-cluster-role_{context}"] -= Creating cluster roles for custom resource definitions - -Cluster administrators can grant permissions to existing cluster-scoped custom resource definitions (CRDs). If you use the `admin`, `edit`, and `view` default cluster roles, you can take advantage of cluster role aggregation for their rules. - -[IMPORTANT] -==== -You must explicitly assign permissions to each of these roles. The roles with more permissions do not inherit rules from roles with fewer permissions. If you assign a rule to a role, you must also assign that verb to roles that have more permissions. For example, if you grant the `get crontabs` permission to the view role, you must also grant it to the `edit` and `admin` roles. The `admin` or `edit` role is usually assigned to the user that created a project through the project template. -==== - -.Prerequisites - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -- Create a CRD. -endif::[] - -.Procedure - -. Create a cluster role definition file for the CRD. The cluster role definition is a YAML file that contains the rules that apply to each cluster role. An {product-title} controller adds the rules that you specify to the default cluster roles. -+ -.Example YAML file for a cluster role definition -[source,yaml] ----- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 <1> -metadata: - name: aggregate-cron-tabs-admin-edit <2> - labels: - rbac.authorization.k8s.io/aggregate-to-admin: "true" <3> - rbac.authorization.k8s.io/aggregate-to-edit: "true" <4> -rules: -- apiGroups: ["stable.example.com"] <5> - resources: ["crontabs"] <6> - verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"] <7> ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: aggregate-cron-tabs-view <2> - labels: - # Add these permissions to the "view" default role. - rbac.authorization.k8s.io/aggregate-to-view: "true" <8> - rbac.authorization.k8s.io/aggregate-to-cluster-reader: "true" <9> -rules: -- apiGroups: ["stable.example.com"] <5> - resources: ["crontabs"] <6> - verbs: ["get", "list", "watch"] <7> ----- -<1> Use the `rbac.authorization.k8s.io/v1` API. -<2> Specify a name for the definition. -<3> Specify this label to grant permissions to the admin default role. -<4> Specify this label to grant permissions to the edit default role. -<5> Specify the group name of the CRD. -<6> Specify the plural name of the CRD that these rules apply to. -<7> Specify the verbs that represent the permissions that are granted to the role. For example, apply read and write permissions to the `admin` and `edit` roles and only read permission to the `view` role. -<8> Specify this label to grant permissions to the `view` default role. 
-<9> Specify this label to grant permissions to the `cluster-reader` default role. - -. Create the cluster role: -+ -[source,terminal] ----- -$ oc create -f <file_name>.yaml ----- diff --git a/modules/crd-creating-crds.adoc b/modules/crd-creating-crds.adoc deleted file mode 100644 index e39db189f15a..000000000000 --- a/modules/crd-creating-crds.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/understanding/crds/extending-api-with-crds.adoc - -:_content-type: PROCEDURE -[id="crd-creating-custom-resources-definition_{context}"] -= Creating a custom resource definition - -To create custom resource (CR) objects, cluster administrators must first create a custom resource definition (CRD). - -.Prerequisites - -- Access to an {product-title} cluster with `cluster-admin` user privileges. - -.Procedure - -To create a CRD: - -. Create a YAML file that contains the following field types: -+ -.Example YAML file for a CRD -[source,yaml] ----- -apiVersion: apiextensions.k8s.io/v1 <1> -kind: CustomResourceDefinition -metadata: - name: crontabs.stable.example.com <2> -spec: - group: stable.example.com <3> - versions: - name: v1 <4> - scope: Namespaced <5> - names: - plural: crontabs <6> - singular: crontab <7> - kind: CronTab <8> - shortNames: - - ct <9> ----- -<1> Use the `apiextensions.k8s.io/v1` API. -<2> Specify a name for the definition. This must be in the `<plural-name>.<group>` format using the values from the `group` and `plural` fields. -<3> Specify a group name for the API. An API group is a collection of objects that are logically related. For example, all batch objects like `Job` or `ScheduledJob` could be in the batch API group (such as `batch.api.example.com`). A good practice is to use a fully-qualified-domain name (FQDN) of your organization. -<4> Specify a version name to be used in the URL. Each API group can exist in multiple versions, for example `v1alpha`, `v1beta`, `v1`. -<5> Specify whether the custom objects are available to a project (`Namespaced`) or all projects in the cluster (`Cluster`). -<6> Specify the plural name to use in the URL. The `plural` field is the same as a resource in an API URL. -<7> Specify a singular name to use as an alias on the CLI and for display. -<8> Specify the kind of objects that can be created. The type can be in CamelCase. -<9> Specify a shorter string to match your resource on the CLI. -+ -[NOTE] -==== -By default, a CRD is cluster-scoped and available to all projects. -==== - -. Create the CRD object: -+ -[source,terminal] ----- -$ oc create -f <file_name>.yaml ----- -+ -A new RESTful API endpoint is created at: -+ -[source,terminal] ----- -/apis/<spec:group>/<spec:version>/<scope>/*/<names-plural>/... ----- -+ -For example, using the example file, the following endpoint is created: -+ -[source,terminal] ----- -/apis/stable.example.com/v1/namespaces/*/crontabs/... ----- -+ -You can now use this endpoint URL to create and manage CRs. The object kind is based on the `spec.kind` field of the CRD object you created. 
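-
-As an optional verification, you can confirm that the new resource type is registered with the API server. The following command uses the example `crontabs.stable.example.com` definition:
-
-[source,terminal]
-----
-$ oc get crd crontabs.stable.example.com
-----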
diff --git a/modules/crd-creating-custom-resources-from-file.adoc b/modules/crd-creating-custom-resources-from-file.adoc deleted file mode 100644 index 0b55f960ce33..000000000000 --- a/modules/crd-creating-custom-resources-from-file.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Useful paired with modules/crd-inspecting-custom-resources.adoc -// -// Module included in the following assemblies: -// -// * operators/understanding/crds/crd-managing-resources-from-crds.adoc -// * operators/understanding/crds/extending-api-with-crds.adoc - -:_content-type: PROCEDURE -[id="crd-creating-custom-resources-from-file_{context}"] -= Creating custom resources from a file - -After a custom resource definitions (CRD) has been added to the cluster, custom resources (CRs) can be created with the CLI from a file using the CR specification. - -.Prerequisites - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -- CRD added to the cluster by a cluster administrator. -endif::[] - -.Procedure - -. Create a YAML file for the CR. In the following example definition, the `cronSpec` and `image` custom fields are set in a CR of `Kind: CronTab`. The `Kind` comes from the `spec.kind` field of the CRD object: -+ -.Example YAML file for a CR -[source,yaml] ----- -apiVersion: "stable.example.com/v1" <1> -kind: CronTab <2> -metadata: - name: my-new-cron-object <3> - finalizers: <4> - - finalizer.stable.example.com -spec: <5> - cronSpec: "* * * * /5" - image: my-awesome-cron-image ----- -+ -<1> Specify the group name and API version (name/version) from the CRD. -<2> Specify the type in the CRD. -<3> Specify a name for the object. -<4> Specify the link:https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#finalizers[finalizers] for the object, if any. Finalizers allow controllers to implement conditions that must be completed before the object can be deleted. -<5> Specify conditions specific to the type of object. - -. After you create the file, create the object: -+ -[source,terminal] ----- -$ oc create -f <file_name>.yaml ----- diff --git a/modules/crd-custom-resource-definitions.adoc b/modules/crd-custom-resource-definitions.adoc deleted file mode 100644 index 59500f91841d..000000000000 --- a/modules/crd-custom-resource-definitions.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/understanding/crds/crd-managing-resources-from-crds.adoc -// * operators/understanding/crds/extending-api-with-crds.adoc - -[id="crd-custom-resource-definitions_{context}"] -= Custom resource definitions - -In the Kubernetes API, a _resource_ is an endpoint that stores a collection of API objects of a certain kind. For example, the built-in `Pods` resource contains a collection of `Pod` objects. - -A _custom resource definition_ (CRD) object defines a new, unique object type, called a _kind_, in the cluster and lets the Kubernetes API server handle its entire lifecycle. - -_Custom resource_ (CR) objects are created from CRDs that have been added to the cluster by a cluster administrator, allowing all cluster users to add the new resource type into projects. - -ifeval::["{context}" == "crd-extending-api-with-crds"] -When a cluster administrator adds a new CRD to the cluster, the Kubernetes API server reacts by creating a new RESTful resource path that can be accessed by the entire cluster or a single project (namespace) and begins serving the specified CR. 
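-
-For example, a CRD that defines the group `stable.example.com`, the version `v1`, and the plural name `crontabs` is served from an endpoint of the following form:
-
-[source,terminal]
-----
-/apis/stable.example.com/v1/namespaces/*/crontabs/...
-----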
- -Cluster administrators that want to grant access to the CRD to other users can use cluster role aggregation to grant access to users with the `admin`, `edit`, or `view` default cluster roles. Cluster role aggregation allows the insertion of custom policy rules into these cluster roles. This behavior integrates the new resource into the RBAC policy of the cluster as if it was a built-in resource. -endif::[] - -Operators in particular make use of CRDs by packaging them with any required RBAC policy and other software-specific logic. -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -Cluster administrators can also add CRDs manually to the cluster outside of the lifecycle of an Operator, making them available to all users. - -[NOTE] -==== -While only cluster administrators can create CRDs, developers can create the CR from an existing CRD if they have read and write permission to it. -==== -endif::[] diff --git a/modules/crd-inspecting-custom-resources.adoc b/modules/crd-inspecting-custom-resources.adoc deleted file mode 100644 index abae97060767..000000000000 --- a/modules/crd-inspecting-custom-resources.adoc +++ /dev/null @@ -1,93 +0,0 @@ -// Useful paired with modules/crd-creating-custom-resources-from-file.adoc -// -// Module included in the following assemblies: -// -// * operators/understanding/crds/crd-managing-resources-from-crds.adoc -// * operators/understanding/crds/extending-api-with-crds.adoc - -:_content-type: PROCEDURE -[id="crd-inspecting-custom-resources_{context}"] -= Inspecting custom resources - -You can inspect custom resource (CR) objects that exist in your cluster using the CLI. - -.Prerequisites - -* A CR object exists in a namespace to which you have access. - -.Procedure - -. To get information on a specific kind of a CR, run: -+ -[source,terminal] ----- -$ oc get <kind> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc get crontab ----- -+ -.Example output -[source,terminal] ----- -NAME KIND -my-new-cron-object CronTab.v1.stable.example.com ----- -+ -Resource names are not case-sensitive, and you can use either the singular or plural forms defined in the CRD, as well as any short name. For example: -+ -[source,terminal] ----- -$ oc get crontabs ----- -+ -[source,terminal] ----- -$ oc get crontab ----- -+ -[source,terminal] ----- -$ oc get ct ----- - -. You can also view the raw YAML data for a CR: -+ -[source,terminal] ----- -$ oc get <kind> -o yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc get ct -o yaml ----- -+ -.Example output -[source,terminal] ----- -apiVersion: v1 -items: -- apiVersion: stable.example.com/v1 - kind: CronTab - metadata: - clusterName: "" - creationTimestamp: 2017-05-31T12:56:35Z - deletionGracePeriodSeconds: null - deletionTimestamp: null - name: my-new-cron-object - namespace: default - resourceVersion: "285" - selfLink: /apis/stable.example.com/v1/namespaces/default/crontabs/my-new-cron-object - uid: 9423255b-4600-11e7-af6a-28d2447dc82b - spec: - cronSpec: '* * * * /5' <1> - image: my-awesome-cron-image <1> ----- -<1> Custom data from the YAML that you used to create the object displays. 
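-
-You can also describe a specific CR to review its details in a human-readable format. For example, for the `my-new-cron-object` object shown in the previous output:
-
-[source,terminal]
-----
-$ oc describe crontab my-new-cron-object
-----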
diff --git a/modules/create-a-containerruntimeconfig-crd.adoc b/modules/create-a-containerruntimeconfig-crd.adoc deleted file mode 100644 index a4715f0cc228..000000000000 --- a/modules/create-a-containerruntimeconfig-crd.adoc +++ /dev/null @@ -1,236 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/machine-configuration-tasks.adoc - -:_content-type: PROCEDURE -[id="create-a-containerruntimeconfig_{context}"] -= Creating a ContainerRuntimeConfig CR to edit CRI-O parameters - -You can change some of the settings associated with the {product-title} CRI-O runtime for the nodes associated with a specific machine config pool (MCP). Using a `ContainerRuntimeConfig` custom resource (CR), you set the configuration values and add a label to match the MCP. The MCO then rebuilds the `crio.conf` and `storage.conf` configuration files on the associated nodes with the updated values. - -[NOTE] -==== -To revert the changes implemented by using a `ContainerRuntimeConfig` CR, you must delete the CR. Removing the label from the machine config pool does not revert the changes. -==== - -You can modify the following settings by using a `ContainerRuntimeConfig` CR: - -* **PIDs limit**: Setting the PIDs limit in the `ContainerRuntimeConfig` is expected to be deprecated. If PIDs limits are required, it is recommended to use the `podPidsLimit` field in the `KubeletConfig` CR instead. The default value of the `podPidsLimit` field is `4096`. -+ -[NOTE] -==== -The CRI-O flag is applied on the cgroup of the container, while the Kubelet flag is set on the cgroup of the pod. Please adjust the PIDs limit accordingly. -==== - -* **Log level**: The `logLevel` parameter sets the CRI-O `log_level` parameter, which is the level of verbosity for log messages. The default is `info` (`log_level = info`). Other options include `fatal`, `panic`, `error`, `warn`, `debug`, and `trace`. -* **Overlay size**: The `overlaySize` parameter sets the CRI-O Overlay storage driver `size` parameter, which is the maximum size of a container image. -* **Maximum log size**: Setting the maximum log size in the `ContainerRuntimeConfig` is expected to be deprecated. If a maximum log size is required, it is recommended to use the `containerLogMaxSize` field in the `KubeletConfig` CR instead. -* **Container runtime**: The `defaultRuntime` parameter sets the container runtime to either `runc` or `crun`. The default is `runc`. - -You should have one `ContainerRuntimeConfig` CR for each machine config pool with all the config changes you want for that pool. If you are applying the same content to all the pools, you only need one `ContainerRuntimeConfig` CR for all the pools. - -You should edit an existing `ContainerRuntimeConfig` CR to modify existing settings or add new settings instead of creating a new CR for each change. It is recommended to create a new `ContainerRuntimeConfig` CR only to modify a different machine config pool, or for changes that are intended to be temporary so that you can revert the changes. - -You can create multiple `ContainerRuntimeConfig` CRs, as needed, with a limit of 10 per cluster. For the first `ContainerRuntimeConfig` CR, the MCO creates a machine config appended with `containerruntime`. With each subsequent CR, the controller creates a new `containerruntime` machine config with a numeric suffix. For example, if you have a `containerruntime` machine config with a `-2` suffix, the next `containerruntime` machine config is appended with `-3`. 
- -If you want to delete the machine configs, you should delete them in reverse order to avoid exceeding the limit. For example, you should delete the `containerruntime-3` machine config before deleting the `containerruntime-2` machine config. - -[NOTE] -==== -If you have a machine config with a `containerruntime-9` suffix, and you create another `ContainerRuntimeConfig` CR, a new machine config is not created, even if there are fewer than 10 `containerruntime` machine configs. -==== - -.Example showing multiple `ContainerRuntimeConfig` CRs -[source,terminal] ----- -$ oc get ctrcfg ----- - -.Example output -[source, terminal] ----- -NAME AGE -ctr-pid 24m -ctr-overlay 15m -ctr-level 5m45s ----- - -.Example showing multiple `containerruntime` machine configs -[source,terminal] ----- -$ oc get mc | grep container ----- - -.Example output -[source, terminal] ----- -... -01-master-container-runtime b5c5119de007945b6fe6fb215db3b8e2ceb12511 3.2.0 57m -... -01-worker-container-runtime b5c5119de007945b6fe6fb215db3b8e2ceb12511 3.2.0 57m -... -99-worker-generated-containerruntime b5c5119de007945b6fe6fb215db3b8e2ceb12511 3.2.0 26m -99-worker-generated-containerruntime-1 b5c5119de007945b6fe6fb215db3b8e2ceb12511 3.2.0 17m -99-worker-generated-containerruntime-2 b5c5119de007945b6fe6fb215db3b8e2ceb12511 3.2.0 7m26s -... ----- - -The following example raises the `pids_limit` to 2048, sets the `log_level` to `debug`, sets the overlay size to 8 GB, and sets the `log_size_max` to unlimited: - -.Example `ContainerRuntimeConfig` CR -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: ContainerRuntimeConfig -metadata: - name: overlay-size -spec: - machineConfigPoolSelector: - matchLabels: - pools.operator.machineconfiguration.openshift.io/worker: '' <1> - containerRuntimeConfig: - pidsLimit: 2048 <2> - logLevel: debug <3> - overlaySize: 8G <4> - logSizeMax: "-1" <5> - defaultRuntime: "crun" <6> ----- -<1> Specifies the machine config pool label. -<2> Optional: Specifies the maximum number of processes allowed in a container. -<3> Optional: Specifies the level of verbosity for log messages. -<4> Optional: Specifies the maximum size of a container image. -<5> Optional: Specifies the maximum size allowed for the container log file. If - set to a positive number, it must be at least 8192. -<6> Optional: Specifies the container runtime to deploy to new containers. The default is `runc`. - -.Prerequisite - -* To enable crun, you must enable the `TechPreviewNoUpgrade` feature set. -+ -[NOTE] -==== -Enabling the `TechPreviewNoUpgrade` feature set cannot be undone and prevents minor version updates. These feature sets are not recommended on production clusters. -==== - -.Procedure - -To change CRI-O settings using the `ContainerRuntimeConfig` CR: - -. Create a YAML file for the `ContainerRuntimeConfig` CR: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: ContainerRuntimeConfig -metadata: - name: overlay-size -spec: - machineConfigPoolSelector: - matchLabels: - pools.operator.machineconfiguration.openshift.io/worker: '' <1> - containerRuntimeConfig: <2> - pidsLimit: 2048 - logLevel: debug - overlaySize: 8G - logSizeMax: "-1" ----- -<1> Specify a label for the machine config pool that you want you want to modify. -<2> Set the parameters as needed. - -. Create the `ContainerRuntimeConfig` CR: -+ -[source,terminal] ----- -$ oc create -f <file_name>.yaml ----- - -. 
Verify that the CR is created: -+ -[source,terminal] ----- -$ oc get ContainerRuntimeConfig ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -overlay-size 3m19s ----- - -. Check that a new `containerruntime` machine config is created: -+ -[source,terminal] ----- -$ oc get machineconfigs | grep containerrun ----- -+ -.Example output -[source,terminal] ----- -99-worker-generated-containerruntime 2c9371fbb673b97a6fe8b1c52691999ed3a1bfc2 3.2.0 31s ----- - -. Monitor the machine config pool until all are shown as ready: -+ -[source,terminal] ----- -$ oc get mcp worker ----- -+ -.Example output -+ -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -worker rendered-worker-169 False True False 3 1 1 0 9h ----- - -. Verify that the settings were applied in CRI-O: - -.. Open an `oc debug` session to a node in the machine config pool and run `chroot /host`. -+ -[source, terminal] ----- -$ oc debug node/<node_name> ----- -+ -[source, terminal] ----- -sh-4.4# chroot /host ----- - -.. Verify the changes in the `crio.conf` file: -+ -[source,terminal] ----- -sh-4.4# crio config | egrep 'log_level|pids_limit|log_size_max' ----- -+ -.Example output -+ -[source,terminal] ----- -pids_limit = 2048 -log_size_max = -1 -log_level = "debug" ----- - -.. Verify the changes in the `storage.conf`file: -+ -[source,terminal] ----- -sh-4.4# head -n 7 /etc/containers/storage.conf ----- -+ -.Example output -+ ----- -[storage] - driver = "overlay" - runroot = "/var/run/containers/storage" - graphroot = "/var/lib/containers/storage" - [storage.options] - additionalimagestores = [] - size = "8G" ----- diff --git a/modules/create-a-kubeletconfig-crd-to-edit-kubelet-parameters.adoc b/modules/create-a-kubeletconfig-crd-to-edit-kubelet-parameters.adoc deleted file mode 100644 index 5206bed8c78e..000000000000 --- a/modules/create-a-kubeletconfig-crd-to-edit-kubelet-parameters.adoc +++ /dev/null @@ -1,254 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/node-tasks.adoc -// * post_installation_configuration/machine-configuration-tasks.adoc - -:_content-type: PROCEDURE -[id="create-a-kubeletconfig-crd-to-edit-kubelet-parameters_{context}"] -= Creating a KubeletConfig CRD to edit kubelet parameters - -The kubelet configuration is currently serialized as an Ignition configuration, so it can be directly edited. However, there is also a new `kubelet-config-controller` added to the Machine Config Controller (MCC). This lets you use a `KubeletConfig` custom resource (CR) to edit the kubelet parameters. - -[NOTE] -==== -As the fields in the `kubeletConfig` object are passed directly to the kubelet from upstream Kubernetes, the kubelet validates those values directly. Invalid values in the `kubeletConfig` object might cause cluster nodes to become unavailable. For valid values, see the link:https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/[Kubernetes documentation]. -==== - -Consider the following guidance: - -* Create one `KubeletConfig` CR for each machine config pool with all the config changes you want for that pool. If you are applying the same content to all of the pools, you need only one `KubeletConfig` CR for all of the pools. - -* Edit an existing `KubeletConfig` CR to modify existing settings or add new settings, instead of creating a CR for each change. 
It is recommended that you create a CR only to modify a different machine config pool, or for changes that are intended to be temporary, so that you can revert the changes. - -* As needed, create multiple `KubeletConfig` CRs with a limit of 10 per cluster. For the first `KubeletConfig` CR, the Machine Config Operator (MCO) creates a machine config appended with `kubelet`. With each subsequent CR, the controller creates another `kubelet` machine config with a numeric suffix. For example, if you have a `kubelet` machine config with a `-2` suffix, the next `kubelet` machine config is appended with `-3`. - -If you want to delete the machine configs, delete them in reverse order to avoid exceeding the limit. For example, you delete the `kubelet-3` machine config before deleting the `kubelet-2` machine config. - -[NOTE] -==== -If you have a machine config with a `kubelet-9` suffix, and you create another `KubeletConfig` CR, a new machine config is not created, even if there are fewer than 10 `kubelet` machine configs. -==== - -.Example `KubeletConfig` CR -[source,terminal] ----- -$ oc get kubeletconfig ----- - -[source, terminal] ----- -NAME AGE -set-max-pods 15m ----- - -.Example showing a `KubeletConfig` machine config -[source,terminal] ----- -$ oc get mc | grep kubelet ----- - -[source, terminal] ----- -... -99-worker-generated-kubelet-1 b5c5119de007945b6fe6fb215db3b8e2ceb12511 3.2.0 26m -... ----- - -The following procedure is an example to show how to configure the maximum number of pods per node on the worker nodes. - -.Prerequisites - -. Obtain the label associated with the static `MachineConfigPool` CR for the type of node you want to configure. -Perform one of the following steps: - -.. View the machine config pool: -+ -[source,terminal] ----- -$ oc describe machineconfigpool <name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc describe machineconfigpool worker ----- -+ -.Example output -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - creationTimestamp: 2019-02-08T14:52:39Z - generation: 1 - labels: - custom-kubelet: set-max-pods <1> ----- -<1> If a label has been added it appears under `labels`. - -.. If the label is not present, add a key/value pair: -+ -[source,terminal] ----- -$ oc label machineconfigpool worker custom-kubelet=set-max-pods ----- - -.Procedure - -. View the available machine configuration objects that you can select: -+ -[source,terminal] ----- -$ oc get machineconfig ----- -+ -By default, the two kubelet-related configs are `01-master-kubelet` and `01-worker-kubelet`. - -. Check the current value for the maximum pods per node: -+ -[source,terminal] ----- -$ oc describe node <node_name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc describe node ci-ln-5grqprb-f76d1-ncnqq-worker-a-mdv94 ----- -+ -Look for `value: pods: <value>` in the `Allocatable` stanza: -+ -.Example output -[source,terminal] ----- -Allocatable: - attachable-volumes-aws-ebs: 25 - cpu: 3500m - hugepages-1Gi: 0 - hugepages-2Mi: 0 - memory: 15341844Ki - pods: 250 ----- - -. Set the maximum pods per node on the worker nodes by creating a custom resource file that contains the kubelet configuration: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: set-max-pods -spec: - machineConfigPoolSelector: - matchLabels: - custom-kubelet: set-max-pods <1> - kubeletConfig: - maxPods: 500 <2> ----- -<1> Enter the label from the machine config pool. 
-<2> Add the kubelet configuration. In this example, use `maxPods` to set the maximum pods per node. -+ -[NOTE] -==== -The rate at which the kubelet talks to the API server depends on queries per second (QPS) and burst values. The default values, `50` for `kubeAPIQPS` and `100` for `kubeAPIBurst`, are sufficient if there are limited pods running on each node. It is recommended to update the kubelet QPS and burst rates if there are enough CPU and memory resources on the node. - -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: set-max-pods -spec: - machineConfigPoolSelector: - matchLabels: - custom-kubelet: set-max-pods - kubeletConfig: - maxPods: <pod_count> - kubeAPIBurst: <burst_rate> - kubeAPIQPS: <QPS> ----- -==== -.. Update the machine config pool for workers with the label: -+ -[source,terminal] ----- -$ oc label machineconfigpool worker custom-kubelet=set-max-pods ----- - -.. Create the `KubeletConfig` object: -+ -[source,terminal] ----- -$ oc create -f change-maxPods-cr.yaml ----- - -.. Verify that the `KubeletConfig` object is created: -+ -[source,terminal] ----- -$ oc get kubeletconfig ----- -+ -.Example output -[source, terminal] ----- -NAME AGE -set-max-pods 15m ----- -+ -Depending on the number of worker nodes in the cluster, wait for the worker nodes to be rebooted one by one. For a cluster with 3 worker nodes, this could take about 10 to 15 minutes. - -. Verify that the changes are applied to the node: - -.. Check on a worker node that the `maxPods` value changed: -+ -[source,terminal] ----- -$ oc describe node <node_name> ----- - -.. Locate the `Allocatable` stanza: -+ -[source,terminal] ----- - ... -Allocatable: - attachable-volumes-gce-pd: 127 - cpu: 3500m - ephemeral-storage: 123201474766 - hugepages-1Gi: 0 - hugepages-2Mi: 0 - memory: 14225400Ki - pods: 500 <1> - ... ----- -<1> In this example, the `pods` parameter should report the value you set in the `KubeletConfig` object. - -. Verify the change in the `KubeletConfig` object: -+ -[source,terminal] ----- -$ oc get kubeletconfigs set-max-pods -o yaml ----- -+ -This should show a status of `True` and `type:Success`, as shown in the following example: -+ -[source,yaml] ----- -spec: - kubeletConfig: - maxPods: 500 - machineConfigPoolSelector: - matchLabels: - custom-kubelet: set-max-pods -status: - conditions: - - lastTransitionTime: "2021-06-30T17:04:07Z" - message: Success - status: "True" - type: Success ----- diff --git a/modules/creating-a-custom-ingress-controller.adoc b/modules/creating-a-custom-ingress-controller.adoc deleted file mode 100644 index 9e395c8e60f9..000000000000 --- a/modules/creating-a-custom-ingress-controller.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// *ingress-controller-dnsmgt.adoc - -:_content-type: PROCEDURE -[id="creating-a-custom-ingress-controller_{context}"] -= Creating a custom Ingress Controller with the `Unmanaged` DNS management policy - -As a cluster administrator, you can create a new custom Ingress Controller with the `Unmanaged` DNS management policy. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. 
Create a custom resource (CR) file named `sample-ingress.yaml` containing the following: - -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - namespace: openshift-ingress-operator - name: <name> <1> -spec: - domain: <domain> <2> - endpointPublishingStrategy: - type: LoadBalancerService - loadBalancer: - scope: External <3> - dnsManagementPolicy: Unmanaged <4> ----- -<1> Specify the `<name>` with a name for the `IngressController` object. -<2> Specify the `domain` based on the DNS record that was created as a prerequisite. -<3> Specify the `scope` as `External` to expose the load balancer externally. -<4> `dnsManagementPolicy` indicates if the Ingress Controller is managing the lifecycle of the wildcard DNS record associated with the load balancer. -The valid values are `Managed` and `Unmanaged`. The default value is `Managed`. - - -. Save the file to apply the changes. -+ -[source,terminal] ----- -oc apply -f <name>.yaml <1> ----- diff --git a/modules/creating-a-machine-pool-cli.adoc b/modules/creating-a-machine-pool-cli.adoc deleted file mode 100644 index 10432a15aaad..000000000000 --- a/modules/creating-a-machine-pool-cli.adoc +++ /dev/null @@ -1,119 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/rosa_nodes/rosa-managing-worker-nodes.adoc - -:_content-type: PROCEDURE -[id="creating_machine_pools_cli_{context}"] -= Creating a machine pool using the ROSA CLI - -You can create additional machine pools for your {product-title} (ROSA) cluster by using the ROSA CLI (`rosa`). - -.Prerequisites - -* You installed and configured the latest {product-title} (ROSA) CLI, `rosa`, on your workstation. -* You logged in to your Red Hat account using the ROSA CLI (`rosa`). -* You created a ROSA cluster. - -.Procedure - -* To add a machine pool that does not use autoscaling, create the machine pool and define the instance type, compute (also known as worker) node count, and node labels: -+ -[source,terminal] ----- -$ rosa create machinepool --cluster=<cluster-name> \ - --name=<machine_pool_id> \ <1> - --replicas=<replica_count> \ <2> - --instance-type=<instance_type> \ <3> - --labels=<key>=<value>,<key>=<value> \ <4> - --taints=<key>=<value>:<effect>,<key>=<value>:<effect> \ <5> - --use-spot-instances \ <6> - --spot-max-price=0.5 <7> ----- -<1> Specifies the name of the machine pool. Replace `<machine_pool_id>` with the name of your machine pool. -<2> Specifies the number of compute nodes to provision. If you deployed ROSA using a single availability zone, this defines the number of compute nodes to provision to the machine pool for the zone. If you deployed your cluster using multiple availability zones, this defines the number of compute nodes to provision in total across all zones and the count must be a multiple of 3. The `--replicas` argument is required when autoscaling is not configured. -<3> Optional: Sets the instance type for the compute nodes in your machine pool. The instance type defines the vCPU and memory allocation for each compute node in the pool. Replace `<instance_type>` with an instance type. The default is `m5.xlarge`. You cannot change the instance type for a machine pool after the pool is created. -<4> Optional: Defines the labels for the machine pool. Replace `<key>=<value>,<key>=<value>` with a comma-delimited list of key-value pairs, for example `--labels=key1=value1,key2=value2`. -<5> Optional: Defines the taints for the machine pool. 
Replace `<key>=<value>:<effect>,<key>=<value>:<effect>` with a key, value, and effect for each taint, for example `--taints=key1=value1:NoSchedule,key2=value2:NoExecute`. Available effects include `NoSchedule`, `PreferNoSchedule`, and `NoExecute`. -<6> Optional: Configures your machine pool to deploy machines as non-guaranteed AWS Spot Instances. For information, see link:https://aws.amazon.com/ec2/spot/[Amazon EC2 Spot Instances] in the AWS documentation. If you select *Use Amazon EC2 Spot Instances* for a machine pool, you cannot disable the option after the machine pool is created. -<7> Optional: If you have opted to use Spot Instances, you can specify this argument to define a maximum hourly price for a Spot Instance. If this argument is not specified, the on-demand price is used. -+ -[IMPORTANT] -==== -Your Amazon EC2 Spot Instances might be interrupted at any time. Use Amazon EC2 Spot Instances only for workloads that can tolerate interruptions. -==== -+ -The following example creates a machine pool called `mymachinepool` that uses the `m5.xlarge` instance type and has 2 compute node replicas. The example also adds 2 workload-specific labels: -+ -[source,terminal] ----- -$ rosa create machinepool --cluster=mycluster --name=mymachinepool --replicas=2 --instance-type=m5.xlarge --labels=app=db,tier=backend ----- -+ -.Example output -[source,terminal] ----- -I: Machine pool 'mymachinepool' created successfully on cluster 'mycluster' -I: To view all machine pools, run 'rosa list machinepools -c mycluster' ----- - -* To add a machine pool that uses autoscaling, create the machine pool and define the autoscaling configuration, instance type and node labels: -+ -[source,terminal] ----- -$ rosa create machinepool --cluster=<cluster-name> \ - --name=<machine_pool_id> \ <1> - --enable-autoscaling \ <2> - --min-replicas=<minimum_replica_count> \ <3> - --max-replicas=<maximum_replica_count> \ <3> - --instance-type=<instance_type> \ <4> - --labels=<key>=<value>,<key>=<value> \ <5> - --taints=<key>=<value>:<effect>,<key>=<value>:<effect> \ <6> - --use-spot-instances \ <7> - --spot-max-price=0.5 <8> ----- -<1> Specifies the name of the machine pool. Replace `<machine_pool_id>` with the name of your machine pool. -<2> Enables autoscaling in the machine pool to meet the deployment needs. -<3> Defines the minimum and maximum compute node limits. The cluster autoscaler does not reduce or increase the machine pool node count beyond the limits that you specify. If you deployed ROSA using a single availability zone, the `--min-replicas` and `--max-replicas` arguments define the autoscaling limits in the machine pool for the zone. If you deployed your cluster using multiple availability zones, the arguments define the autoscaling limits in total across all zones and the counts must be multiples of 3. -<4> Optional: Sets the instance type for the compute nodes in your machine pool. The instance type defines the vCPU and memory allocation for each compute node in the pool. Replace `<instance_type>` with an instance type. The default is `m5.xlarge`. You cannot change the instance type for a machine pool after the pool is created. -<5> Optional: Defines the labels for the machine pool. Replace `<key>=<value>,<key>=<value>` with a comma-delimited list of key-value pairs, for example `--labels=key1=value1,key2=value2`. -<6> Optional: Defines the taints for the machine pool. 
Replace `<key>=<value>:<effect>,<key>=<value>:<effect>` with a key, value, and effect for each taint, for example `--taints=key1=value1:NoSchedule,key2=value2:NoExecute`. Available effects include `NoSchedule`, `PreferNoSchedule`, and `NoExecute`. -<7> Optional: Configures your machine pool to deploy machines as non-guaranteed AWS Spot Instances. For information, see link:https://aws.amazon.com/ec2/spot/[Amazon EC2 Spot Instances] in the AWS documentation. If you select *Use Amazon EC2 Spot Instances* for a machine pool, you cannot disable the option after the machine pool is created. -<8> Optional: If you have opted to use Spot Instances, you can specify this argument to define a maximum hourly price for a Spot Instance. If this argument is not specified, the on-demand price is used. -+ -[IMPORTANT] -==== -Your Amazon EC2 Spot Instances might be interrupted at any time. Use Amazon EC2 Spot Instances only for workloads that can tolerate interruptions. -==== -+ -The following example creates a machine pool called `mymachinepool` that uses the `m5.xlarge` instance type and has autoscaling enabled. The minimum compute node limit is 3 and the maximum is 6 overall. The example also adds 2 workload-specific labels: -+ -[source,terminal] ----- -$ rosa create machinepool --cluster=mycluster --name=mymachinepool --enable-autoscaling --min-replicas=3 --max-replicas=6 --instance-type=m5.xlarge --labels=app=db,tier=backend ----- -+ -.Example output -[source,terminal] ----- -I: Machine pool 'mymachinepool' created successfully on cluster 'mycluster' -I: To view all machine pools, run 'rosa list machinepools -c mycluster' ----- - -.Verification - -. List the available machine pools in your cluster: -+ -[source,terminal] ----- -$ rosa list machinepools --cluster=<cluster_name> ----- -+ -.Example output -[source,terminal] ----- -ID AUTOSCALING REPLICAS INSTANCE TYPE LABELS TAINTS AVAILABILITY ZONES SPOT INSTANCES -Default No 3 m5.xlarge us-east-1a, us-east-1b, us-east-1c N/A -mymachinepool Yes 3-6 m5.xlarge app=db, tier=backend us-east-1a, us-east-1b, us-east-1c No ----- - -. Verify that the machine pool is included in the output and the configuration is as expected. diff --git a/modules/creating-a-machine-pool-ocm.adoc b/modules/creating-a-machine-pool-ocm.adoc deleted file mode 100644 index c205af14ed4b..000000000000 --- a/modules/creating-a-machine-pool-ocm.adoc +++ /dev/null @@ -1,123 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/rosa_nodes/rosa-managing-worker-nodes.adoc -// * nodes/rosa-managing-worker-nodes.adoc -// * osd_cluster_admin/osd_nodes/osd-managing-worker-nodes.adoc - -:_content-type: PROCEDURE -[id="creating_machine_pools_ocm_{context}"] -ifndef::openshift-rosa[] -= Creating a machine pool -endif::openshift-rosa[] -ifdef::openshift-rosa[] -= Creating a machine pool using OpenShift Cluster Manager -endif::openshift-rosa[] - -ifndef::openshift-rosa[] -A default machine pool is created when you install an {product-title} cluster. After installation, you can create additional machine pools for your cluster by using {cluster-manager}. -endif::openshift-rosa[] -ifdef::openshift-rosa[] -You can create additional machine pools for your {product-title} (ROSA) cluster by using {cluster-manager}. 
-endif::openshift-rosa[] - -ifndef::openshift-rosa[] -[IMPORTANT] -==== -The compute (also known as worker) node instance types, autoscaling options, and node counts that are available to you depend on your -ifdef::openshift-rosa[] -ROSA -endif::openshift-rosa[] -ifndef::openshift-rosa[] -{product-title} -endif::[] -subscriptions, resource quotas and deployment scenario. For more information, contact your sales representative or Red Hat support. -==== -endif::openshift-rosa[] - -.Prerequisites - -ifdef::openshift-rosa[] -* You created a ROSA cluster. -endif::openshift-rosa[] -ifndef::openshift-rosa[] -* You created an {product-title} cluster. -endif::[] - -.Procedure - -. Navigate to {cluster-manager-url} and select your cluster. - -. Under the *Machine pools* tab, click *Add machine pool*. - -. Add a *Machine pool name*. - -. Select a *Worker node instance type* from the drop-down menu. The instance type defines the vCPU and memory allocation for each compute node in the machine pool. -+ -[NOTE] -==== -You cannot change the instance type for a machine pool after the pool is created. -==== - -. Optional: Configure autoscaling for the machine pool: -.. Select *Enable autoscaling* to automatically scale the number of machines in your machine pool to meet the deployment needs. -ifdef::openshift-dedicated[] -+ -[NOTE] -==== -The *Enable autoscaling* option is only available for {product-title} if you have the `capability.cluster.autoscale_clusters` subscription. For more information, contact your sales representative or Red Hat support. -==== -endif::openshift-dedicated[] -.. Set the minimum and maximum node count limits for autoscaling. The cluster autoscaler does not reduce or increase the machine pool node count beyond the limits that you specify. -** If you deployed your cluster using a single availability zone, set the *Minimum and maximum node count*. This defines the minimum and maximum compute node limits in the availability zone. -** If you deployed your cluster using multiple availability zones, set the *Minimum nodes per zone* and *Maximum nodes per zone*. This defines the minimum and maximum compute node limits per zone. -+ -[NOTE] -==== -Alternatively, you can set your autoscaling preferences for the machine pool after the machine pool is created. -==== - -. If you did not enable autoscaling, select a compute node count: -* If you deployed your cluster using a single availability zone, select a *Worker node count* from the drop-down menu. This defines the number of compute nodes to provision to the machine pool for the zone. -* If you deployed your cluster using multiple availability zones, select a *Worker node count (per zone)* from the drop-down menu. This defines the number of compute nodes to provision to the machine pool per zone. - -. Optional: Add node labels and taints for your machine pool: -.. Expand the *Edit node labels and taints* menu. -.. Under *Node labels*, add *Key* and *Value* entries for your node labels. -.. Under *Taints*, add *Key* and *Value* entries for your taints. -.. For each taint, select an *Effect* from the drop-down menu. Available options include `NoSchedule`, `PreferNoSchedule`, and `NoExecute`. -+ -[NOTE] -==== -Alternatively, you can add the node labels and taints after you create the machine pool. -==== - -ifdef::openshift-dedicated[] -. 
Optional: If you deployed {product-title} on AWS using the Customer Cloud Subscription (CCS) model, use Amazon EC2 Spot Instances if you want to configure your machine pool to deploy machines as non-guaranteed AWS Spot Instances: -.. Select *Use Amazon EC2 Spot Instances*. -.. Leave *Use On-Demand instance price* selected to use the on-demand instance price. Alternatively, select *Set maximum price* to define a maximum hourly price for a Spot Instance. -+ -For more information about Amazon EC2 Spot Instances, see the link:https://aws.amazon.com/ec2/spot/[AWS documentation]. -endif::openshift-dedicated[] -ifdef::openshift-rosa[] -. Optional: Use Amazon EC2 Spot Instances if you want to configure your machine pool to deploy machines as non-guaranteed AWS Spot Instances: -.. Select *Use Amazon EC2 Spot Instances*. -.. Leave *Use On-Demand instance price* selected to use the on-demand instance price. Alternatively, select *Set maximum price* to define a maximum hourly price for a Spot Instance. -+ -For more information about Amazon EC2 Spot Instances, see the link:https://aws.amazon.com/ec2/spot/[AWS documentation]. -endif::openshift-rosa[] -+ -[IMPORTANT] -==== -Your Amazon EC2 Spot Instances might be interrupted at any time. Use Amazon EC2 Spot Instances only for workloads that can tolerate interruptions. -==== -+ -[NOTE] -==== -If you select *Use Amazon EC2 Spot Instances* for a machine pool, you cannot disable the option after the machine pool is created. -==== - -. Click *Add machine pool* to create the machine pool. - -.Verification - -* Verify that the machine pool is visible on the *Machine pools* page and the configuration is as expected. diff --git a/modules/creating-a-machine-pool.adoc b/modules/creating-a-machine-pool.adoc deleted file mode 100644 index aa35f2adedb3..000000000000 --- a/modules/creating-a-machine-pool.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/rosa_nodes/rosa-managing-worker-nodes.adoc - -:_content-type: CONCEPT -[id="creating_a_machine_pool_{context}"] -= Creating a machine pool - -A default machine pool is created when you install a {product-title} (ROSA) cluster. After installation, you can create additional machine pools for your cluster by using {cluster-manager} or the ROSA CLI (`rosa`). diff --git a/modules/creating-a-project-using-the-CLI.adoc b/modules/creating-a-project-using-the-CLI.adoc deleted file mode 100644 index edec035d947e..000000000000 --- a/modules/creating-a-project-using-the-CLI.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// applications/projects/working-with-projects.adoc - -:_content-type: PROCEDURE -[id="creating-a-project-using-the-CLI_{context}"] -= Creating a project using the CLI - -If allowed by your cluster administrator, you can create a new project. - -[NOTE] -==== -Projects starting with `openshift-` and `kube-` are considered critical by {product-title}. As such, {product-title} does not allow you to create Projects starting with `openshift-` or `kube-` using the `oc new-project` command. Cluster administrators can create these Projects using the `oc adm new-project` command. -==== - -[NOTE] -==== -You cannot assign an SCC to pods created in one of the default namespaces: `default`, `kube-system`, `kube-public`, `openshift-node`, `openshift-infra`, and `openshift`. You cannot use these namespaces for running pods or services. 
-==== - -.Procedure - -* Run: -+ -[source,terminal] ----- -$ oc new-project <project_name> \ - --description="<description>" --display-name="<display_name>" ----- -+ -For example: -+ -[source,terminal] ----- -$ oc new-project hello-openshift \ - --description="This is an example project" \ - --display-name="Hello OpenShift" ----- - -[NOTE] -==== -The number of projects you are allowed to create -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -might be limited by the system administrator. -endif::[] -ifdef::openshift-online[] -is limited. -endif::[] -After your limit is reached, you might have to delete an existing project in -order to create a new one. -==== diff --git a/modules/creating-a-project-using-the-web-console.adoc b/modules/creating-a-project-using-the-web-console.adoc deleted file mode 100644 index e6aa4786edce..000000000000 --- a/modules/creating-a-project-using-the-web-console.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// applications/projects/working-with-projects.adoc - -:_content-type: PROCEDURE -[id="creating-a-project-using-the-web-console_{context}"] -= Creating a project using the web console - -If allowed by your cluster administrator, you can create a new project. - -[NOTE] -==== -Projects starting with `openshift-` and `kube-` are considered critical by {product-title}. As such, {product-title} does not allow you to create Projects starting with `openshift-` using the web console. -==== - -[NOTE] -==== -You cannot assign an SCC to pods created in one of the default namespaces: `default`, `kube-system`, `kube-public`, `openshift-node`, `openshift-infra`, and `openshift`. You cannot use these namespaces for running pods or services. -==== - -.Procedure - -. Navigate to *Home* -> *Projects*. - -. Click *Create Project*. - -. Enter your project details. - -. Click *Create*. diff --git a/modules/creating-a-service-account-in-your-project.adoc b/modules/creating-a-service-account-in-your-project.adoc deleted file mode 100644 index 8fa8055bafa0..000000000000 --- a/modules/creating-a-service-account-in-your-project.adoc +++ /dev/null @@ -1,95 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/assuming-an-aws-iam-role-for-a-service-account.adoc - -:_content-type: PROCEDURE -[id="creating-a-service-account-in-your-project_{context}"] -= Creating a service account in your project - -Add a service account in your user-defined project. Include an `eks.amazonaws.com/role-arn` annotation in the service account configuration that references the Amazon Resource Name (ARN) for the AWS Identity and Access Management (IAM) role that you want the service account to assume. - -.Prerequisites - -* You have created an AWS IAM role for your service account. For more information, see _Setting up an AWS IAM role for a service account_. -* You have access to a {product-title} with AWS Security Token Service (STS) cluster. Admin-level user privileges are not required. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. In your {product-title} cluster, create a project: -+ -[source,terminal] ----- -$ oc new-project <project_name> <1> ----- -<1> Replace `<project_name>` with the name of your project. The name must match the project name that you specified in your AWS IAM role configuration. -+ -[NOTE] -==== -You are automatically switched to the project when it is created. -==== - -. 
Create a file named `test-service-account.yaml` with the following service account configuration: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: <service_account_name> <1> - namespace: <project_name> <2> - annotations: - eks.amazonaws.com/role-arn: "<aws_iam_role_arn>" <3> ----- -// Add these annotations in the preceding code block later: -// eks.amazonaws.com/sts-regional-endpoints: "true" <4> -// eks.amazonaws.com/token-expiration: "86400" <5> -<1> Replace `<service_account_name>` with the name of your service account. The name must match the service account name that you specified in your AWS IAM role configuration. -<2> Replace `<project_name>` with the name of your project. The name must match the project name that you specified in your AWS IAM role configuration. -<3> Specifies the ARN of the AWS IAM role that the service account assumes for use within your pod. Replace `<aws_iam_role_arn>` with the ARN for the AWS IAM role that you created for your service account. The format of the role ARN is `arn:aws:iam::<aws_account_id>:role/<aws_iam_role_name>`. -// Add these call outs when the additional annotations are added later: -//<4> Optional: When set to `true`, the `AWS_STS_REGIONAL_ENDPOINTS=regional` environment variable is defined in the pod and AWS STS requests are sent to endpoints for the active region. When this option is not set to `true`, the AWS STS requests are by default sent to the global endpoint \https://sts.amazonaws.com. For more information, see link:https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html[AWS STS Regionalized endpoints] in the AWS documentation. -//<5> Optional: Specifies the token expiration time in seconds. The default is `86400`. - -. Create the service account in your project: -+ -[source,terminal] ----- -$ oc create -f test-service-account.yaml ----- -+ -.Example output: -[source,terminal] ----- -serviceaccount/<service_account_name> created ----- - -. Review the details of the service account: -+ -[source,terminal] ----- -$ oc describe serviceaccount <service_account_name> <1> ----- -<1> Replace `<service_account_name>` with the name of your service account. -+ -.Example output: -+ -[source,terminal] ----- -Name: <service_account_name> <1> -Namespace: <project_name> <2> -Labels: <none> -Annotations: eks.amazonaws.com/role-arn: <aws_iam_role_arn> <3> -Image pull secrets: <service_account_name>-dockercfg-rnjkq -Mountable secrets: <service_account_name>-dockercfg-rnjkq -Tokens: <service_account_name>-token-4gbjp -Events: <none> ----- -// Add these annotations in the preceding code block later: -// eks.amazonaws.com/sts-regional-endpoints: true <3> -// eks.amazonaws.com/token-expiration: 86400 <3> -<1> Specifies the name of the service account. -<2> Specifies the project that contains the service account. -<3> Lists the annotation for the ARN of the AWS IAM role that the service account assumes. -// Update the preceding call out to the following when the additional annotations are added later: -//<3> Lists the annotations for the ARN of the AWS IAM role that the service account assumes, the optional regional endpoint configuration, and the optional token expiration specification. 
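A pod assumes the AWS IAM role by running with this service account. The following sketch shows one possible pod definition that references the service account through `serviceAccountName`; the pod name, image, and command are illustrative placeholders only:

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: awsboto3sdk <1>
  namespace: <project_name> <2>
spec:
  serviceAccountName: <service_account_name> <3>
  containers:
  - name: awsboto3sdk
    image: quay.io/<quay_username>/awsboto3sdk:latest <4>
    command:
    - /bin/sh
    - "-c"
    - "sleep 3600" <5>
----
<1> Example pod name.
<2> Replace `<project_name>` with the name of your project.
<3> Replace `<service_account_name>` with the name of the service account that has the `eks.amazonaws.com/role-arn` annotation.
<4> Placeholder for a container image that includes an AWS SDK.
<5> Keeps the container running so that you can test AWS SDK operations from within the pod.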
diff --git a/modules/creating-an-example-aws-sdk-container-image.adoc b/modules/creating-an-example-aws-sdk-container-image.adoc deleted file mode 100644 index 0b935539f853..000000000000 --- a/modules/creating-an-example-aws-sdk-container-image.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/assuming-an-aws-iam-role-for-a-service-account.adoc - -:_content-type: PROCEDURE -[id="creating-an-example-aws-sdk-container-image_{context}"] -= Creating an example AWS SDK container image - -The steps in this procedure provide an example method to create a container image that includes an AWS SDK. - -The example steps use Podman to create the container image and Quay.io to host the image. For more information about Quay.io, see link:https://docs.quay.io/solution/getting-started.html[Getting Started with Quay.io]. The container image can be used to deploy pods that can run AWS SDK operations. - -[NOTE] -==== -In this example procedure, the AWS Boto3 SDK for Python is installed into a container image. For more information about installing and using the AWS Boto3 SDK, see the link:https://boto3.amazonaws.com/v1/documentation/api/latest/index.html[AWS Boto3 documentation]. For details about other AWS SDKs, see link:https://docs.aws.amazon.com/sdkref/latest/guide/overview.html[AWS SDKs and Tools Reference Guide] in the AWS documentation. -==== - -.Prerequisites - -* You have installed Podman on your installation host. -* You have a Quay.io user account. - -.Procedure - -. Add the following configuration to a file named `Containerfile`: -+ -[source,terminal] ----- -FROM ubi9/ubi <1> -RUN dnf makecache && dnf install -y python3-pip && dnf clean all && pip3 install boto3>=1.15.0 <2> ----- -<1> Specifies the Red Hat Universal Base Image version 9. -<2> Installs the AWS Boto3 SDK by using the `pip` package management system. In this example, AWS Boto3 SDK version 1.15.0 or later is installed. - -. From the directory that contains the file, build a container image named `awsboto3sdk`: -+ -[source,terminal] ----- -$ podman build -t awsboto3sdk . ----- - -. Log in to Quay.io: -+ -[source,terminal] ----- -$ podman login quay.io ----- - -. Tag the image in preparation for the upload to Quay.io: -+ -[source,terminal] ----- -$ podman tag localhost/awsboto3sdk quay.io/<quay_username>/awsboto3sdk:latest <1> ----- -<1> Replace `<quay_username>` with your Quay.io username. - -. Push the tagged container image to Quay.io: -+ -[source,terminal] ----- -$ podman push quay.io/<quay_username>/awsboto3sdk:latest <1> ----- -<1> Replace `<quay_username>` with your Quay.io username. - -. Make the Quay.io repository that contains the image public. This publishes the image so that it can be used to deploy a pod in your {product-title} cluster: -.. On https://quay.io/, navigate to the *Repository Settings* page for repository that contains the image. -.. Click *Make Public* to make the repository publicly available. 
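.Verification

* Optionally, confirm that the image is publicly available by pulling it from a host that is not authenticated to the repository. This check assumes that Podman is installed on that host:
+
[source,terminal]
----
$ podman pull quay.io/<quay_username>/awsboto3sdk:latest <1>
----
<1> Replace `<quay_username>` with your Quay.io username.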
diff --git a/modules/creating-an-infra-node.adoc b/modules/creating-an-infra-node.adoc deleted file mode 100644 index dfc6e9ea91a1..000000000000 --- a/modules/creating-an-infra-node.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/cluster-tasks.adoc -// * machine_management/creating-infrastructure-machinesets.adoc -// * nodes/nodes/nodes-nodes-creating-infrastructure-nodes.adoc - -:_content-type: PROCEDURE -[id="creating-an-infra-node_{context}"] -= Creating an infrastructure node - -[IMPORTANT] -==== -See Creating infrastructure machine sets for installer-provisioned infrastructure environments or for any cluster where the control plane nodes are managed by the machine API. -==== - -Requirements of the cluster dictate that infrastructure, also called `infra` nodes, be provisioned. The installer only provides provisions for control plane and worker nodes. Worker nodes can be designated as infrastructure nodes or application, also called `app`, nodes through labeling. - -.Procedure - -. Add a label to the worker node that you want to act as application node: -+ -[source,terminal] ----- -$ oc label node <node-name> node-role.kubernetes.io/app="" ----- - -. Add a label to the worker nodes that you want to act as infrastructure nodes: -+ -[source,terminal] ----- -$ oc label node <node-name> node-role.kubernetes.io/infra="" ----- - -. Check to see if applicable nodes now have the `infra` role and `app` roles: -+ -[source,terminal] ----- -$ oc get nodes ----- - -. Create a default cluster-wide node selector. The default node selector is applied to pods created in all namespaces. This creates an intersection with any existing node selectors on a pod, which additionally constrains the pod's selector. -+ -[IMPORTANT] -==== -If the default node selector key conflicts with the key of a pod's label, then the default node selector is not applied. - -However, do not set a default node selector that might cause a pod to become unschedulable. For example, setting the default node selector to a specific node role, such as `node-role.kubernetes.io/infra=""`, when a pod's label is set to a different node role, such as `node-role.kubernetes.io/master=""`, can cause the pod to become unschedulable. For this reason, use caution when setting the default node selector to specific node roles. - -You can alternatively use a project node selector to avoid cluster-wide node selector key conflicts. -==== - -.. Edit the `Scheduler` object: -+ -[source,terminal] ----- -$ oc edit scheduler cluster ----- - -.. Add the `defaultNodeSelector` field with the appropriate node selector: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Scheduler -metadata: - name: cluster -spec: - defaultNodeSelector: topology.kubernetes.io/region=us-east-1 <1> -# ... ----- -<1> This example node selector deploys pods on nodes in the `us-east-1` region by default. - -.. Save the file to apply the changes. - -You can now move infrastructure resources to the newly labeled `infra` nodes. 
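For example, one way to place a workload onto the newly labeled nodes is to add a node selector to its pod template. The following deployment is an illustrative sketch only; the name and image are placeholders, and it assumes that no conflicting default or project node selector is in effect:

[source,yaml]
----
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-infra-workload <1>
spec:
  replicas: 1
  selector:
    matchLabels:
      app: example-infra-workload
  template:
    metadata:
      labels:
        app: example-infra-workload
    spec:
      nodeSelector:
        node-role.kubernetes.io/infra: "" <2>
      containers:
      - name: example
        image: registry.access.redhat.com/ubi9/ubi <3>
        command:
        - /bin/sh
        - "-c"
        - sleep infinity
----
<1> Example deployment name.
<2> Schedules the pods onto nodes that have the `infra` role label that you added earlier in this procedure.
<3> Placeholder image.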
diff --git a/modules/creating-custom-links.adoc b/modules/creating-custom-links.adoc deleted file mode 100644 index 3636d2e74d4a..000000000000 --- a/modules/creating-custom-links.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: PROCEDURE -[id="creating-custom-links_{context}"] -= Creating custom links in the web console - -.Prerequisites - -* You must have administrator privileges. - -.Procedure - -. From *Administration* -> *Custom Resource Definitions*, click on -*ConsoleLink*. -. Select *Instances* tab -. Click *Create Console Link* and edit the file: -+ -[source,yaml] ----- -apiVersion: console.openshift.io/v1 -kind: ConsoleLink -metadata: - name: example -spec: - href: 'https://www.example.com' - location: HelpMenu <1> - text: Link 1 ----- -<1> Valid location settings are `HelpMenu`, `UserMenu`, `ApplicationMenu`, and -`NamespaceDashboard`. -+ -To make the custom link appear in all namespaces, follow this example: -+ -[source,yaml] ----- -apiVersion: console.openshift.io/v1 -kind: ConsoleLink -metadata: - name: namespaced-dashboard-link-for-all-namespaces -spec: - href: 'https://www.example.com' - location: NamespaceDashboard - text: This appears in all namespaces ----- -+ -To make the custom link appear in only some namespaces, follow this example: -+ -[source,yaml] ----- -apiVersion: console.openshift.io/v1 -kind: ConsoleLink -metadata: - name: namespaced-dashboard-for-some-namespaces -spec: - href: 'https://www.example.com' - location: NamespaceDashboard - # This text will appear in a box called "Launcher" under "namespace" or "project" in the web console - text: Custom Link Text - namespaceDashboard: - namespaces: - # for these specific namespaces - - my-namespace - - your-namespace - - other-namespace ----- -+ -To make the custom link appear in the application menu, follow this example: -+ -[source,yaml] ----- -apiVersion: console.openshift.io/v1 -kind: ConsoleLink -metadata: - name: application-menu-link-1 -spec: - href: 'https://www.example.com' - location: ApplicationMenu - text: Link 1 - applicationMenu: - section: My New Section - # image that is 24x24 in size - imageURL: https://via.placeholder.com/24 ----- - -. Click *Save* to apply your changes. diff --git a/modules/creating-custom-live-rhcos-iso.adoc b/modules/creating-custom-live-rhcos-iso.adoc deleted file mode 100644 index 951d9dda35d7..000000000000 --- a/modules/creating-custom-live-rhcos-iso.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_sno/install-sno-installing-sno.adoc - -:_module-type: PROCEDURE -[id="create-custom-live-rhcos-iso_{context}"] -= Creating a custom live {op-system} ISO for remote server access - -In some cases, you cannot attach an external disk drive to a server, however, you need to access the server remotely to provision a node. -It is recommended to enable SSH access to the server. -You can create a live {op-system} ISO with SSHd enabled and with predefined credentials so that you can access the server after it boots. - -.Prerequisites - -* You installed the `butane` utility. - -.Procedure - -. Download the `coreos-installer` binary from the `coreos-installer` image link:https://mirror.openshift.com/pub/openshift-v4/clients/coreos-installer/latest/[mirror] page. - -. 
Download the latest live {op-system} ISO from link:https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/4.12/latest/[mirror.openshift.com]. - -. Create the `embedded.yaml` file that the `butane` utility uses to create the Ignition file: -+ -[source,yaml,subs="attributes+"] ----- -variant: openshift -version: {product-version}.0 -metadata: - name: sshd - labels: - machineconfiguration.openshift.io/role: worker -passwd: - users: - - name: core <1> - ssh_authorized_keys: - - '<ssh_key>' ----- -<1> The `core` user has sudo privileges. - -. Run the `butane` utility to create the Ignition file using the following command: -+ -[source,terminal] ----- -$ butane -pr embedded.yaml -o embedded.ign ----- - -. After the Ignition file is created, you can include the configuration in a new live {op-system} ISO, which is named `rhcos-sshd-{product-version}.0-x86_64-live.x86_64.iso`, with the `coreos-installer` utility: -+ -[source,terminal,subs="attributes+"] ----- -$ coreos-installer iso ignition embed -i embedded.ign rhcos-{product-version}.0-x86_64-live.x86_64.iso -o rhcos-sshd-{product-version}.0-x86_64-live.x86_64.iso ----- - -.Verification - -* Check that the custom live ISO can be used to boot the server by running the following command: -+ -[source,terminal,subs="attributes+"] ----- -# coreos-installer iso ignition show rhcos-sshd-{product-version}.0-x86_64-live.x86_64.iso ----- - -+ -.Example output -[source,json] ----- -{ - "ignition": { - "version": "3.2.0" - }, - "passwd": { - "users": [ - { - "name": "core", - "sshAuthorizedKeys": [ - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCZnG8AIzlDAhpyENpK2qKiTT8EbRWOrz7NXjRzopbPu215mocaJgjjwJjh1cYhgPhpAp6M/ttTk7I4OI7g4588Apx4bwJep6oWTU35LkY8ZxkGVPAJL8kVlTdKQviDv3XX12l4QfnDom4tm4gVbRH0gNT1wzhnLP+LKYm2Ohr9D7p9NBnAdro6k++XWgkDeijLRUTwdEyWunIdW1f8G0Mg8Y1Xzr13BUo3+8aey7HLKJMDtobkz/C8ESYA/f7HJc5FxF0XbapWWovSSDJrr9OmlL9f4TfE+cQk3s+eoKiz2bgNPRgEEwihVbGsCN4grA+RzLCAOpec+2dTJrQvFqsD alosadag@sonnelicht.local" - ] - } - ] - } -} ----- diff --git a/modules/creating-custom-seccomp-profile.adoc b/modules/creating-custom-seccomp-profile.adoc deleted file mode 100644 index 68bf177695aa..000000000000 --- a/modules/creating-custom-seccomp-profile.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * security/seccomp-profiles.adoc - -:_content-type: PROCEDURE -[id="creating-custom-seccomp-profile_{context}"] -= Creating seccomp profiles -You can use the `MachineConfig` object to create profiles. - -Seccomp can restrict system calls (syscalls) within a container, limiting the access of your application. - -.Prerequisites - -* You have cluster admin permissions. -* You have created a custom security context constraints (SCC). For more information, see _Additional resources_. 
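The `MachineConfig` object in the following procedure embeds the custom seccomp profile as a base64-encoded data URL. Assuming that the profile is saved locally as `seccomp-nostat.json`, one possible way to generate the encoded string for the `source` field is to run:

[source,terminal]
----
$ base64 -w0 seccomp-nostat.json
----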
- -.Procedure - -* Create the `MachineConfig` object: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker - name: custom-seccomp -spec: - config: - ignition: - version: 3.2.0 - storage: - files: - - contents: - source: data:text/plain;charset=utf-8;base64,<hash> - filesystem: root - mode: 0644 - path: /var/lib/kubelet/seccomp/seccomp-nostat.json ----- \ No newline at end of file diff --git a/modules/creating-ibm-power-vs-workspace-procedure.adoc b/modules/creating-ibm-power-vs-workspace-procedure.adoc deleted file mode 100644 index ef98301347dc..000000000000 --- a/modules/creating-ibm-power-vs-workspace-procedure.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// * installing/installing_ibm_powervs/creating-ibm-power-vs-workspace.adoc - -:_content-type: PROCEDURE -[id="creating-ibm-power-vs-workspace-procedure_{context}"] -= Creating an {ibmpowerProductName} Virtual Server workspace - -Use the following procedure to create an {ibmpowerProductName} Virtual Server workspace. - -.Procedure - -. To create an {ibmpowerProductName} Virtual Server workspace, complete step 1 to step 5 from the IBM Cloud documentation for link:https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server[Creating an IBM Power Virtual Server]. - -. After it has finished provisioning, retrieve the 32-character alphanumeric ID of your new workspace by entering the following command: -+ -[source,terminal] ----- -$ ibmcloud resource service-instances | grep <workspace name> ----- -+ diff --git a/modules/creating-infra-machines.adoc b/modules/creating-infra-machines.adoc deleted file mode 100644 index b2da9ecaace2..000000000000 --- a/modules/creating-infra-machines.adoc +++ /dev/null @@ -1,160 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: PROCEDURE -[id="creating-infra-machines_{context}"] -= Creating a machine config pool for infrastructure machines - -If you need infrastructure machines to have dedicated configurations, you must create an infra pool. - -.Procedure - -. Add a label to the node you want to assign as the infra node with a specific label: -+ -[source,terminal] ----- -$ oc label node <node_name> <label> ----- -+ -[source,terminal] ----- -$ oc label node ci-ln-n8mqwr2-f76d1-xscn2-worker-c-6fmtx node-role.kubernetes.io/infra= ----- - -. Create a machine config pool that contains both the worker role and your custom role as machine config selector: -+ -[source,terminal] ----- -$ cat infra.mcp.yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - name: infra -spec: - machineConfigSelector: - matchExpressions: - - {key: machineconfiguration.openshift.io/role, operator: In, values: [worker,infra]} <1> - nodeSelector: - matchLabels: - node-role.kubernetes.io/infra: "" <2> ----- -<1> Add the worker role and your custom role. -<2> Add the label you added to the node as a `nodeSelector`. -+ -[NOTE] -==== -Custom machine config pools inherit machine configs from the worker pool. Custom pools use any machine config targeted for the worker pool, but add the ability to also deploy changes that are targeted at only the custom pool. 
Because a custom pool inherits resources from the worker pool, any change to the worker pool also affects the custom pool. -==== - -. After you have the YAML file, you can create the machine config pool: -+ -[source,terminal] ----- -$ oc create -f infra.mcp.yaml ----- - -. Check the machine configs to ensure that the infrastructure configuration rendered successfully: -+ -[source,terminal] ----- -$ oc get machineconfig ----- -+ -.Example output -[source,terminal] ----- -NAME GENERATEDBYCONTROLLER IGNITIONVERSION CREATED -00-master 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 31d -00-worker 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 31d -01-master-container-runtime 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 31d -01-master-kubelet 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 31d -01-worker-container-runtime 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 31d -01-worker-kubelet 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 31d -99-master-1ae2a1e0-a115-11e9-8f14-005056899d54-registries 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 31d -99-master-ssh 3.2.0 31d -99-worker-1ae64748-a115-11e9-8f14-005056899d54-registries 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 31d -99-worker-ssh 3.2.0 31d -rendered-infra-4e48906dca84ee702959c71a53ee80e7 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 23m -rendered-master-072d4b2da7f88162636902b074e9e28e 5b6fb8349a29735e48446d435962dec4547d3090 3.2.0 31d -rendered-master-3e88ec72aed3886dec061df60d16d1af 02c07496ba0417b3e12b78fb32baf6293d314f79 3.2.0 31d -rendered-master-419bee7de96134963a15fdf9dd473b25 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 17d -rendered-master-53f5c91c7661708adce18739cc0f40fb 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 13d -rendered-master-a6a357ec18e5bce7f5ac426fc7c5ffcd 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 7d3h -rendered-master-dc7f874ec77fc4b969674204332da037 5b6fb8349a29735e48446d435962dec4547d3090 3.2.0 31d -rendered-worker-1a75960c52ad18ff5dfa6674eb7e533d 5b6fb8349a29735e48446d435962dec4547d3090 3.2.0 31d -rendered-worker-2640531be11ba43c61d72e82dc634ce6 5b6fb8349a29735e48446d435962dec4547d3090 3.2.0 31d -rendered-worker-4e48906dca84ee702959c71a53ee80e7 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 7d3h -rendered-worker-4f110718fe88e5f349987854a1147755 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 17d -rendered-worker-afc758e194d6188677eb837842d3b379 02c07496ba0417b3e12b78fb32baf6293d314f79 3.2.0 31d -rendered-worker-daa08cc1e8f5fcdeba24de60cd955cc3 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 13d ----- -+ -You should see a new machine config, with the `rendered-infra-*` prefix. - -. Optional: To deploy changes to a custom pool, create a machine config that uses the custom pool name as the label, such as `infra`. Note that this is not required and only shown for instructional purposes. In this manner, you can apply any custom configurations specific to only your infra nodes. -+ -[NOTE] -==== -After you create the new machine config pool, the MCO generates a new rendered config for that pool, and associated nodes of that pool reboot to apply the new configuration. -==== - -.. 
Create a machine config: -+ -[source,terminal] ----- -$ cat infra.mc.yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - name: 51-infra - labels: - machineconfiguration.openshift.io/role: infra <1> -spec: - config: - ignition: - version: 3.2.0 - storage: - files: - - path: /etc/infratest - mode: 0644 - contents: - source: data:,infra ----- -<1> Add the label you added to the node as a `nodeSelector`. - -.. Apply the machine config to the infra-labeled nodes: -+ -[source,terminal] ----- -$ oc create -f infra.mc.yaml ----- - -. Confirm that your new machine config pool is available: -+ -[source,terminal] ----- -$ oc get mcp ----- -+ -.Example output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -infra rendered-infra-60e35c2e99f42d976e084fa94da4d0fc True False False 1 1 1 0 4m20s -master rendered-master-9360fdb895d4c131c7c4bebbae099c90 True False False 3 3 3 0 91m -worker rendered-worker-60e35c2e99f42d976e084fa94da4d0fc True False False 2 2 2 0 91m ----- -+ -In this example, a worker node was changed to an infra node. diff --git a/modules/creating-instance-aws-load-balancer-controller.adoc b/modules/creating-instance-aws-load-balancer-controller.adoc deleted file mode 100644 index 3485ac2842c5..000000000000 --- a/modules/creating-instance-aws-load-balancer-controller.adoc +++ /dev/null @@ -1,152 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/aws_load_balancer_operator/create-instance-aws-load-balancer-controller.adoc - -:_content-type: PROCEDURE -[id="nw-creating-instance-aws-load-balancer-controller_{context}"] -= Creating an instance of the AWS Load Balancer Controller using AWS Load Balancer Operator - -You can install only a single instance of the `aws-load-balancer-controller` in a cluster. You can create the AWS Load Balancer Controller by using CLI. The AWS Load Balancer(ALB) Operator reconciles only the resource with the name `cluster`. - -.Prerequisites - -* You have created the `echoserver` namespace. -* You have access to the OpenShift CLI (`oc`). - -.Procedure - -. Create an `aws-load-balancer-controller` resource YAML file, for example, `sample-aws-lb.yaml`, as follows: -+ -[source,yaml] ----- -apiVersion: networking.olm.openshift.io/v1 -kind: AWSLoadBalancerController <1> -metadata: - name: cluster <2> -spec: - subnetTagging: Auto <3> - additionalResourceTags: <4> - - key: example.org/security-scope - value: staging - ingressClass: cloud <5> - config: - replicas: 2 <6> - enabledAddons: <7> - - AWSWAFv2 <8> ----- -<1> Defines the `aws-load-balancer-controller` resource. -<2> Defines the AWS Load Balancer Controller instance name. This instance name gets added as a suffix to all related resources. -<3> Valid options are `Auto` and `Manual`. When the value is set to `Auto`, the Operator attempts to determine the subnets that belong to the cluster and tags them appropriately. The Operator cannot determine the role correctly if the internal subnet tags are not present on internal subnet. If you installed your cluster on user-provided infrastructure, you can manually tag the subnets with the appropriate role tags and set the subnet tagging policy to `Manual`. -<4> Defines the tags used by the controller when it provisions AWS resources. -<5> The default value for this field is `alb`. The Operator provisions an `IngressClass` resource with the same name if it does not exist. 
-<6> Specifies the number of replicas of the controller. -<7> Specifies add-ons for AWS load balancers, which get specified through annotations. -<8> Enables the `alb.ingress.kubernetes.io/wafv2-acl-arn` annotation. - -. Create a `aws-load-balancer-controller` resource by running the following command: -+ -[source,terminal] ----- -$ oc create -f sample-aws-lb.yaml ----- - -. After the AWS Load Balancer Controller is running, create a `deployment` resource: -+ -[source,yaml] ----- -apiVersion: apps/v1 -kind: Deployment <1> -metadata: - name: <echoserver> <2> - namespace: echoserver -spec: - selector: - matchLabels: - app: echoserver - replicas: 3 <3> - template: - metadata: - labels: - app: echoserver - spec: - containers: - - image: openshift/origin-node - command: - - "/bin/socat" - args: - - TCP4-LISTEN:8080,reuseaddr,fork - - EXEC:'/bin/bash -c \"printf \\\"HTTP/1.0 200 OK\r\n\r\n\\\"; sed -e \\\"/^\r/q\\\"\"' - imagePullPolicy: Always - name: echoserver - ports: - - containerPort: 8080 ----- -<1> Defines the deployment resource. -<2> Specifies the deployment name. -<3> Specifies the number of replicas of the deployment. - -. Create a `service` resource: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Service <1> -metadata: - name: <echoserver> <2> - namespace: echoserver -spec: - ports: - - port: 80 - targetPort: 8080 - protocol: TCP - type: NodePort - selector: - app: echoserver ----- -<1> Defines the service resource. -<2> Specifies the name of the service. - -. Deploy an ALB-backed `Ingress` resource: -+ -[source,yaml] ----- -apiVersion: networking.k8s.io/v1 -kind: Ingress <1> -metadata: - name: <echoserver> <2> - namespace: echoserver - annotations: - alb.ingress.kubernetes.io/scheme: internet-facing - alb.ingress.kubernetes.io/target-type: instance -spec: - ingressClassName: alb - rules: - - http: - paths: - - path: / - pathType: Exact - backend: - service: - name: <echoserver> <3> - port: - number: 80 ----- -<1> Defines the ingress resource. -<2> Specifies the name of the ingress resource. -<3> Specifies the name of the service resource. - -.Verification - -* Verify the status of the `Ingress` resource to show the host of the provisioned AWS Load Balancer (ALB) by running the following command: -+ -[source,terminal] ----- -$ HOST=$(oc get ingress -n echoserver echoserver --template='{{(index .status.loadBalancer.ingress 0).hostname}}') ----- - -* Verify the status of the provisioned AWS Load Balancer (ALB) host by running the following command: -+ -[source,terminal] ----- -$ curl $HOST ----- diff --git a/modules/creating-machines-bare-metal.adoc b/modules/creating-machines-bare-metal.adoc deleted file mode 100644 index 5a7dc5f4f28f..000000000000 --- a/modules/creating-machines-bare-metal.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc - -[id="creating-machines-bare-metal_{context}"] -= Installing {op-system} and starting the {product-title} bootstrap process - -To install {product-title} on bare metal infrastructure that you provision, you must install {op-system-first} on the machines. When you install {op-system}, you must provide the Ignition config file that was generated by the {product-title} installation program for the type of machine you are installing. 
If you have configured suitable networking, DNS, and load balancing infrastructure, the {product-title} bootstrap process begins automatically after the {op-system} machines have rebooted. - -To install {op-system} on the machines, follow either the steps to use an ISO image or network PXE booting. - -[NOTE] -==== -The compute node deployment steps included in this installation document are {op-system}-specific. If you choose instead to deploy {op-system-base}-based compute nodes, you take responsibility for all operating system life cycle management and maintenance, including performing system updates, applying patches, and completing all other required tasks. Only {op-system-base} 8 compute machines are supported. -==== - -You can configure {op-system} during ISO and PXE installations by using the following methods: - -* Kernel arguments: You can use kernel arguments to provide installation-specific information. For example, you can specify the locations of the {op-system} installation files that you uploaded to your HTTP server and the location of the Ignition config file for the type of node you are installing. For a PXE installation, you can use the `APPEND` parameter to pass the arguments to the kernel of the live installer. For an ISO installation, you can interrupt the live installation boot process to add the kernel arguments. In both installation cases, you can use special `coreos.inst.*` arguments to direct the live installer, as well as standard installation boot arguments for turning standard kernel services on or off. - -* Ignition configs: {product-title} Ignition config files (`*.ign`) are specific to the type of node you are installing. You pass the location of a bootstrap, control plane, or compute node Ignition config file during the {op-system} installation so that it takes effect on first boot. In special cases, you can create a separate, limited Ignition config to pass to the live system. That Ignition config could do a certain set of tasks, such as reporting success to a provisioning system after completing installation. This special Ignition config is consumed by the `coreos-installer` to be applied on first boot of the installed system. Do not provide the standard control plane and compute node Ignition configs to the live ISO directly. - -* `coreos-installer`: You can boot the live ISO installer to a shell prompt, which allows you to prepare the permanent system in a variety of ways before first boot. In particular, you can run the `coreos-installer` command to identify various artifacts to include, work with disk partitions, and set up networking. In some cases, you can configure features on the live system and copy them to the installed system. - -Whether to use an ISO or PXE install depends on your situation. A PXE install requires an available DHCP service and more preparation, but can make the installation process more automated. An ISO install is a more manual process and can be inconvenient if you are setting up more than a few machines. - -[NOTE] -==== -As of {product-title} 4.6, the {op-system} ISO and other installation artifacts provide support for installation on disks with 4K sectors. 
-==== diff --git a/modules/creating-multiple-ingress-through-single-alb.adoc b/modules/creating-multiple-ingress-through-single-alb.adoc deleted file mode 100644 index 184f75c324f3..000000000000 --- a/modules/creating-multiple-ingress-through-single-alb.adoc +++ /dev/null @@ -1,143 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/aws_load_balancer_operator/multiple-ingress-through-single-alb.adoc - -:_content-type: PROCEDURE -[id="nw-creating-multiple-ingress-through-single-alb_{context}"] -= Creating multiple ingresses through a single AWS Load Balancer - -You can route the traffic to multiple Ingresses through a single AWS Load Balancer (ALB) by using the CLI. - -.Prerequisites - -* You have an access to the OpenShift CLI (`oc`). - -.Procedure - -. Create an `IngressClassParams` resource YAML file, for example, `sample-single-lb-params.yaml`, as follows: -+ -[source,yaml] ----- -apiVersion: elbv2.k8s.aws/v1beta1 <1> -kind: IngressClassParams -metadata: - name: <single-lb-params> <2> -spec: - group: - name: single-lb <3> ----- -<1> Defines the API group and version of the `IngressClassParams` resource. -<2> Specifies the name of the `IngressClassParams` resource. -<3> Specifies the name of the `IngressGroup`. All Ingresses of this class belong to this `IngressGroup`. - -. Create an `IngressClassParams` resource by running the following command: -+ -[source,terminal] ----- -$ oc create -f sample-single-lb-params.yaml ----- - -. Create an `IngressClass` resource YAML file, for example, `sample-single-lb.yaml`, as follows: -+ -[source,yaml] ----- -apiVersion: networking.k8s.io/v1 <1> -kind: IngressClass -metadata: - name: <single-lb> <2> -spec: - controller: ingress.k8s.aws/alb <3> - parameters: - apiGroup: elbv2.k8s.aws <4> - kind: IngressClassParams <5> - name: single-lb <6> ----- -<1> Defines the API group and the version of the `IngressClass` resource. -<2> Specifies the name of the `IngressClass`. -<3> Defines the controller name, common for all `IngressClasses`. The `aws-load-balancer-controller` reconciles the controller. -<4> Defines the API group of the `IngressClassParams` resource. -<5> Defines the resource type of the `IngressClassParams` resource. -<6> Defines the name of the `IngressClassParams` resource. - -. Create an `IngressClass` resource by running the following command: -+ -[source,terminal] ----- -$ oc create -f sample-single-lb.yaml ----- - -. 
Create an `Ingress` resource YAML file, for example, `sample-multiple-ingress.yaml`, as follows: -+ -[source,yaml] ----- -apiVersion: networking.k8s.io/v1 <1> -kind: Ingress -metadata: - name: <example-1> <1> - annotations: - alb.ingress.kubernetes.io/scheme: internet-facing <2> - alb.ingress.kubernetes.io/group.order: "1" <3> -spec: - ingressClass: alb <4> - rules: - - host: example.com <5> - http: - paths: - - path: /blog <6> - backend: - service: - name: <example-1> <7> - port: - number: 80 <8> -kind: Ingress -metadata: - name: <example-2> - annotations: - alb.ingress.kubernetes.io/scheme: internet-facing - alb.ingress.kubernetes.io/group.order: "2" -spec: - ingressClass: alb - rules: - - host: example.com - http: - paths: - - path: /store - backend: - service: - name: <example-2> - port: - number: 80 -kind: Ingress - metadata: - name: <example-3> - annotations: - alb.ingress.kubernetes.io/scheme: internet-facing - alb.ingress.kubernetes.io/group.order: "3" -spec: - ingressClass: alb - rules: - - host: example.com - http: - paths: - - path: / - backend: - service: - name: <example-3> - port: - number: 80 ----- -<1> Specifies the name of an ingress. -<2> Indicates the load balancer to provision in the public subnet and makes it accessible over the internet. -<3> Specifies the order in which the rules from the Ingresses are matched when the request is received at the load balancer. -<4> Specifies the Ingress Class that belongs to this ingress. -<5> Defines the name of a domain used for request routing. -<6> Defines the path that must route to the service. -<7> Defines the name of the service that serves the endpoint configured in the ingress. -<8> Defines the port on the service that serves the endpoint. - -. Create the `Ingress` resources by running the following command: -+ -[source,terminal] ----- -$ oc create -f sample-multiple-ingress.yaml ----- diff --git a/modules/creating-new-osdk-v0-1-0-project.adoc b/modules/creating-new-osdk-v0-1-0-project.adoc deleted file mode 100644 index 20af6c50aada..000000000000 --- a/modules/creating-new-osdk-v0-1-0-project.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-migrating-to-v0-1-0.adoc - -:_content-type: PROCEDURE -[id="creating-new-operator-sdk-v0-1-0-project_{context}"] -= Creating a new Operator SDK v0.1.0 project - -Rename your Operator SDK v0.0.x project and create a new v0.1.0 project in its -place. - -.Prerequisites - -- Operator SDK v0.1.0 CLI installed on the development workstation -- `memcached-operator` project previously deployed using an earlier version of -Operator SDK - -.Procedure - -. Ensure the SDK version is v0.1.0: -+ ----- -$ operator-sdk --version -operator-sdk version 0.1.0 ----- - -. Create a new project: -+ ----- -$ mkdir -p $GOPATH/src/github.com/example-inc/ -$ cd $GOPATH/src/github.com/example-inc/ -$ mv memcached-operator old-memcached-operator -$ operator-sdk new memcached-operator --skip-git-init -$ ls -memcached-operator old-memcached-operator ----- - -. 
Copy over `.git` from old project: -+ ----- -$ cp -rf old-memcached-operator/.git memcached-operator/.git ----- diff --git a/modules/creating-rolling-deployments-CLI.adoc b/modules/creating-rolling-deployments-CLI.adoc deleted file mode 100644 index ae99abf954de..000000000000 --- a/modules/creating-rolling-deployments-CLI.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/deployment-strategies.adoc - -:_content-type: PROCEDURE -[id="deployments-creating-rolling-deployment_{context}"] -= Creating a rolling deployment - -Rolling deployments are the default type in {product-title}. You can create a rolling deployment using the CLI. - -.Procedure - -. Create an application based on the example deployment images found in link:https://quay.io/repository/openshifttest/deployment-example[Quay.io]: -+ -[source,terminal] ----- -$ oc new-app quay.io/openshifttest/deployment-example:latest ----- - -. If you have the router installed, make the application available via a route or use the service IP directly. -+ -[source,terminal] ----- -$ oc expose svc/deployment-example ----- - -. Browse to the application at `deployment-example.<project>.<router_domain>` to verify you see the `v1` image. - -. Scale the `DeploymentConfig` object up to three replicas: -+ -[source,terminal] ----- -$ oc scale dc/deployment-example --replicas=3 ----- - -. Trigger a new deployment automatically by tagging a new version of the example as the `latest` tag: -+ -[source,terminal] ----- -$ oc tag deployment-example:v2 deployment-example:latest ----- - -. In your browser, refresh the page until you see the `v2` image. - -. When using the CLI, the following command shows how many pods are on version 1 and how many are on version 2. In the web console, the pods are progressively added to v2 and removed from v1: -+ -[source,terminal] ----- -$ oc describe dc deployment-example ----- - -During the deployment process, the new replication controller is incrementally scaled up. After the new pods are marked as `ready` (by passing their readiness check), the deployment process continues. - -If the pods do not become ready, the process aborts, and the deployment rolls back to its previous version. diff --git a/modules/creating-runtimeclass.adoc b/modules/creating-runtimeclass.adoc deleted file mode 100644 index a16d6f4a6ed9..000000000000 --- a/modules/creating-runtimeclass.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * windows_containers/scheduling-windows-workloads.adoc - -:_content-type: PROCEDURE -[id="creating-runtimeclass_{context}"] -= Creating a RuntimeClass object to encapsulate scheduling mechanisms - -Using a `RuntimeClass` object simplifies the use of scheduling mechanisms like taints and tolerations; you deploy a runtime class that encapsulates your taints and tolerations and then apply it to your pods to schedule them to the appropriate node. Creating a runtime class is also necessary in clusters that support multiple operating system variants. - -.Procedure - -. Create a `RuntimeClass` object YAML file. 
For example, `runtime-class.yaml`: -+ -[source,yaml] ----- -apiVersion: node.k8s.io/v1beta1 -kind: RuntimeClass -metadata: - name: <runtime_class_name> <1> -handler: 'runhcs-wcow-process' -scheduling: - nodeSelector: <2> - kubernetes.io/os: 'windows' - kubernetes.io/arch: 'amd64' - node.kubernetes.io/windows-build: '10.0.17763' - tolerations: <3> - - effect: NoSchedule - key: os - operator: Equal - value: "Windows" ----- -<1> Specify the `RuntimeClass` object name, which is defined in the pods you want to be managed by this runtime class. -<2> Specify labels that must be present on nodes that support this runtime class. Pods using this runtime class can only be scheduled to a node matched by this selector. The node selector of the runtime class is merged with the existing node selector of the pod. Any conflicts prevent the pod from being scheduled to the node. -<3> Specify tolerations to append to pods, excluding duplicates, running with this runtime class during admission. This combines the set of nodes tolerated by the pod and the runtime class. - -. Create the `RuntimeClass` object: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f runtime-class.yaml ----- - -. Apply the `RuntimeClass` object to your pod to ensure it is scheduled to the appropriate operating system variant: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: my-windows-pod -spec: - runtimeClassName: <runtime_class_name> <1> -... ----- -<1> Specify the runtime class to manage the scheduling of your pod. diff --git a/modules/creating-serverless-apps-admin-console.adoc b/modules/creating-serverless-apps-admin-console.adoc deleted file mode 100644 index 1c8b8d5ddcf9..000000000000 --- a/modules/creating-serverless-apps-admin-console.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// serverless/admin_guide/serverless-cluster-admin-serving.adoc - -:_content-type: PROCEDURE -[id="creating-serverless-apps-admin-console_{context}"] -= Creating serverless applications using the Administrator perspective - -include::snippets/serverless-apps.adoc[] - -After the service is created and the application is deployed, Knative creates an immutable revision for this version of the application. Knative also performs network programming to create a route, ingress, service, and load balancer for your application and automatically scales your pods up and down based on traffic. - -.Prerequisites - -To create serverless applications using the *Administrator* perspective, ensure that you have completed the following steps. - -* The {ServerlessOperatorName} and Knative Serving are installed. -* You have logged in to the web console and are in the *Administrator* perspective. - -.Procedure - -. Navigate to the *Serverless* -> *Serving* page. -. In the *Create* list, select *Service*. -. Manually enter YAML or JSON definitions, or by dragging and dropping a file into the editor. -. Click *Create*. 
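.Verification

As a minimal, optional check (a sketch that assumes you also have the OpenShift CLI (`oc`) installed and are logged in to the same cluster), confirm from the command line that the Knative service was created and reports as ready:

[source,terminal]
----
$ oc get ksvc <service_name> -n <namespace>
----

Replace `<service_name>` and `<namespace>` with the name you entered in the editor and the project that contains the service. The `READY` column reports `True` after Knative has created a revision and configured routing for the service.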
diff --git a/modules/creating-serverless-apps-kn.adoc b/modules/creating-serverless-apps-kn.adoc deleted file mode 100644 index d43d64d8725f..000000000000 --- a/modules/creating-serverless-apps-kn.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * serverless/develop/serverless-applications.adoc -// * serverless/reference/kn-serving-ref.adoc - -:_content-type: PROCEDURE -[id="creating-serverless-apps-kn_{context}"] -= Creating serverless applications by using the Knative CLI - -Using the Knative (`kn`) CLI to create serverless applications provides a more streamlined and intuitive user interface over modifying YAML files directly. You can use the `kn service create` command to create a basic serverless application. - -.Prerequisites - -* {ServerlessOperatorName} and Knative Serving are installed on your cluster. -* You have installed the Knative (`kn`) CLI. -* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {product-title}. - -.Procedure - -* Create a Knative service: -+ -[source,terminal] ----- -$ kn service create <service-name> --image <image> --tag <tag-value> ----- -+ -Where: -+ -** `--image` is the URI of the image for the application. -** `--tag` is an optional flag that can be used to add a tag to the initial revision that is created with the service. -+ -.Example command -[source,terminal] ----- -$ kn service create event-display \ - --image quay.io/openshift-knative/knative-eventing-sources-event-display:latest ----- -+ -.Example output -[source,terminal] ----- -Creating service 'event-display' in namespace 'default': - - 0.271s The Route is still working to reflect the latest desired specification. - 0.580s Configuration "event-display" is waiting for a Revision to become ready. - 3.857s ... - 3.861s Ingress has not yet been reconciled. - 4.270s Ready to serve. - -Service 'event-display' created with latest revision 'event-display-bxshg-1' and URL: -http://event-display-default.apps-crc.testing ----- diff --git a/modules/creating-serverless-apps-yaml.adoc b/modules/creating-serverless-apps-yaml.adoc deleted file mode 100644 index 874b60304db7..000000000000 --- a/modules/creating-serverless-apps-yaml.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * serverless/develop/serverless-applications.adoc - -:_content-type: PROCEDURE -[id="creating-serverless-apps-yaml_{context}"] -= Creating serverless applications using YAML - -Creating Knative resources by using YAML files uses a declarative API, which enables you to describe applications declaratively and in a reproducible manner. To create a serverless application by using YAML, you must create a YAML file that defines a Knative `Service` object, then apply it by using `oc apply`. - -After the service is created and the application is deployed, Knative creates an immutable revision for this version of the application. Knative also performs network programming to create a route, ingress, service, and load balancer for your application and automatically scales your pods up and down based on traffic. - -.Prerequisites - -* {ServerlessOperatorName} and Knative Serving are installed on your cluster. -* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {product-title}. -* Install the OpenShift CLI (`oc`). - -.Procedure - -. 
Create a YAML file containing the following sample code: -+ -[source,yaml] ----- -apiVersion: serving.knative.dev/v1 -kind: Service -metadata: - name: event-delivery - namespace: default -spec: - template: - spec: - containers: - - image: quay.io/openshift-knative/knative-eventing-sources-event-display:latest - env: - - name: RESPONSE - value: "Hello Serverless!" ----- -. Navigate to the directory where the YAML file is contained, and deploy the application by applying the YAML file: -+ -[source,terminal] ----- -$ oc apply -f <filename> ----- diff --git a/modules/creating-the-vsphere-windows-vm-golden-image.adoc b/modules/creating-the-vsphere-windows-vm-golden-image.adoc deleted file mode 100644 index 1dd90a54be73..000000000000 --- a/modules/creating-the-vsphere-windows-vm-golden-image.adoc +++ /dev/null @@ -1,155 +0,0 @@ -// Module included in the following assemblies: -// -// * windows_containers/creating_windows_machinesets/creating-windows-machineset-vsphere.adoc - -:_content-type: PROCEDURE -[id="creating-the-vsphere-windows-vm-golden-image_{context}"] -= Creating the vSphere Windows VM golden image - -Create a vSphere Windows virtual machine (VM) golden image. - -.Prerequisites - -* You have created a private/public key pair, which is used to configure key-based authentication in the OpenSSH server. The private key must also be configured in the Windows Machine Config Operator (WMCO) namespace. This is required to allow the WMCO to communicate with the Windows VM. See the "Configuring a secret for the Windows Machine Config Operator" section for more details. - -[NOTE] -==== -You must use link:https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell[Microsoft PowerShell] commands in several cases when creating your Windows VM. PowerShell commands in this guide are distinguished by the `PS C:\>` prefix. -==== - -.Procedure - -. Select a compatible Windows Server version. Currently, the Windows Machine Config Operator (WMCO) stable version supports Windows Server 2022 Long-Term Servicing Channel with the OS-level container networking patch link:https://support.microsoft.com/en-us/topic/april-25-2022-kb5012637-os-build-20348-681-preview-2233d69c-d4a5-4be9-8c24-04a450861a8d[KB5012637]. - -. Create a new VM in the vSphere client using the VM golden image with a compatible Windows Server version. For more information about compatible versions, see the "Windows Machine Config Operator prerequisites" section of the "Red Hat OpenShift support for Windows Containers release notes." -+ -[IMPORTANT] -==== -The virtual hardware version for your VM must meet the infrastructure requirements for {product-title}. For more information, see the "VMware vSphere infrastructure requirements" section in the {product-title} documentation. Also, you can refer to VMware's documentation on link:https://kb.vmware.com/s/article/1003746[virtual machine hardware versions]. -==== - -. Install and configure VMware Tools version 11.0.6 or greater on the Windows VM. See the link:https://docs.vmware.com/en/VMware-Tools/index.html[VMware Tools documentation] for more information. - -. After installing VMware Tools on the Windows VM, verify the following: - -.. The `C:\ProgramData\VMware\VMware Tools\tools.conf` file exists with the following entry: -+ -[source,ini] ----- -exclude-nics= ----- -+ -If the `tools.conf` file does not exist, create it with the `exclude-nics` option uncommented and set as an empty value. 
-+ -This entry ensures the cloned vNIC generated on the Windows VM by the hybrid-overlay is not ignored. - -.. The Windows VM has a valid IP address in vCenter: -+ -[source,terminal] ----- -C:\> ipconfig ----- - -.. The VMTools Windows service is running: -+ -[source,posh] ----- -PS C:\> Get-Service -Name VMTools | Select Status, StartType ----- - -. Install and configure the OpenSSH Server on the Windows VM. See Microsoft's documentation on link:https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_install_firstuse[installing OpenSSH] for more details. - -. Set up SSH access for an administrative user. See Microsoft's documentation on the link:https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_keymanagement#administrative-user[Administrative user] to do this. -+ -[IMPORTANT] -==== -The public key used in the instructions must correspond to the private key you create later in the WMCO namespace that holds your secret. See the "Configuring a secret for the Windows Machine Config Operator" section for more details. -==== - -. You must create a new firewall rule in the Windows VM that allows incoming connections for container logs. Run the following PowerShell command to create the firewall rule on TCP port 10250: -+ -[source,posh] ----- -PS C:\> New-NetFirewallRule -DisplayName "ContainerLogsPort" -LocalPort 10250 -Enabled True -Direction Inbound -Protocol TCP -Action Allow -EdgeTraversalPolicy Allow ----- - -. Clone the Windows VM so it is a reusable image. Follow the VMware documentation on how to link:https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-1E185A80-0B97-4B46-A32B-3EF8F309BEED.html[clone an existing virtual machine] for more details. - -. In the cloned Windows VM, run the link:+++https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/sysprep--generalize--a-windows-installation+++[Windows Sysprep tool]: -+ -[source,terminal] ----- -C:\> C:\Windows\System32\Sysprep\sysprep.exe /generalize /oobe /shutdown /unattend:<path_to_unattend.xml> <1> ----- -<1> Specify the path to your `unattend.xml` file. -+ -[NOTE] -==== -There is a limit on how many times you can run the `sysprep` command on a Windows image. Consult Microsoft's link:+++https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/sysprep--generalize--a-windows-installation#limits-on-how-many-times-you-can-run-sysprep+++[documentation] for more information. -==== -+ -An example `unattend.xml` is provided, which maintains all the changes needed for the WMCO. You must modify this example; it cannot be used directly. 
-+ -.Example `unattend.xml` -[%collapsible] -==== -[source,xml] ----- -<?xml version="1.0" encoding="UTF-8"?> -<unattend xmlns="urn:schemas-microsoft-com:unattend"> - <settings pass="specialize"> - <component xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" name="Microsoft-Windows-International-Core" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS"> - <InputLocale>0409:00000409</InputLocale> - <SystemLocale>en-US</SystemLocale> - <UILanguage>en-US</UILanguage> - <UILanguageFallback>en-US</UILanguageFallback> - <UserLocale>en-US</UserLocale> - </component> - <component xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" name="Microsoft-Windows-Security-SPP-UX" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS"> - <SkipAutoActivation>true</SkipAutoActivation> - </component> - <component xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" name="Microsoft-Windows-SQMApi" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS"> - <CEIPEnabled>0</CEIPEnabled> - </component> - <component xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" name="Microsoft-Windows-Shell-Setup" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS"> - <ComputerName>winhost</ComputerName> <1> - </component> - </settings> - <settings pass="oobeSystem"> - <component xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" name="Microsoft-Windows-Shell-Setup" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS"> - <AutoLogon> - <Enabled>false</Enabled> <2> - </AutoLogon> - <OOBE> - <HideEULAPage>true</HideEULAPage> - <HideLocalAccountScreen>true</HideLocalAccountScreen> - <HideOEMRegistrationScreen>true</HideOEMRegistrationScreen> - <HideOnlineAccountScreens>true</HideOnlineAccountScreens> - <HideWirelessSetupInOOBE>true</HideWirelessSetupInOOBE> - <NetworkLocation>Work</NetworkLocation> - <ProtectYourPC>1</ProtectYourPC> - <SkipMachineOOBE>true</SkipMachineOOBE> - <SkipUserOOBE>true</SkipUserOOBE> - </OOBE> - <RegisteredOrganization>Organization</RegisteredOrganization> - <RegisteredOwner>Owner</RegisteredOwner> - <DisableAutoDaylightTimeSet>false</DisableAutoDaylightTimeSet> - <TimeZone>Eastern Standard Time</TimeZone> - <UserAccounts> - <AdministratorPassword> - <Value>MyPassword</Value> <3> - <PlainText>true</PlainText> - </AdministratorPassword> - </UserAccounts> - </component> - </settings> -</unattend> ----- -<1> Specify the `ComputerName`, which must follow the link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names[Kubernetes' names specification]. These specifications also apply to Guest OS customization performed on the resulting template while creating new VMs. -<2> Disable the automatic logon to avoid the security issue of leaving an open terminal with Administrator privileges at boot. This is the default value and must not be changed. -<3> Replace the `MyPassword` placeholder with the password for the Administrator account. This prevents the built-in Administrator account from having a blank password by default. 
Follow Microsoft's link:https://docs.microsoft.com/en-us/windows/security/threat-protection/security-policy-settings/password-must-meet-complexity-requirements[best practices for choosing a password]. -==== -+ -After the Sysprep tool has completed, the Windows VM will power off. You must not use or power on this VM anymore. - -. Convert the Windows VM to link:https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-5B3737CC-28DB-4334-BD18-6E12011CDC9F.html[a template in vCenter]. diff --git a/modules/creating-your-first-content.adoc b/modules/creating-your-first-content.adoc deleted file mode 100644 index 20316e749317..000000000000 --- a/modules/creating-your-first-content.adoc +++ /dev/null @@ -1,108 +0,0 @@ -// Module included in the following assemblies: -// -// assembly_getting-started-modular-docs-ocp.adoc - -// Base the file name and the ID on the module title. For example: -// * file name: doing-procedure-a.adoc -// * ID: [id="doing-procedure-a"] -// * Title: = Doing procedure A - -[id="creating-your-first-content_{context}"] -= Creating your first content - -In this procedure, you will create your first example content using modular -docs for the OpenShift docs repository. - -.Prerequisites - -* You have forked and then cloned the OpenShift docs repository locally. -* You have downloaded and are using Atom text editor for creating content. -* You have installed AsciiBinder (the build tool for OpenShift docs). - -.Procedure - -. Navigate to your locally cloned OpenShift docs repository on a command line. - -. Create a new feature branch: - -+ ----- -git checkout master -git checkout -b my_first_mod_docs ----- -+ -. If there is no `modules` directory in the root folder, create one. - -. In this `modules` directory, create a file called `my-first-module.adoc`. - -. Open this newly created file in Atom and copy into this file the contents from -the link:https://raw.githubusercontent.com/redhat-documentation/modular-docs/master/modular-docs-manual/files/TEMPLATE_PROCEDURE_doing-one-procedure.adoc[procedure template] -from Modular docs repository. - -. Replace the content in this file with some example text using the guidelines -in the comments. Give this module the title `My First Module`. Save this file. -You have just created your first module. - -. Create a new directory from the root of your OpenShift docs repository and -call it `my_guide`. - -. In this my_guide directory, create a new file called -`assembly_my-first-assembly.adoc`. - -. Open this newly created file in Atom and copy into this file the contents from -the link:https://raw.githubusercontent.com/redhat-documentation/modular-docs/master/modular-docs-manual/files/TEMPLATE_ASSEMBLY_a-collection-of-modules.adoc[assembly template] -from Modular docs repository. - -. Replace the content in this file with some example text using the guidelines -in the comments. Give this assembly the title: `My First Assembly`. - -. Before the first anchor id in this assembly file, add a `:context:` attribute: - -+ -`:context: assembly-first-content` - -. After the Prerequisites section, add the module created earlier (the following is -deliberately spelled incorrectly to pass validation. Use 'include' instead of 'ilude'): - -+ -`ilude::modules/my-first-module.adoc[leveloffset=+1]` - -+ -Remove the other includes that are present in this file. Save this file. - -. Open up `my-first-module.adoc` in the `modules` folder. 
At the top of -this file, in the comments section, add the following to indicate in which -assembly this module is being used: - -+ ----- -// Module included in the following assemblies: -// -// my_guide/assembly_my-first-assembly.adoc ----- - -. Open up `_topic_map.yml` from the root folder and add these lines at the end -of this file and then save. - -+ ----- ---- -Name: OpenShift CCS Mod Docs First Guide -Dir: my_guide -Distros: openshift-* -Topics: -- Name: My First Assembly - File: assembly_my-first-assembly ----- - -. On the command line, run `asciibinder` from the root folder of openshift-docs. -You don't have to add or commit your changes for asciibinder to run. - -. After the asciibinder build completes, open up your browser and navigate to -<YOUR-LOCAL-GIT-REPO-LOCATION>/openshift-docs/_preview/openshift-enterprise/my_first_mod_docs/my_guide/assembly_my-first-assembly.html - -. Confirm that your book `my_guide` has an assembly `My First Assembly` with the -contents from your module `My First Module`. - -NOTE: You can delete this branch now if you are done testing. This branch -shouldn't be submitted to the upstream openshift-docs repository. diff --git a/modules/csr-management.adoc b/modules/csr-management.adoc deleted file mode 100644 index c3e8fa33d704..000000000000 --- a/modules/csr-management.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// installing/installing_aws/installing-aws-user-infra.adoc -// installing/installing_aws/installing-restricted-networks-aws.adoc -// installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// installing/installing_azure/installing-azure-user-infra.adoc -// installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// installing/installing_bare_metal/installing-bare-metal.adoc -// installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// installing/installing_gcp/installing-gcp-user-infra-vpc.adoc -// installing/installing_gcp/installing-gcp-user-infra.adoc -// installing/installing_gcp/installing-restricted-networks-gcp.adoc -// installing/installing_ibm_power/installing-ibm-power.adoc -// installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// installing/installing_ibm_z/installing-ibm-z.adoc -// installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// installing/installing_vsphere/installing-vsphere.adoc -// machine_management/adding-rhel-compute.adoc -// machine_management/more-rhel-compute.adoc -// post_installation_configuration/node-tasks.adoc - -:_content-type: CONCEPT -[id="csr-management_{context}"] -= Certificate signing requests management - -Because your cluster has limited access to automatic machine management when you use infrastructure that you provision, you must provide a mechanism for approving cluster certificate signing requests (CSRs) after installation. The `kube-controller-manager` only approves the kubelet client CSRs. 
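For example, a minimal sketch of such a mechanism, using standard `oc` subcommands (the CSR name is a placeholder), is to list the requests and approve pending ones individually:

[source,terminal]
----
$ oc get csr
$ oc adm certificate approve <csr_name>
----

Requests that still need a decision show `Pending` in the `CONDITION` column of the first command's output. Approve a request only after you confirm that it originated from a machine that you provisioned.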
The `machine-approver` cannot guarantee the validity of a serving certificate that is requested by using kubelet credentials because it cannot confirm that the correct machine issued the request. You must determine and implement a method of verifying the validity of the kubelet serving certificate requests and approving them. diff --git a/modules/custom-tuning-example.adoc b/modules/custom-tuning-example.adoc deleted file mode 100644 index 79459fdd625a..000000000000 --- a/modules/custom-tuning-example.adoc +++ /dev/null @@ -1,119 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/using-node-tuning-operator.adoc - -[id="custom-tuning-example_{context}"] -= Custom tuning examples - -*Using TuneD profiles from the default CR* - -The following CR applies custom node-level tuning for -{product-title} nodes with label -`tuned.openshift.io/ingress-node-label` set to any value. - -.Example: custom tuning using the openshift-control-plane TuneD profile -[source,yaml] ----- -apiVersion: tuned.openshift.io/v1 -kind: Tuned -metadata: - name: ingress - namespace: openshift-cluster-node-tuning-operator -spec: - profile: - - data: | - [main] - summary=A custom OpenShift ingress profile - include=openshift-control-plane - [sysctl] - net.ipv4.ip_local_port_range="1024 65535" - net.ipv4.tcp_tw_reuse=1 - name: openshift-ingress - recommend: - - match: - - label: tuned.openshift.io/ingress-node-label - priority: 10 - profile: openshift-ingress ----- - -[IMPORTANT] -==== -Custom profile writers are strongly encouraged to include the default TuneD -daemon profiles shipped within the default Tuned CR. The example above uses the -default `openshift-control-plane` profile to accomplish this. -==== - -*Using built-in TuneD profiles* - -Given the successful rollout of the NTO-managed daemon set, the TuneD operands -all manage the same version of the TuneD daemon. To list the built-in TuneD -profiles supported by the daemon, query any TuneD pod in the following way: - -[source,terminal] ----- -$ oc exec $tuned_pod -n openshift-cluster-node-tuning-operator -- find /usr/lib/tuned/ -name tuned.conf -printf '%h\n' | sed 's|^.*/||' ----- - -You can use the profile names retrieved by this in your custom tuning specification. - -.Example: using built-in hpc-compute TuneD profile -[source,yaml] ----- -apiVersion: tuned.openshift.io/v1 -kind: Tuned -metadata: - name: openshift-node-hpc-compute - namespace: openshift-cluster-node-tuning-operator -spec: - profile: - - data: | - [main] - summary=Custom OpenShift node profile for HPC compute workloads - include=openshift-node,hpc-compute - name: openshift-node-hpc-compute - - recommend: - - match: - - label: tuned.openshift.io/openshift-node-hpc-compute - priority: 20 - profile: openshift-node-hpc-compute ----- - -In addition to the built-in `hpc-compute` profile, the example above includes -the `openshift-node` TuneD daemon profile shipped within the default -Tuned CR to use OpenShift-specific tuning for compute nodes. - -// Note the issues with including profiles sharing the same ancestor: see link:https://bugzilla.redhat.com/show_bug.cgi?id=1825882[BZ#1825882] - -*Overriding host-level sysctls* - -Various kernel parameters can be changed at runtime by using `/run/sysctl.d/`, `/etc/sysctl.d/`, and `/etc/sysctl.conf` host configuration files. {product-title} adds several host configuration files which set kernel parameters at runtime; for example, `net.ipv[4-6].`, `fs.inotify.`, and `vm.max_map_count`. 
These runtime parameters provide basic functional tuning for the system prior to the kubelet and the Operator start. - -The Operator does not override these settings unless the `reapply_sysctl` option is set to `false`. Setting this option to `false` results in `TuneD` not applying the settings from the host configuration files after it applies its custom profile. - -.Example: overriding host-level sysctls -[source,yaml] ----- -apiVersion: tuned.openshift.io/v1 -kind: Tuned -metadata: - name: openshift-no-reapply-sysctl - namespace: openshift-cluster-node-tuning-operator -spec: - profile: - - data: | - [main] - summary=Custom OpenShift profile - include=openshift-node - [sysctl] - vm.max_map_count=>524288 - name: openshift-no-reapply-sysctl - recommend: - - match: - - label: tuned.openshift.io/openshift-no-reapply-sysctl - priority: 15 - profile: openshift-no-reapply-sysctl - operand: - tunedConfig: - reapply_sysctl: false ----- diff --git a/modules/custom-tuning-specification.adoc b/modules/custom-tuning-specification.adoc deleted file mode 100644 index 463a83e74a32..000000000000 --- a/modules/custom-tuning-specification.adoc +++ /dev/null @@ -1,394 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/using-node-tuning-operator.adoc -// * post_installation_configuration/node-tasks.adoc -// * rosa_hcp/rosa-tuning-config.adoc - -ifeval::["{context}" == "rosa-tuning-config"] -:rosa-hcp-tuning: -endif::[] - -[id="custom-tuning-specification_{context}"] -= Custom tuning specification - -The custom resource (CR) for the Operator has two major sections. The first section, `profile:`, is a list of TuneD profiles and their names. The second, `recommend:`, defines the profile selection logic. - -Multiple custom tuning specifications can co-exist as multiple CRs in the Operator's namespace. The existence of new CRs or the deletion of old CRs is detected by the Operator. All existing custom tuning specifications are merged and appropriate objects for the containerized TuneD daemons are updated. - -*Management state* - -The Operator Management state is set by adjusting the default Tuned CR. By default, the Operator is in the Managed state and the `spec.managementState` field is not present in the default Tuned CR. Valid values for the Operator Management state are as follows: - - * Managed: the Operator will update its operands as configuration resources are updated - * Unmanaged: the Operator will ignore changes to the configuration resources - * Removed: the Operator will remove its operands and resources the Operator provisioned - -*Profile data* - -The `profile:` section lists TuneD profiles and their names. - -ifndef::rosa-hcp-tuning[] -[source,yaml] ----- -profile: -- name: tuned_profile_1 - data: | - # TuneD profile specification - [main] - summary=Description of tuned_profile_1 profile - - [sysctl] - net.ipv4.ip_forward=1 - # ... other sysctl's or other TuneD daemon plugins supported by the containerized TuneD - -# ... - -- name: tuned_profile_n - data: | - # TuneD profile specification - [main] - summary=Description of tuned_profile_n profile - - # tuned_profile_n profile settings ----- -endif::rosa-hcp-tuning[] -ifdef::rosa-hcp-tuning[] -[source,json] ----- -{ - "profile": [ - { - "name": "tuned_profile_1", - "data": "# TuneD profile specification\n[main]\nsummary=Description of tuned_profile_1 profile\n\n[sysctl]\nnet.ipv4.ip_forward=1\n# ... 
other sysctl's or other TuneD daemon plugins supported by the containerized TuneD\n" - }, - { - "name": "tuned_profile_n", - "data": "# TuneD profile specification\n[main]\nsummary=Description of tuned_profile_n profile\n\n# tuned_profile_n profile settings\n" - } - ] -} ----- -endif::[] - -*Recommended profiles* - -The `profile:` selection logic is defined by the `recommend:` section of the CR. The `recommend:` section is a list of items to recommend the profiles based on a selection criteria. - -ifndef::rosa-hcp-tuning[] -[source,yaml] ----- -recommend: -<recommend-item-1> -# ... -<recommend-item-n> ----- -endif::rosa-hcp-tuning[] -ifdef::rosa-hcp-tuning[] -[source,json] ----- -"recommend": [ - { - "recommend-item-1": details_of_recommendation, - # ... - "recommend-item-n": details_of_recommendation, - } - ] ----- -endif::[] - -The individual items of the list: - -ifndef::rosa-hcp-tuning[] -[source,yaml] ----- -- machineConfigLabels: <1> - <mcLabels> <2> - match: <3> - <match> <4> - priority: <priority> <5> - profile: <tuned_profile_name> <6> - operand: <7> - debug: <bool> <8> - tunedConfig: - reapply_sysctl: <bool> <9> ----- -<1> Optional. -<2> A dictionary of key/value `MachineConfig` labels. The keys must be unique. -<3> If omitted, profile match is assumed unless a profile with a higher priority matches first or `machineConfigLabels` is set. -<4> An optional list. -<5> Profile ordering priority. Lower numbers mean higher priority (`0` is the highest priority). -<6> A TuneD profile to apply on a match. For example `tuned_profile_1`. -<7> Optional operand configuration. -<8> Turn debugging on or off for the TuneD daemon. Options are `true` for on or `false` for off. The default is `false`. -<9> Turn `reapply_sysctl` functionality on or off for the TuneD daemon. Options are `true` for on and `false` for off. -endif::rosa-hcp-tuning[] -ifdef::rosa-hcp-tuning[] -[source,json] ----- -{ - "profile": [ - { - # ... - } - ], - "recommend": [ - { - "profile": <tuned_profile_name>, <1> - "priority": <priority>, <2> - "machineConfigLabels": { <Key_Pair_for_MachineConfig> <3> - }, - "match": [ <4> - { - "label": <label_information> <5> - }, - ] - }, - ] -} ----- -<1> Profile ordering priority. Lower numbers mean higher priority (`0` is the highest priority). -<2> A TuneD profile to apply on a match. For example `tuned_profile_1`. -<3> Optional: A dictionary of key-value pairs `MachineConfig` labels. The keys must be unique. -<4> If omitted, profile match is assumed unless a profile with a higher priority matches first or `machineConfigLabels` is set. -<5> The label for the profile matched items. -endif::[] - -`<match>` is an optional list recursively defined as follows: - -ifndef::rosa-hcp-tuning[] -[source,yaml] ----- -- label: <label_name> <1> - value: <label_value> <2> - type: <label_type> <3> - <match> <4> ----- -<1> Node or pod label name. -<2> Optional node or pod label value. If omitted, the presence of `<label_name>` is enough to match. -<3> Optional object type (`node` or `pod`). If omitted, `node` is assumed. -<4> An optional `<match>` list. -endif::rosa-hcp-tuning[] -ifdef::rosa-hcp-tuning[] -[source,yaml] ----- -"match": [ - { - "label": <1> - }, -] ----- -<1> Node or pod label name. -endif::[] - -If `<match>` is not omitted, all nested `<match>` sections must also evaluate to `true`. Otherwise, `false` is assumed and the profile with the respective `<match>` section will not be applied or recommended. Therefore, the nesting (child `<match>` sections) works as logical AND operator. 
Conversely, if any item of the `<match>` list matches, the entire `<match>` list evaluates to `true`. Therefore, the list acts as logical OR operator. - -If `machineConfigLabels` is defined, machine config pool based matching is turned on for the given `recommend:` list item. `<mcLabels>` specifies the labels for a machine config. The machine config is created automatically to apply host settings, such as kernel boot parameters, for the profile `<tuned_profile_name>`. This involves finding all machine config pools with machine config selector matching `<mcLabels>` and setting the profile `<tuned_profile_name>` on all nodes that are assigned the found machine config pools. To target nodes that have both master and worker roles, you must use the master role. - -The list items `match` and `machineConfigLabels` are connected by the logical OR operator. The `match` item is evaluated first in a short-circuit manner. Therefore, if it evaluates to `true`, the `machineConfigLabels` item is not considered. - -[IMPORTANT] -==== -When using machine config pool based matching, it is advised to group nodes with the same hardware configuration into the same machine config pool. Not following this practice might result in TuneD operands calculating conflicting kernel parameters for two or more nodes sharing the same machine config pool. -==== - -.Example: node or pod label based matching - -ifndef::rosa-hcp-tuning[] -[source,yaml] ----- -- match: - - label: tuned.openshift.io/elasticsearch - match: - - label: node-role.kubernetes.io/master - - label: node-role.kubernetes.io/infra - type: pod - priority: 10 - profile: openshift-control-plane-es -- match: - - label: node-role.kubernetes.io/master - - label: node-role.kubernetes.io/infra - priority: 20 - profile: openshift-control-plane -- priority: 30 - profile: openshift-node ----- -endif::rosa-hcp-tuning[] -ifdef::rosa-hcp-tuning[] -[source,JSON] ----- -[ - { - "match": [ - { - "label": "tuned.openshift.io/elasticsearch", - "match": [ - { - "label": "node-role.kubernetes.io/master" - }, - { - "label": "node-role.kubernetes.io/infra" - } - ], - "type": "pod" - } - ], - "priority": 10, - "profile": "openshift-control-plane-es" - }, - { - "match": [ - { - "label": "node-role.kubernetes.io/master" - }, - { - "label": "node-role.kubernetes.io/infra" - } - ], - "priority": 20, - "profile": "openshift-control-plane" - }, - { - "priority": 30, - "profile": "openshift-node" - } -] ----- -endif::[] - -The CR above is translated for the containerized TuneD daemon into its `recommend.conf` file based on the profile priorities. The profile with the highest priority (`10`) is `openshift-control-plane-es` and, therefore, it is considered first. The containerized TuneD daemon running on a given node looks to see if there is a pod running on the same node with the `tuned.openshift.io/elasticsearch` label set. If not, the entire `<match>` section evaluates as `false`. If there is such a pod with the label, in order for the `<match>` section to evaluate to `true`, the node label also needs to be `node-role.kubernetes.io/master` or `node-role.kubernetes.io/infra`. - -If the labels for the profile with priority `10` matched, `openshift-control-plane-es` profile is applied and no other profile is considered. If the node/pod label combination did not match, the second highest priority profile (`openshift-control-plane`) is considered. 
This profile is applied if the containerized TuneD pod runs on a node with labels `node-role.kubernetes.io/master` or `node-role.kubernetes.io/infra`. - -Finally, the profile `openshift-node` has the lowest priority of `30`. It lacks the `<match>` section and, therefore, will always match. It acts as a profile catch-all to set `openshift-node` profile, if no other profile with higher priority matches on a given node. - -image::node-tuning-operator-workflow-revised.png[Decision workflow] - -.Example: machine config pool based matching -ifndef::rosa-hcp-tuning[] -[source,yaml] ----- -apiVersion: tuned.openshift.io/v1 -kind: Tuned -metadata: - name: openshift-node-custom - namespace: openshift-cluster-node-tuning-operator -spec: - profile: - - data: | - [main] - summary=Custom OpenShift node profile with an additional kernel parameter - include=openshift-node - [bootloader] - cmdline_openshift_node_custom=+skew_tick=1 - name: openshift-node-custom - - recommend: - - machineConfigLabels: - machineconfiguration.openshift.io/role: "worker-custom" - priority: 20 - profile: openshift-node-custom ----- -endif::rosa-hcp-tuning[] -ifdef::rosa-hcp-tuning[] -[source,JSON] ----- -{ - "apiVersion": "tuned.openshift.io/v1", - "kind": "Tuned", - "metadata": { - "name": "openshift-node-custom", - "namespace": "openshift-cluster-node-tuning-operator" - }, - "spec": { - "profile": [ - { - "data": "[main]\nsummary=Custom OpenShift node profile with an additional kernel parameter\ninclude=openshift-node\n[bootloader]\ncmdline_openshift_node_custom=+skew_tick=1\n", - "name": "openshift-node-custom" - } - ], - "recommend": [ - { - "machineConfigLabels": { - "machineconfiguration.openshift.io/role": "worker-custom" - }, - "priority": 20, - "profile": "openshift-node-custom" - } - ] - } -} ----- -endif::[] - -To minimize node reboots, label the target nodes with a label the machine config pool's node selector will match, then create the Tuned CR above and finally create the custom machine config pool itself. - -// $ oc label node <node> node-role.kubernetes.io/worker-custom= -// $ oc create -f <tuned-cr-above> -// $ oc create -f- <<EOF -// apiVersion: machineconfiguration.openshift.io/v1 -// kind: MachineConfigPool -// metadata: -// name: worker-custom -// labels: -// worker-custom: "" -// spec: -// machineConfigSelector: -// matchExpressions: -// - {key: machineconfiguration.openshift.io/role, operator: In, values: [worker,worker-custom]} -// nodeSelector: -// matchLabels: -// node-role.kubernetes.io/worker-custom: "" -// EOF - -*Cloud provider-specific TuneD profiles* - -With this functionality, all Cloud provider-specific nodes can conveniently be assigned a TuneD profile specifically tailored to a given Cloud provider on a {product-title} cluster. This can be accomplished without adding additional node labels or grouping nodes into machine config pools. - -This functionality takes advantage of `spec.providerID` node object values in the form of `<cloud-provider>://<cloud-provider-specific-id>` and writes the file `/var/lib/tuned/provider` with the value `<cloud-provider>` in NTO operand containers. The content of this file is then used by TuneD to load `provider-<cloud-provider>` profile if such profile exists. - -The `openshift` profile that both `openshift-control-plane` and `openshift-node` profiles inherit settings from is now updated to use this functionality through the use of conditional profile loading. Neither NTO nor TuneD currently include any Cloud provider-specific profiles. 
However, it is possible to create a custom profile `provider-<cloud-provider>` that will be applied to all Cloud provider-specific cluster nodes. - -.Example GCE Cloud provider profile -ifndef::rosa-hcp-tuning[] -[source,yaml] ----- -apiVersion: tuned.openshift.io/v1 -kind: Tuned -metadata: - name: provider-gce - namespace: openshift-cluster-node-tuning-operator -spec: - profile: - - data: | - [main] - summary=GCE Cloud provider-specific profile - # Your tuning for GCE Cloud provider goes here. - name: provider-gce ----- -endif::rosa-hcp-tuning[] -ifdef::rosa-hcp-tuning[] -[source,JSON] ----- -{ - "apiVersion": "tuned.openshift.io/v1", - "kind": "Tuned", - "metadata": { - "name": "provider-gce", - "namespace": "openshift-cluster-node-tuning-operator" - }, - "spec": { - "profile": [ - { - "data": "[main]\nsummary=GCE Cloud provider-specific profile\n# Your tuning for GCE Cloud provider goes here.\n", - "name": "provider-gce" - } - ] - } -} ----- -endif::[] - -[NOTE] -==== -Due to profile inheritance, any setting specified in the `provider-<cloud-provider>` profile will be overwritten by the `openshift` profile and its child profiles. -==== diff --git a/modules/customize-certificates-add-service-serving-apiservice.adoc b/modules/customize-certificates-add-service-serving-apiservice.adoc deleted file mode 100644 index 5be455b0eac7..000000000000 --- a/modules/customize-certificates-add-service-serving-apiservice.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/service-serving-certificate.adoc - -:_content-type: PROCEDURE -[id="add-service-certificate-apiservice_{context}"] -= Add the service CA bundle to an API service - -You can annotate an `APIService` object with `service.beta.openshift.io/inject-cabundle=true` to have its `spec.caBundle` field populated with the service CA bundle. This allows the Kubernetes API server to validate the service CA certificate used to secure the targeted endpoint. - -.Procedure - -. Annotate the API service with `service.beta.openshift.io/inject-cabundle=true`: -+ -[source,terminal] ----- -$ oc annotate apiservice <api_service_name> \//<1> - service.beta.openshift.io/inject-cabundle=true ----- -<1> Replace `<api_service_name>` with the name of the API service to annotate. -+ -For example, use the following command to annotate the API service `test1`: -+ -[source,terminal] ----- -$ oc annotate apiservice test1 service.beta.openshift.io/inject-cabundle=true ----- - -. View the API service to ensure that the service CA bundle has been injected: -+ -[source,terminal] ----- -$ oc get apiservice <api_service_name> -o yaml ----- -+ -The CA bundle is displayed in the `spec.caBundle` field in the YAML output: -+ -[source,terminal] ----- -apiVersion: apiregistration.k8s.io/v1 -kind: APIService -metadata: - annotations: - service.beta.openshift.io/inject-cabundle: "true" -... -spec: - caBundle: <CA_BUNDLE> -... 
----- diff --git a/modules/customize-certificates-add-service-serving-configmap.adoc b/modules/customize-certificates-add-service-serving-configmap.adoc deleted file mode 100644 index 8fd6008e56e3..000000000000 --- a/modules/customize-certificates-add-service-serving-configmap.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/service-serving-certificate.adoc - -:_content-type: PROCEDURE -[id="add-service-certificate-configmap_{context}"] -= Add the service CA bundle to a config map - -A pod can access the service CA certificate by mounting a `ConfigMap` object that -is annotated with `service.beta.openshift.io/inject-cabundle=true`. -Once annotated, the cluster automatically injects the service CA -certificate into the `service-ca.crt` key on the config map. Access to -this CA certificate allows TLS clients to verify connections to -services using service serving certificates. - -[IMPORTANT] -==== -After adding this annotation to a config map all existing data in it is -deleted. It is recommended to use a separate config map to contain the -`service-ca.crt`, instead of using the same config map that stores your -pod configuration. -==== - -.Procedure - -. Annotate the config map with `service.beta.openshift.io/inject-cabundle=true`: -+ -[source,terminal] ----- -$ oc annotate configmap <config_map_name> \//<1> - service.beta.openshift.io/inject-cabundle=true ----- -<1> Replace `<config_map_name>` with the name of the config map to annotate. -+ -[NOTE] -==== -Explicitly referencing the `service-ca.crt` key in a volume mount will prevent a pod from starting until the config map has been injected with the CA bundle. This behavior can be overridden by setting the `optional` field to `true` for the volume's serving certificate configuration. -==== -+ -For example, use the following command to annotate the config map `test1`: -+ -[source,terminal] ----- -$ oc annotate configmap test1 service.beta.openshift.io/inject-cabundle=true ----- - -. View the config map to ensure that the service CA bundle has been injected: -+ -[source,terminal] ----- -$ oc get configmap <config_map_name> -o yaml ----- -+ -The CA bundle is displayed as the value of the `service-ca.crt` key in the YAML output: -+ -[source,terminal] ----- -apiVersion: v1 -data: - service-ca.crt: | - -----BEGIN CERTIFICATE----- -... ----- diff --git a/modules/customize-certificates-add-service-serving-crd.adoc b/modules/customize-certificates-add-service-serving-crd.adoc deleted file mode 100644 index 0414151a5aad..000000000000 --- a/modules/customize-certificates-add-service-serving-crd.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/service-serving-certificate.adoc - -:_content-type: PROCEDURE -[id="add-service-certificate-crd_{context}"] -= Add the service CA bundle to a custom resource definition - -You can annotate a `CustomResourceDefinition` (CRD) object with `service.beta.openshift.io/inject-cabundle=true` to have its `spec.conversion.webhook.clientConfig.caBundle` field populated with the service CA bundle. This allows the Kubernetes API server to validate the service CA certificate used to secure the targeted endpoint. - -[NOTE] -==== -The service CA bundle will only be injected into the CRD if the CRD is configured to use a webhook for conversion. It is only useful to inject the service CA bundle if a CRD's webhook is secured with a service CA certificate. -==== - -.Procedure - -. 
Annotate the CRD with `service.beta.openshift.io/inject-cabundle=true`: -+ -[source,terminal] ----- -$ oc annotate crd <crd_name> \//<1> - service.beta.openshift.io/inject-cabundle=true ----- -<1> Replace `<crd_name>` with the name of the CRD to annotate. -+ -For example, use the following command to annotate the CRD `test1`: -+ -[source,terminal] ----- -$ oc annotate crd test1 service.beta.openshift.io/inject-cabundle=true ----- - -. View the CRD to ensure that the service CA bundle has been injected: -+ -[source,terminal] ----- -$ oc get crd <crd_name> -o yaml ----- -+ -The CA bundle is displayed in the `spec.conversion.webhook.clientConfig.caBundle` field in the YAML output: -+ -[source,terminal] ----- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - service.beta.openshift.io/inject-cabundle: "true" -... -spec: - conversion: - strategy: Webhook - webhook: - clientConfig: - caBundle: <CA_BUNDLE> -... ----- diff --git a/modules/customize-certificates-add-service-serving-mutating-webhook.adoc b/modules/customize-certificates-add-service-serving-mutating-webhook.adoc deleted file mode 100644 index 284eda698829..000000000000 --- a/modules/customize-certificates-add-service-serving-mutating-webhook.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/service-serving-certificate.adoc - -:_content-type: PROCEDURE -[id="add-service-certificate-mutating-webhook_{context}"] -= Add the service CA bundle to a mutating webhook configuration - -You can annotate a `MutatingWebhookConfiguration` object with `service.beta.openshift.io/inject-cabundle=true` to have the `clientConfig.caBundle` field of each webhook populated with the service CA bundle. This allows the Kubernetes API server to validate the service CA certificate used to secure the targeted endpoint. - -[NOTE] -==== -Do not set this annotation for admission webhook configurations that need to specify different CA bundles for different webhooks. If you do, then the service CA bundle will be injected for all webhooks. -==== - -.Procedure - -. Annotate the mutating webhook configuration with `service.beta.openshift.io/inject-cabundle=true`: -+ -[source,terminal] ----- -$ oc annotate mutatingwebhookconfigurations <mutating_webhook_name> \//<1> - service.beta.openshift.io/inject-cabundle=true ----- -<1> Replace `<mutating_webhook_name>` with the name of the mutating webhook configuration to annotate. -+ -For example, use the following command to annotate the mutating webhook configuration `test1`: -+ -[source,terminal] ----- -$ oc annotate mutatingwebhookconfigurations test1 service.beta.openshift.io/inject-cabundle=true ----- - -. View the mutating webhook configuration to ensure that the service CA bundle has been injected: -+ -[source,terminal] ----- -$ oc get mutatingwebhookconfigurations <mutating_webhook_name> -o yaml ----- -+ -The CA bundle is displayed in the `clientConfig.caBundle` field of all webhooks in the YAML output: -+ -[source,terminal] ----- -apiVersion: admissionregistration.k8s.io/v1 -kind: MutatingWebhookConfiguration -metadata: - annotations: - service.beta.openshift.io/inject-cabundle: "true" -... -webhooks: -- myWebhook: - - v1beta1 - clientConfig: - caBundle: <CA_BUNDLE> -... 
----- diff --git a/modules/customize-certificates-add-service-serving-validating-webhook.adoc b/modules/customize-certificates-add-service-serving-validating-webhook.adoc deleted file mode 100644 index 2cbc9e8c278d..000000000000 --- a/modules/customize-certificates-add-service-serving-validating-webhook.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/service-serving-certificate.adoc - -:_content-type: PROCEDURE -[id="add-service-certificate-validating-webhook_{context}"] -= Add the service CA bundle to a validating webhook configuration - -You can annotate a `ValidatingWebhookConfiguration` object with `service.beta.openshift.io/inject-cabundle=true` to have the `clientConfig.caBundle` field of each webhook populated with the service CA bundle. This allows the Kubernetes API server to validate the service CA certificate used to secure the targeted endpoint. - -[NOTE] -==== -Do not set this annotation for admission webhook configurations that need to specify different CA bundles for different webhooks. If you do, then the service CA bundle will be injected for all webhooks. -==== - -.Procedure - -. Annotate the validating webhook configuration with `service.beta.openshift.io/inject-cabundle=true`: -+ -[source,terminal] ----- -$ oc annotate validatingwebhookconfigurations <validating_webhook_name> \//<1> - service.beta.openshift.io/inject-cabundle=true ----- -<1> Replace `<validating_webhook_name>` with the name of the validating webhook configuration to annotate. -+ -For example, use the following command to annotate the validating webhook configuration `test1`: -+ -[source,terminal] ----- -$ oc annotate validatingwebhookconfigurations test1 service.beta.openshift.io/inject-cabundle=true ----- - -. View the validating webhook configuration to ensure that the service CA bundle has been injected: -+ -[source,terminal] ----- -$ oc get validatingwebhookconfigurations <validating_webhook_name> -o yaml ----- -+ -The CA bundle is displayed in the `clientConfig.caBundle` field of all webhooks in the YAML output: -+ -[source,terminal] ----- -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - annotations: - service.beta.openshift.io/inject-cabundle: "true" -... -webhooks: -- myWebhook: - - v1beta1 - clientConfig: - caBundle: <CA_BUNDLE> -... ----- diff --git a/modules/customize-certificates-add-service-serving.adoc b/modules/customize-certificates-add-service-serving.adoc deleted file mode 100644 index 346faa2a2949..000000000000 --- a/modules/customize-certificates-add-service-serving.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/service-serving-certificate.adoc - -:_content-type: PROCEDURE -[id="add-service-certificate_{context}"] -= Add a service certificate - -To secure communication to your service, generate a signed serving certificate and key pair into a secret in the same namespace as the service. - -The generated certificate is only valid for the internal service DNS name `<service.name>.<service.namespace>.svc`, and is only valid for internal communications. If your service is a headless service (no `clusterIP` value set), the generated certificate also contains a wildcard subject in the format of `*.<service.name>.<service.namespace>.svc`. 
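For example, after the secret has been generated by the procedure below, you can inspect which DNS names the serving certificate actually covers. This is a sketch that assumes the `openssl` client is available locally and uses a placeholder secret name:

[source,terminal]
----
$ oc get secret <secret_name> -o jsonpath='{.data.tls\.crt}' | base64 -d \
  | openssl x509 -noout -text | grep -A1 'Subject Alternative Name'
----

For a regular service, the output lists a single `DNS:` entry of the form `<service.name>.<service.namespace>.svc`; for a headless service, the wildcard entry is listed as well.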
- -[IMPORTANT] -==== -Because the generated certificates contain wildcard subjects for headless services, you must not use the service CA if your client must differentiate between individual pods. In this case: - -* Generate individual TLS certificates by using a different CA. -* Do not accept the service CA as a trusted CA for connections that are directed to individual pods and must not be impersonated by other pods. These connections must be configured to trust the CA that was used to generate the individual TLS certificates. -==== - -.Prerequisites: - -* You must have a service defined. - -.Procedure - -. Annotate the service with `service.beta.openshift.io/serving-cert-secret-name`: -+ -[source,terminal] ----- -$ oc annotate service <service_name> \//<1> - service.beta.openshift.io/serving-cert-secret-name=<secret_name> //<2> ----- -<1> Replace `<service_name>` with the name of the service to secure. -<2> `<secret_name>` will be the name of the generated secret containing the -certificate and key pair. For convenience, it is recommended that this -be the same as `<service_name>`. -+ -For example, use the following command to annotate the service `test1`: -+ -[source,terminal] ----- -$ oc annotate service test1 service.beta.openshift.io/serving-cert-secret-name=test1 ----- - -. Examine the service to confirm that the annotations are present: -+ -[source,terminal] ----- -$ oc describe service <service_name> ----- -+ -.Example output -[source,terminal] ----- -... -Annotations: service.beta.openshift.io/serving-cert-secret-name: <service_name> - service.beta.openshift.io/serving-cert-signed-by: openshift-service-serving-signer@1556850837 -... ----- - -. After the cluster generates a secret for your service, your `Pod` spec can -mount it, and the pod will run after it becomes available. diff --git a/modules/customize-certificates-api-add-named.adoc b/modules/customize-certificates-api-add-named.adoc deleted file mode 100644 index 740ff7c7eaf8..000000000000 --- a/modules/customize-certificates-api-add-named.adoc +++ /dev/null @@ -1,120 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/api-server.adoc - -:_content-type: PROCEDURE -[id="customize-certificates-api-add-named_{context}"] -= Add an API server named certificate - -The default API server certificate is issued by an internal {product-title} -cluster CA. You can add one or more alternative certificates that the API -server will return based on the fully qualified domain name (FQDN) requested by -the client, for example when a reverse proxy or load balancer is used. - -.Prerequisites - -* You must have a certificate for the FQDN and its corresponding private key. Each should be in a separate PEM format file. -* The private key must be unencrypted. If your key is encrypted, decrypt it -before importing it into {product-title}. -* The certificate must include the `subjectAltName` extension showing the FQDN. -* The certificate file can contain one or more certificates in a chain. The -certificate for the API server FQDN must be the first certificate in the file. -It can then be followed with any intermediate certificates, and the file should -end with the root CA certificate. - -[WARNING] -==== -Do not provide a named certificate for the internal load balancer (host -name `api-int.<cluster_name>.<base_domain>`). Doing so will leave your -cluster in a degraded state. -==== - -.Procedure - -. Login to the new API as the `kubeadmin` user. 
-+ -[source,terminal] ----- -$ oc login -u kubeadmin -p <password> https://FQDN:6443 ----- - -. Get the `kubeconfig` file. -+ -[source,terminal] ----- -$ oc config view --flatten > kubeconfig-newapi ----- - -. Create a secret that contains the certificate chain and private key in the -`openshift-config` namespace. -+ -[source,terminal] ----- -$ oc create secret tls <secret> \//<1> - --cert=</path/to/cert.crt> \//<2> - --key=</path/to/cert.key> \//<3> - -n openshift-config ----- -<1> `<secret>` is the name of the secret that will contain the certificate chain and private key. -<2> `</path/to/cert.crt>` is the path to the certificate chain on your local file system. -<3> `</path/to/cert.key>` is the path to the private key associated with this certificate. - -. Update the API server to reference the created secret. -+ -[source,terminal] ----- -$ oc patch apiserver cluster \ - --type=merge -p \ - '{"spec":{"servingCerts": {"namedCertificates": - [{"names": ["<FQDN>"], //<1> - "servingCertificate": {"name": "<secret>"}}]}}}' <2> ----- -<1> Replace `<FQDN>` with the FQDN that the API server should provide the certificate for. -<2> Replace `<secret>` with the name used for the secret in the previous step. - -. Examine the `apiserver/cluster` object and confirm the secret is now -referenced. -+ -[source,terminal] ----- -$ oc get apiserver cluster -o yaml ----- -+ -.Example output -[source,terminal] ----- -... -spec: - servingCerts: - namedCertificates: - - names: - - <FQDN> - servingCertificate: - name: <secret> -... ----- - -. Check the `kube-apiserver` operator, and verify that a new revision of the Kubernetes API server rolls out. -It may take a minute for the operator to detect the configuration change and trigger a new deployment. -While the new revision is rolling out, `PROGRESSING` will report `True`. -+ -[source,terminal] ----- -$ oc get clusteroperators kube-apiserver ----- -+ -Do not continue to the next step until `PROGRESSING` is listed as `False`, as shown in the following output: -+ -.Example output -[source,terminal,subs="attributes+"] ----- -NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE -kube-apiserver {product-version}.0 True False False 145m ----- -+ -If `PROGRESSING` is showing `True`, wait a few minutes and try again. -+ -[NOTE] -==== -A new revision of the Kubernetes API server only rolls out if the API server named certificate is added for the first time. When the API server named certificate is renewed, a new revision of the Kubernetes API server does not roll out because the `kube-apiserver` pods dynamically reload the updated certificate. -==== diff --git a/modules/customize-certificates-manually-rotate-service-ca.adoc b/modules/customize-certificates-manually-rotate-service-ca.adoc deleted file mode 100644 index 9c5a0161ea5b..000000000000 --- a/modules/customize-certificates-manually-rotate-service-ca.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/certificates/service-signing-certificates.adoc - -:_content-type: PROCEDURE -[id="manually-rotate-service-ca_{context}"] -= Manually rotate the service CA certificate - -The service CA is valid for 26 months and is automatically refreshed when there is less than 13 months validity left. - -If necessary, you can manually refresh the service CA by using the following procedure. - -[WARNING] -==== -A manually-rotated service CA does not maintain trust with the previous service CA. 
You might experience a temporary service disruption until the pods in the cluster are restarted, which ensures that pods are using service serving certificates issued by the new service CA. -==== - -.Prerequisites - -* You must be logged in as a cluster admin. - -.Procedure - -. View the expiration date of the current service CA certificate by -using the following command. -+ -[source,terminal] ----- -$ oc get secrets/signing-key -n openshift-service-ca \ - -o template='{{index .data "tls.crt"}}' \ - | base64 --decode \ - | openssl x509 -noout -enddate ----- - -. Manually rotate the service CA. This process generates a new service CA -which will be used to sign the new service certificates. -+ -[source,terminal] ----- -$ oc delete secret/signing-key -n openshift-service-ca ----- - -. To apply the new certificates to all services, restart all the pods -in your cluster. This command ensures that all services use the -updated certificates. -+ -[source,terminal] ----- -$ for I in $(oc get ns -o jsonpath='{range .items[*]} {.metadata.name}{"\n"} {end}'); \ - do oc delete pods --all -n $I; \ - sleep 1; \ - done ----- -+ -[WARNING] -==== -This command will cause a service interruption, as it goes through and -deletes every running pod in every namespace. These pods will automatically -restart after they are deleted. -==== diff --git a/modules/customize-certificates-replace-default-router.adoc b/modules/customize-certificates-replace-default-router.adoc deleted file mode 100644 index 85672192b088..000000000000 --- a/modules/customize-certificates-replace-default-router.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/replacing-default-ingress-certificate.adoc - -:_content-type: PROCEDURE -[id="replacing-default-ingress_{context}"] -= Replacing the default ingress certificate - -You can replace the default ingress certificate for all -applications under the `.apps` subdomain. After you replace -the certificate, all applications, including the web console -and CLI, will have encryption provided by specified certificate. - -.Prerequisites - -* You must have a wildcard certificate for the fully qualified `.apps` subdomain -and its corresponding private key. Each should be in a separate PEM format file. -* The private key must be unencrypted. If your key is encrypted, decrypt it -before importing it into {product-title}. -* The certificate must include the `subjectAltName` extension showing -`*.apps.<clustername>.<domain>`. -* The certificate file can contain one or more certificates in a chain. The -wildcard certificate must be the first certificate in the file. It can then be -followed with any intermediate certificates, and the file should end with the -root CA certificate. -* Copy the root CA certificate into an additional PEM format file. - -.Procedure - -. Create a config map that includes only the root CA certificate used to sign the wildcard certificate: -+ -[source,terminal] ----- -$ oc create configmap custom-ca \ - --from-file=ca-bundle.crt=</path/to/example-ca.crt> \//<1> - -n openshift-config ----- -<1> `</path/to/example-ca.crt>` is the path to the root CA certificate file on your local file system. - -. Update the cluster-wide proxy configuration with the newly created config map: -+ -[source,terminal] ----- -$ oc patch proxy/cluster \ - --type=merge \ - --patch='{"spec":{"trustedCA":{"name":"custom-ca"}}}' ----- - -. 
Create a secret that contains the wildcard certificate chain and key: -+ -[source,terminal] ----- -$ oc create secret tls <secret> \//<1> - --cert=</path/to/cert.crt> \//<2> - --key=</path/to/cert.key> \//<3> - -n openshift-ingress ----- -<1> `<secret>` is the name of the secret that will contain the certificate chain -and private key. -<2> `</path/to/cert.crt>` is the path to the certificate chain on your local -file system. -<3> `</path/to/cert.key>` is the path to the private key associated -with this certificate. - -. Update the Ingress Controller configuration with the newly created -secret: -+ -[source,terminal] ----- -$ oc patch ingresscontroller.operator default \ - --type=merge -p \ - '{"spec":{"defaultCertificate": {"name": "<secret>"}}}' \//<1> - -n openshift-ingress-operator ----- -<1> Replace `<secret>` with the name used for the secret in -the previous step. diff --git a/modules/customize-certificates-rotate-service-serving.adoc b/modules/customize-certificates-rotate-service-serving.adoc deleted file mode 100644 index e79214866e98..000000000000 --- a/modules/customize-certificates-rotate-service-serving.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/service-serving-certificate.adoc - -:_content-type: PROCEDURE -[id="rotate-service-serving_{context}"] -= Manually rotate the generated service certificate - -You can rotate the service certificate by deleting the -associated secret. Deleting the secret results in a new one -being automatically created, resulting in a new certificate. - -.Prerequisites - -* A secret containing the certificate and key pair must -have been generated for the service. - -.Procedure - -. Examine the service to determine the secret containing the -certificate. This is found in the `serving-cert-secret-name` -annotation, as seen below. -+ -[source,terminal] ----- -$ oc describe service <service_name> ----- -+ -.Example output -[source,terminal] ----- -... -service.beta.openshift.io/serving-cert-secret-name: <secret> -... ----- - -. Delete the generated secret for the service. This process -will automatically recreate the secret. -+ -[source,terminal] ----- -$ oc delete secret <secret> //<1> ----- -<1> Replace `<secret>` with the name of the secret from the previous -step. - -. Confirm that the certificate has been recreated -by obtaining the new secret and examining the `AGE`. -+ -[source,terminal] ----- -$ oc get secret <service_name> ----- -+ -.Example output -[source,terminal] ----- -NAME TYPE DATA AGE -<service.name> kubernetes.io/tls 2 1s ----- diff --git a/modules/customize-certificates-understanding-default-router.adoc b/modules/customize-certificates-understanding-default-router.adoc deleted file mode 100644 index 2ac5764f66b5..000000000000 --- a/modules/customize-certificates-understanding-default-router.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// security/certificates/replacing-default-ingress-certificate.adoc - -:_content-type: CONCEPT -[id="understanding-default-ingress_{context}"] -= Understanding the default ingress certificate - -By default, {product-title} uses the Ingress Operator to -create an internal CA and issue a wildcard certificate that is valid for -applications under the `.apps` sub-domain. Both the web console and CLI -use this certificate as well. - -The internal infrastructure CA certificates are self-signed. 
-While this process might be perceived as bad practice by some security or -PKI teams, any risk here is minimal. The only clients that implicitly -trust these certificates are other components within the cluster. -Replacing the default wildcard certificate with one that is issued by a -public CA already included in the CA bundle as provided by the container userspace -allows external clients to connect securely to applications running under the `.apps` sub-domain. diff --git a/modules/customize-certificates-understanding-service-serving.adoc b/modules/customize-certificates-understanding-service-serving.adoc deleted file mode 100644 index 990857d97dc7..000000000000 --- a/modules/customize-certificates-understanding-service-serving.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/service-serving-certificate.adoc - -:_content-type: CONCEPT -[id="understanding-service-serving_{context}"] -= Understanding service serving certificates - -Service serving certificates are intended to support complex -middleware applications that require encryption. These certificates are -issued as TLS web server certificates. - -The `service-ca` controller uses the `x509.SHA256WithRSA` signature -algorithm to generate service certificates. - -The generated certificate and key are in PEM format, stored in `tls.crt` -and `tls.key` respectively, within a created secret. The -certificate and key are automatically replaced when they get close to -expiration. - -The service CA certificate, which issues the service certificates, is valid for 26 months and is automatically rotated when there is less than 13 months validity left. After rotation, the previous service CA configuration is still trusted until its expiration. This allows a grace period for all affected services to refresh their key material before the expiration. If you do not upgrade your cluster during this grace period, which restarts services and refreshes their key material, you might need to manually restart services to avoid failures after the previous service CA expires. - -[NOTE] -==== -You can use the following command to manually restart all pods in the cluster. Be aware that running this command causes a service interruption, because it deletes every running pod in every namespace. These pods will automatically restart after they are deleted. - -[source,terminal] ----- -$ for I in $(oc get ns -o jsonpath='{range .items[*]} {.metadata.name}{"\n"} {end}'); \ - do oc delete pods --all -n $I; \ - sleep 1; \ - done ----- -==== diff --git a/modules/customizing-cli-downloads.adoc b/modules/customizing-cli-downloads.adoc deleted file mode 100644 index 4511b2322a48..000000000000 --- a/modules/customizing-cli-downloads.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: PROCEDURE -[id="creating-custom-CLI-downloads_{context}"] -= Customizing CLI downloads - -You can configure links for downloading the CLI with custom link text and URLs, -which can point directly to file packages or to an external page that provides -the packages. - -.Prerequisites - -* You must have administrator privileges. - -.Procedure - -. Navigate to *Administration* -> *Custom Resource Definitions*. - -. Select *ConsoleCLIDownload* from the list of Custom Resource Definitions (CRDs). - -. 
Click the *YAML* tab, and then make your edits: -+ -[source,yaml] ----- -apiVersion: console.openshift.io/v1 -kind: ConsoleCLIDownload -metadata: - name: example-cli-download-links-for-foo -spec: - description: | - This is an example of download links for foo - displayName: example-foo - links: - - href: 'https://www.example.com/public/foo.tar' - text: foo for linux - - href: 'https://www.example.com/public/foo.mac.zip' - text: foo for mac - - href: 'https://www.example.com/public/foo.win.zip' - text: foo for windows ----- - -. Click the *Save* button. diff --git a/modules/customizing-project-request-message.adoc b/modules/customizing-project-request-message.adoc deleted file mode 100644 index 67a5cb425afa..000000000000 --- a/modules/customizing-project-request-message.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/projects/configuring-project-creation.adoc - -:_content-type: PROCEDURE -[id="customizing-project-request-message_{context}"] -= Customizing the project request message - -When a developer or a service account that is unable to self-provision projects -makes a project creation request using the web console or CLI, the following -error message is returned by default: - -[source,terminal] ----- -You may not request a new project via this API. ----- - -Cluster administrators can customize this message. Consider updating it to -provide further instructions on how to request a new project specific to your -organization. For example: - -* To request a project, contact your system administrator at -[x-]`projectname@example.com`. -* To request a new project, fill out the project request form located at -[x-]`https://internal.example.com/openshift-project-request`. - -To customize the project request message: - -.Procedure - -. Edit the project configuration resource using the web console or CLI. - -** Using the web console: -... Navigate to the *Administration* -> *Cluster Settings* page. -... Click *Configuration* to view all configuration resources. -... Find the entry for *Project* and click *Edit YAML*. - -** Using the CLI: -... Log in as a user with `cluster-admin` privileges. -... Edit the `project.config.openshift.io/cluster` resource: -+ -[source,terminal] ----- -$ oc edit project.config.openshift.io/cluster ----- - -. Update the `spec` section to include the `projectRequestMessage` parameter and -set the value to your custom message: -+ -.Project configuration resource with custom project request message -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Project -metadata: - ... -spec: - projectRequestMessage: <message_string> ----- -+ -For example: - -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Project -metadata: - ... -spec: - projectRequestMessage: To request a project, contact your system administrator at projectname@example.com. ----- - -. After you save your changes, attempt to create a new project as a developer or -service account that is unable to self-provision projects to verify that your -changes were successfully applied. 
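If you prefer to apply this change non-interactively, for example from a script, the same `projectRequestMessage` field can be set with a merge patch. This is a sketch only, and the message text is an example value:

[source,terminal]
----
$ oc patch project.config.openshift.io/cluster --type=merge \
    -p '{"spec":{"projectRequestMessage":"To request a project, contact your system administrator at projectname@example.com."}}'
----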
diff --git a/modules/customizing-the-jenkins-image-stream-tag.adoc b/modules/customizing-the-jenkins-image-stream-tag.adoc deleted file mode 100644 index 7ef7aaaa6e15..000000000000 --- a/modules/customizing-the-jenkins-image-stream-tag.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/important-changes-to-openshift-jenkins-images.adoc -:_content-type: PROCEDURE - -[id="customizing-the-jenkins-image-stream-tag_{context}"] -= Customizing the Jenkins image stream tag - -To override the default upgrade behavior and control how the Jenkins image is upgraded, you set the image stream tag value that your Jenkins deployment configurations use. - -The default upgrade behavior is the behavior that existed when the Jenkins image was part of the install payload. The image stream tag names, `2` and `ocp-upgrade-redeploy`, in the `jenkins-rhel.json` image stream file use SHA-specific image references. Therefore, when those tags are updated with a new SHA, the {product-title} image change controller automatically redeploys the Jenkins deployment configuration from the associated templates, such as `jenkins-ephemeral.json` or `jenkins-persistent.json`. - -For new deployments, to override that default value, you change the value of the `JENKINS_IMAGE_STREAM_TAG` in the `jenkins-ephemeral.json` Jenkins template. For example, replace the `2` in `"value": "jenkins:2"` with one of the following image stream tags: - -* `ocp-upgrade-redeploy`, the default value, updates your Jenkins image when you upgrade {product-title}. -* `user-maintained-upgrade-redeploy` requires you to manually redeploy Jenkins by running `$ oc import-image jenkins:user-maintained-upgrade-redeploy -n openshift` after upgrading {product-title}. -* `scheduled-upgrade-redeploy` periodically checks the given `<image>:<tag>` combination for changes and upgrades the image when it changes. The image change controller pulls the changed image and redeploys the Jenkins deployment configuration provisioned by the templates. For more information about this scheduled import policy, see the "Adding tags to image streams" in the following "Additional resources." - -[NOTE] -==== -To override the current upgrade value for existing deployments, change the values of the environment variables that correspond to those template parameters. -==== - -.Prerequisites - -* You are running OpenShift Jenkins on {product-title} {product-version}. -* You know the namespace where OpenShift Jenkins is deployed. - -.Procedure - -* Set the image stream tag value, replacing `<namespace>` with namespace where OpenShift Jenkins is deployed and `<image_stream_tag>` with an image stream tag: -+ -.Example -[source,terminal] ----- -$ oc patch dc jenkins -p '{"spec":{"triggers":[{"type":"ImageChange","imageChangeParams":{"automatic":true,"containerNames":["jenkins"],"from":{"kind":"ImageStreamTag","namespace":"<namespace>","name":"jenkins:<image_stream_tag>"}}}]}}' ----- -+ -[TIP] -==== -Alternatively, to edit the Jenkins deployment configuration YAML, enter `$ oc edit dc/jenkins -n <namespace>` and update the `value: 'jenkins:<image_stream_tag>'` line. 
-==== diff --git a/modules/customizing-the-login-page.adoc b/modules/customizing-the-login-page.adoc deleted file mode 100644 index ec9889b4e7c3..000000000000 --- a/modules/customizing-the-login-page.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: PROCEDURE -[id="customizing-the-login-page_{context}"] -= Customizing the login page - -Create Terms of Service information with custom login pages. Custom login pages -can also be helpful if you use a third-party login provider, such as GitHub or -Google, to show users a branded page that they trust and expect before being -redirected to the authentication provider. You can also render custom error -pages during the authentication process. - -[NOTE] -==== -Customizing the error template is limited to identity providers (IDPs) that use redirects, such as request header and OIDC-based IDPs. It does not have an effect on IDPs that use direct password authentication, such as LDAP and htpasswd. -==== - -.Prerequisites - -* You must have administrator privileges. - -.Procedure - -. Run the following commands to create templates you can modify: -+ -[source,terminal] ----- -$ oc adm create-login-template > login.html ----- -+ -[source,terminal] ----- -$ oc adm create-provider-selection-template > providers.html ----- -+ -[source,terminal] ----- -$ oc adm create-error-template > errors.html ----- - -. Create the secrets: -+ -[source,terminal] ----- -$ oc create secret generic login-template --from-file=login.html -n openshift-config ----- -+ -[source,terminal] ----- -$ oc create secret generic providers-template --from-file=providers.html -n openshift-config ----- -+ -[source,terminal] ----- -$ oc create secret generic error-template --from-file=errors.html -n openshift-config ----- - -. Run: -+ -[source,terminal] ----- -$ oc edit oauths cluster ----- - -. Update the specification: -+ -[source,yaml] ----- -spec: - templates: - error: - name: error-template - login: - name: login-template - providerSelection: - name: providers-template ----- -+ -Run `oc explain oauths.spec.templates` to understand the options. diff --git a/modules/customizing-the-web-console-URL.adoc b/modules/customizing-the-web-console-URL.adoc deleted file mode 100644 index aada0fac09c4..000000000000 --- a/modules/customizing-the-web-console-URL.adoc +++ /dev/null @@ -1,99 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: PROCEDURE -[id="customizing-the-web-console-url_{context}"] -= Customizing console routes - -For `console` and `downloads` routes, custom routes functionality uses the `ingress` config route configuration API. If the `console` custom route is set up in both the `ingress` config and `console-operator` config, then the new `ingress` config custom route configuration takes precedent. The route configuration with the `console-operator` config is deprecated. - -[id="customizing-the-console-route_{context}"] -== Customizing the console route - -You can customize the console route by setting the custom hostname and TLS certificate in the `spec.componentRoutes` field of the cluster `Ingress` configuration. - -.Prerequisites - -* You have logged in to the cluster as a user with administrative privileges. -* You have created a secret in the `openshift-config` namespace containing the TLS certificate and key. 
This is required if the domain for the custom hostname suffix does not match the cluster domain suffix. The secret is optional if the suffix matches. -+ -[TIP] -==== -You can create a TLS secret by using the `oc create secret tls` command. -==== - -.Procedure - -. Edit the cluster `Ingress` configuration: -+ -[source,terminal] ----- -$ oc edit ingress.config.openshift.io cluster ----- - -. Set the custom hostname and optionally the serving certificate and key: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Ingress -metadata: - name: cluster -spec: - componentRoutes: - - name: console - namespace: openshift-console - hostname: <custom_hostname> <1> - servingCertKeyPairSecret: - name: <secret_name> <2> ----- -<1> The custom hostname. -<2> Reference to a secret in the `openshift-config` namespace that contains a TLS certificate (`tls.crt`) and key (`tls.key`). This is required if the domain for the custom hostname suffix does not match the cluster domain suffix. The secret is optional if the suffix matches. - -. Save the file to apply the changes. - -[id="customizing-the-download-route_{context}"] -== Customizing the download route - -You can customize the download route by setting the custom hostname and TLS certificate in the `spec.componentRoutes` field of the cluster `Ingress` configuration. - -.Prerequisites - -* You have logged in to the cluster as a user with administrative privileges. -* You have created a secret in the `openshift-config` namespace containing the TLS certificate and key. This is required if the domain for the custom hostname suffix does not match the cluster domain suffix. The secret is optional if the suffix matches. -+ -[TIP] -==== -You can create a TLS secret by using the `oc create secret tls` command. -==== - -.Procedure - -. Edit the cluster `Ingress` configuration: -+ -[source,terminal] ----- -$ oc edit ingress.config.openshift.io cluster ----- - -. Set the custom hostname and optionally the serving certificate and key: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Ingress -metadata: - name: cluster -spec: - componentRoutes: - - name: downloads - namespace: openshift-console - hostname: <custom_hostname> <1> - servingCertKeyPairSecret: - name: <secret_name> <2> ----- -<1> The custom hostname. -<2> Reference to a secret in the `openshift-config` namespace that contains a TLS certificate (`tls.crt`) and key (`tls.key`). This is required if the domain for the custom hostname suffix does not match the cluster domain suffix. The secret is optional if the suffix matches. - -. Save the file to apply the changes. diff --git a/modules/data-storage-management.adoc b/modules/data-storage-management.adoc deleted file mode 100644 index 5093c5df74ac..000000000000 --- a/modules/data-storage-management.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/optimizing-storage.adoc - -[id="data-storage-management_{context}"] -= Data storage management - -The following table summarizes the main directories that {product-title} components write data to. - -.Main directories for storing {product-title} data -[options="header,footer"] -|=== -|Directory|Notes|Sizing|Expected growth - -|*_/var/lib/etcd_* -|Used for etcd storage when storing the database. -|Less than 20 GB. - -Database can grow up to 8 GB. -|Will grow slowly with the environment. Only storing metadata. - -Additional 20-25 GB for every additional 8 GB of memory. 
- -|*_/var/lib/containers_* -|This is the mount point for the CRI-O runtime. Storage used for active container runtimes, including pods, and storage of local images. Not used for registry storage. -|50 GB for a node with 16 GB memory. Note that this sizing should not be used to determine minimum cluster requirements. - -Additional 20-25 GB for every additional 8 GB of memory. -|Growth is limited by capacity for running containers. - -|*_/var/lib/kubelet_* -|Ephemeral volume storage for pods. This includes anything external that is mounted into a container at runtime. Includes environment variables, kube secrets, and data volumes not backed by persistent volumes. -|Varies -|Minimal if pods requiring storage are using persistent volumes. If using ephemeral storage, this can grow quickly. - -|*_/var/log_* -|Log files for all components. -|10 to 30 GB. -|Log files can grow quickly; size can be managed by growing disks or by using log rotate. - -|=== diff --git a/modules/dedicated-accessing-your-cluster.adoc b/modules/dedicated-accessing-your-cluster.adoc deleted file mode 100644 index 1781bfe06286..000000000000 --- a/modules/dedicated-accessing-your-cluster.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * getting_started/accessing-your-services.adoc - -[id="dedicated-accessing-your-cluster_{context}"] -= Accessing your cluster - -Use the following steps to access your {product-title} cluster. - -.Procedure - -. From {cluster-manager-url}, click on the cluster you want to access. - - . Click *Launch Console*. diff --git a/modules/dedicated-admin-granting-permissions.adoc b/modules/dedicated-admin-granting-permissions.adoc deleted file mode 100644 index b959243257fd..000000000000 --- a/modules/dedicated-admin-granting-permissions.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/dedicated-admin-role.adoc - -[id="dedicated-admin-granting-permissions_{context}"] -= Granting permissions to users or groups - -To grant permissions to other users or groups, you can add, or _bind_, a role to -them using the following commands: - -[source,terminal] ----- -$ oc adm policy add-role-to-user <role> <user_name> -$ oc adm policy add-role-to-group <role> <group_name> ----- diff --git a/modules/dedicated-aws-dc-existing.adoc b/modules/dedicated-aws-dc-existing.adoc deleted file mode 100644 index 878293949393..000000000000 --- a/modules/dedicated-aws-dc-existing.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-dc.adoc - -:_content-type: PROCEDURE -[id="dedicated-aws-dc-existing"] -= Connecting to an existing Direct Connect Gateway - -.Prerequisites - -* Confirm the CIDR range of the {product-title} VPC will not conflict with any other VGWs you have associated. -* Gather the following information: -** The Direct Connect Gateway ID. -** The AWS Account ID associated with the virtual interface. -** The BGP ASN assigned for the DXGateway. Optional: the Amazon default ASN may also be used. - -.Procedure - -. Log in to the {product-title} AWS Account Dashboard and select the correct region. -. From the {product-title} AWS Account region, select *VPC* from the *Services* menu. -. From *VPN Connections*, select *Virtual Private Gateways*. -. Select *Create Virtual Private Gateway*. -. Give the Virtual Private Gateway a suitable name. -. 
Click *Custom ASN* and enter the *Amazon side ASN* value gathered previously or use the Amazon Provided ASN. -. Create the Virtual Private Gateway. -. In the *Navigation* pane of the {product-title} AWS Account Dashboard, choose *Virtual private gateways* and select the virtual private gateway. Choose *View details*. -. Choose *Direct Connect gateway associations* and click *Associate Direct Connect gateway*. -. Under *Association account type*, for Account owner, choose *Another account*. -. For *Direct Connect gateway owner*, enter the ID of the AWS account that owns the Direct Connect gateway. -. Under *Association settings*, for Direct Connect gateway ID, enter the ID of the Direct Connect gateway. -. Under *Association settings*, for Virtual interface owner, enter the ID of the AWS account that owns the virtual interface for the association. -. Optional: Add prefixes to Allowed prefixes, separating them using commas. -. Choose *Associate Direct Connect gateway*. -. After the Association Proposal has been sent, it will be waiting for your -acceptance. The final steps you must perform are available in the -link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/multi-account-associate-vgw.html[AWS Documentation]. diff --git a/modules/dedicated-aws-dc-hvif.adoc b/modules/dedicated-aws-dc-hvif.adoc deleted file mode 100644 index a57b2273b2fa..000000000000 --- a/modules/dedicated-aws-dc-hvif.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-dc.adoc - -:_content-type: PROCEDURE -[id="dedicated-aws-dc-hvif"] -= Creating the hosted Virtual Interface - -.Prerequisites - -* Gather {product-title} AWS Account ID. - -[id="dedicated-aws-dc-hvif-type"] -== Determining the type of Direct Connect connection - -View the Direct Connect Virtual Interface details to determine the type of -connection. - -.Procedure - -. Log in to the {product-title} AWS Account Dashboard and select the correct region. -. Select *Direct Connect* from the *Services* menu. -. There will be one or more Virtual Interfaces waiting to be accepted, select one of them to view the *Summary*. -. View the Virtual Interface type: private or public. -. Record the *Amazon side ASN* value. - -If the Direct Connect Virtual Interface type is Private, a Virtual Private -Gateway is created. If the Direct Connect Virtual Interface is Public, a Direct -Connect Gateway is created. - -[id="dedicated-aws-dc-hvif-private"] -== Creating a Private Direct Connect - -A Private Direct Connect is created if the Direct Connect Virtual Interface type -is Private. - -.Procedure - -. Log in to the {product-title} AWS Account Dashboard and select the correct region. -. From the AWS region, select *VPC* from the *Services* menu. -. Select *Virtual Private Gateways* from *VPN Connections*. -. Click *Create Virtual Private Gateway*. -. Give the Virtual Private Gateway a suitable name. -. Select *Custom ASN* and enter the *Amazon side ASN* value gathered previously. -. Create the Virtual Private Gateway. -. Click the newly created Virtual Private Gateway and choose *Attach to VPC* from the *Actions* tab. -. Select the *{product-title} Cluster VPC* from the list, and attach the Virtual Private Gateway to the VPC. -. From the *Services* menu, click *Direct Connect*. Choose one of the Direct Connect Virtual Interfaces from the list. -. 
Acknowledge the *I understand that Direct Connect port charges apply once I click Accept Connection* message, then choose *Accept Connection*. -. Choose to *Accept* the Virtual Private Gateway Connection and select the Virtual Private Gateway that was created in the previous steps. -. Select *Accept* to accept the connection. -. Repeat the previous steps if there is more than one Virtual Interface. - -[id="dedicated-aws-dc-hvif-public"] -== Creating a Public Direct Connect - -A Public Direct Connect is created if the Direct Connect Virtual Interface type -is Public. - -.Procedure - -. Log in to the {product-title} AWS Account Dashboard and select the correct region. -. From the {product-title} AWS Account region, select *Direct Connect* from the *Services* menu. -. Select *Direct Connect Gateways* and *Create Direct Connect Gateway*. -. Give the Direct Connect Gateway a suitable name. -. In the *Amazon side ASN*, enter the Amazon side ASN value gathered previously. -. Create the Direct Connect Gateway. -. Select *Direct Connect* from the *Services* menu. -. Select one of the Direct Connect Virtual Interfaces from the list. -. Acknowledge the *I understand that Direct Connect port charges apply once I click Accept Connection* message, then choose *Accept Connection*. -. Choose to *Accept* the Direct Connect Gateway Connection and select the Direct Connect Gateway that was created in the previous steps. -. Click *Accept* to accept the connection. -. Repeat the previous steps if there is more than one Virtual Interface. - -[id="dedicated-aws-dc-hvif-verifying"] -== Verifying the Virtual Interfaces - -After the Direct Connect Virtual Interfaces have been accepted, wait a short -period and view the status of the Interfaces. - -.Procedure - -. Log in to the {product-title} AWS Account Dashboard and select the correct region. -. From the {product-title} AWS Account region, select *Direct Connect* from the *Services* menu. -. Select one of the Direct Connect Virtual Interfaces from the list. -. Check the Interface State has become *Available* -. Check the Interface BGP Status has become *Up*. -. Repeat this verification for any remaining Direct Connect Interfaces. - -After the Direct Connect Virtual Interfaces are available, you can log in to the -{product-title} AWS Account Dashboard and download the Direct Connect configuration file for -configuration on your side. diff --git a/modules/dedicated-aws-dc-methods.adoc b/modules/dedicated-aws-dc-methods.adoc deleted file mode 100644 index ee1aa058e17c..000000000000 --- a/modules/dedicated-aws-dc-methods.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-dc.adoc - -[id="dedicated-aws-dc-methods"] -= AWS Direct Connect methods - -A Direct Connect connection requires a hosted Virtual Interface (VIF) connected -to a Direct Connect Gateway (DXGateway), which is in turn associated to a -Virtual Gateway (VGW) or a Transit Gateway in order to access a remote VPC in -the same or another account. - -If you do not have an existing DXGateway, the typical process involves creating -the hosted VIF, with the DXGateway and VGW being created in the {product-title} AWS Account. - -If you have an existing DXGateway connected to one or more existing VGWs, the -process involves the {product-title} AWS Account sending an Association Proposal -to the DXGateway owner. 
The DXGateway owner must ensure that the proposed CIDR -will not conflict with any other VGWs they have associated. - -See the following AWS documentation for more details: - -* link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/WorkingWithVirtualInterfaces.html[Virtual Interfaces] -* link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/direct-connect-gateways-intro.html[Direct Connect Gateways] -* link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/multi-account-associate-vgw.html[Associating a VGW across accounts] - -[IMPORTANT] -==== -When connecting to an existing DXGateway, you are responsible for the -link:https://aws.amazon.com/directconnect/pricing/[costs]. -==== - -There are two configuration options available: - -[horizontal] -Method 1:: Create the hosted VIF and then the DXGateway and VGW. -Method 2:: Request a connection via an existing Direct Connect Gateway that you own. diff --git a/modules/dedicated-aws-vpc-accepting-peering.adoc b/modules/dedicated-aws-vpc-accepting-peering.adoc deleted file mode 100644 index d6fc3a90f00c..000000000000 --- a/modules/dedicated-aws-vpc-accepting-peering.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-peering.adoc - -:_content-type: PROCEDURE -[id="dedicated-aws-vpc-accepting-peering"] -= Accepting the VPC peer request - -After you create the VPC peering connection, you must accept the request in the -Customer AWS Account. - -.Prerequisites - -* Initiate the VPC peer request. - -.Procedure - -. Log in to the AWS Web Console. -. Navigate to *VPC Service*. -. Go to *Peering Connections*. -. Click on *Pending peering connection*. -. Confirm the AWS Account and VPC ID that the request originated from. This should -be from the {product-title} AWS Account and {product-title} Cluster VPC. -. Click *Accept Request*. diff --git a/modules/dedicated-aws-vpc-configuring-routing-tables.adoc b/modules/dedicated-aws-vpc-configuring-routing-tables.adoc deleted file mode 100644 index 13f250517ddf..000000000000 --- a/modules/dedicated-aws-vpc-configuring-routing-tables.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-peering.adoc - -:_content-type: PROCEDURE -[id="dedicated-aws-vpc-configuring-routing-tables"] -= Configuring the routing tables - -After you accept the VPC peering request, both VPCs must configure their routes -to communicate across the peering connection. - -.Prerequisites - -* Initiate and accept the VPC peer request. - -.Procedure - -. Log in to the AWS Web Console for the {product-title} AWS Account. -. Navigate to the *VPC Service*, then *Route Tables*. -. Select the Route Table for the {product-title} Cluster VPC. -+ -[NOTE] -==== -On some clusters, there may be more than one route table for a particular VPC. -Select the private one that has a number of explicitly associated subnets. -==== - -. Select the *Routes* tab, then *Edit*. -. Enter the Customer VPC CIDR block in the *Destination* text box. -. Enter the Peering Connection ID in the *Target* text box. -. Click *Save*. - -. You must complete the same process with the other VPC's CIDR block: -.. Log into the Customer AWS Web Console → *VPC Service* → *Route Tables*. -.. Select the Route Table for your VPC. -.. Select the *Routes* tab, then *Edit*. -.. Enter the {product-title} Cluster VPC CIDR block in the *Destination* text box. -.. 
Enter the Peering Connection ID in the *Target* text box. -.. Click *Save*. - -The VPC peering connection is now complete. Follow the verification procedure to -ensure connectivity across the peering connection is working. diff --git a/modules/dedicated-aws-vpc-initiating-peering.adoc b/modules/dedicated-aws-vpc-initiating-peering.adoc deleted file mode 100644 index 1ba72f76680c..000000000000 --- a/modules/dedicated-aws-vpc-initiating-peering.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-peering.adoc - -:_content-type: PROCEDURE -[id="dedicated-aws-vpc-initiating-peering"] -= Initiating the VPC peer request - -You can send a VPC peering connection request from the {product-title} AWS Account to the -Customer AWS Account. - -.Prerequisites - -* Gather the following information about the Customer VPC required to initiate the -peering request: -** Customer AWS account number -** Customer VPC ID -** Customer VPC Region -** Customer VPC CIDR -* Check the CIDR block used by the {product-title} Cluster VPC. If it overlaps or -matches the CIDR block for the Customer VPC, then peering between these two VPCs -is not possible; see the Amazon VPC -link:https://docs.aws.amazon.com/vpc/latest/peering/invalid-peering-configurations.html[Unsupported VPC Peering Configurations] -documentation for details. If the CIDR blocks do not overlap, you can continue -with the procedure. - -.Procedure - -. Log in to the Web Console for the {product-title} AWS Account and navigate to the -*VPC Dashboard* in the region where the cluster is being hosted. -. Go to the *Peering Connections* page and click the *Create Peering Connection* -button. -. Verify the details of the account you are logged in to and the details of the -account and VPC you are connecting to: -.. *Peering connection name tag*: Set a descriptive name for the VPC Peering Connection. -.. *VPC (Requester)*: Select the {product-title} Cluster VPC ID from the dropdown -*list. -.. *Account*: Select *Another account* and provide the Customer AWS Account number -*(without dashes). -.. *Region*: If the Customer VPC Region differs from the current region, select -*Another Region* and select the customer VPC Region from the dropdown list. -.. *VPC (Accepter)*: Set the Customer VPC ID. -. Click *Create Peering Connection*. -. Confirm that the request enters a *Pending* state. If it enters a *Failed* -state, confirm the details and repeat the process. diff --git a/modules/dedicated-aws-vpc-peering-terms.adoc b/modules/dedicated-aws-vpc-peering-terms.adoc deleted file mode 100644 index 159ded0b695f..000000000000 --- a/modules/dedicated-aws-vpc-peering-terms.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-peering.adoc - -[id="dedicated-aws-vpc-peering-terms"] -= VPC peering terms - -When setting up a VPC peering connection between two VPCs on two separate AWS -accounts, the following terms are used: - -[horizontal] -{product-title} AWS Account:: The AWS account that contains the {product-title} cluster. -{product-title} Cluster VPC:: The VPC that contains the {product-title} cluster. -Customer AWS Account:: Your non-{product-title} AWS Account that you would like to peer with. -Customer VPC:: The VPC in your AWS Account that you would like to peer with. -Customer VPC Region:: The region where the customer's VPC resides. 
- -[NOTE] -==== -As of July 2018, AWS supports inter-region VPC peering between all commercial regions link:https://aws.amazon.com/vpc/faqs/#Peering_Connections[excluding China]. -==== diff --git a/modules/dedicated-aws-vpc-verifying-troubleshooting.adoc b/modules/dedicated-aws-vpc-verifying-troubleshooting.adoc deleted file mode 100644 index 85f30e1b9b85..000000000000 --- a/modules/dedicated-aws-vpc-verifying-troubleshooting.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-peering.adoc - -:_content-type: PROCEDURE -[id="dedicated-aws-vpc-verifying-troubleshooting"] -= Verifying and troubleshooting VPC peering - -After you set up a VPC peering connection, it is best to confirm it has been -configured and is working correctly. - -.Prerequisites - -* Initiate and accept the VPC peer request. -* Configure the routing tables. - -.Procedure - -* In the AWS console, look at the route table for the cluster VPC that is peered. -Ensure that the steps for configuring the routing tables were followed and that -there is a route table entry pointing the VPC CIDR range destination to the -peering connection target. -+ -If the routes look correct on both the {product-title} Cluster VPC route table -and Customer VPC route table, then the connection should be tested using the -`netcat` method below. If the test calls are successful, then VPC peering is -working correctly. - -* To test network connectivity to an endpoint device, `nc` (or `netcat`) is a -helpful troubleshooting tool. It is included in the default image and provides -quick and clear output if a connection can be established: - -.. Create a temporary pod using the `busybox` image, which cleans up after itself: -+ ----- -$ oc run netcat-test \ - --image=busybox -i -t \ - --restart=Never --rm \ - -- /bin/sh ----- - -.. Check the connection using `nc`. -+ --- -* Example successful connection results: -+ ----- -/ nc -zvv 192.168.1.1 8080 -10.181.3.180 (10.181.3.180:8080) open -sent 0, rcvd 0 ----- - -* Example failed connection results: -+ ----- -/ nc -zvv 192.168.1.2 8080 -nc: 10.181.3.180 (10.181.3.180:8081): Connection refused -sent 0, rcvd 0 ----- --- - -.. Exit the container, which automatically deletes the Pod: -+ ----- -/ exit ----- diff --git a/modules/dedicated-aws-vpn-creating.adoc b/modules/dedicated-aws-vpn-creating.adoc deleted file mode 100644 index ed6974d85835..000000000000 --- a/modules/dedicated-aws-vpn-creating.adoc +++ /dev/null @@ -1,99 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-vpn.adoc - -:_content-type: PROCEDURE -[id="dedicated-aws-vpn-creating"] -= Creating a VPN connection - -You can configure an Amazon Web Services (AWS) {product-title} cluster to use a -customer's on-site hardware VPN device using the following procedures. - -.Prerequisites - -* Hardware VPN gateway device model and software version, for example Cisco ASA -running version 8.3. See the Amazon VPC -link:https://docs.aws.amazon.com/vpc/latest/adminguide/Introduction.html#DevicesTested[Network Administrator Guide] -to confirm whether your gateway device is supported by AWS. -* Public, static IP address for the VPN gateway device. -* BGP or static routing: if BGP, the ASN is required. If static routing, you must -configure at least one static route. -* Optional: IP and Port/Protocol of a reachable service to test the VPN connection. 
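The procedure that follows creates the customer gateway and VPN connection in the AWS console. If you manage AWS resources from the CLI instead, the customer gateway step can be performed with a command along the lines of the following sketch, where the public IP address and BGP ASN are example values that you replace with the values for your VPN gateway device:

[source,terminal]
----
$ aws ec2 create-customer-gateway \
    --type ipsec.1 \
    --public-ip 203.0.113.12 \
    --bgp-asn 65000
----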
- -[id="dedicated-aws-vpn-creating-configuring"] -== Configuring the VPN connection - -.Procedure - -. Log in to the {product-title} AWS Account Dashboard, and navigate to the VPC Dashboard. -. Click on *Your VPCs* and identify the name and VPC ID for the VPC containing the {product-title} cluster. -. From the VPC Dashboard, click *Customer Gateway*. -. Click *Create Customer Gateway* and give it a meaningful name. -. Select the routing method: *Dynamic* or *Static*. -. If Dynamic, enter the BGP ASN in the field that appears. -. Paste in the VPN gateway endpoint IP address. -. Click *Create*. -. If you do not already have a Virtual Private Gateway attached to the intended VPC: -.. From the VPC Dashboard, click on *Virtual Private Gateway*. -.. Click *Create Virtual Private Gateway*, give it a meaningful name, and click *Create*. -.. Leave the default Amazon default ASN. -.. Select the newly created gateway, click *Attach to VPC*, and attach it to the cluster VPC you identified earlier. - -[id="dedicated-aws-vpn-creating-establishing"] -== Establishing the VPN Connection - -.Procedure - -. From the VPC dashboard, click on *Site-to-Site VPN Connections*. -. Click *Create VPN Connection*. -.. Give it a meaningful name tag. -.. Select the virtual private gateway created previously. -.. For Customer Gateway, select *Existing*. -.. Select the customer gateway device by name. -.. If the VPN will use BGP, select *Dynamic*, otherwise select *Static*. Enter -Static IP CIDRs. If there are multiple CIDRs, add each CIDR as *Another Rule*. -.. Click *Create*. -.. Wait for VPN status to change to *Available*, approximately 5 to 10 minutes. -. Select the VPN you just created and click *Download Configuration*. -.. From the dropdown list, select the vendor, platform, and version of the customer -gateway device, then click *Download*. -.. The *Generic* vendor configuration is also available for retrieving information -in a plain text format. - -[NOTE] -==== -After the VPN connection has been established, be sure to set up Route -Propagation or the VPN may not function as expected. -==== - -[NOTE] -==== -Note the VPC subnet information, which you must add to your configuration as the -remote network. -==== - -[id="dedicated-aws-vpn-creating-propagation"] -== Enabling VPN route propagation - -After you have set up the VPN connection, you must ensure that route propagation -is enabled so that the necessary routes are added to the VPC's route table. - -.Procedure - -. From the VPC Dashboard, click on *Route Tables*. -. Select the private Route table associated with the VPC that contains your -{product-title} cluster. -+ -[NOTE] -==== -On some clusters, there may be more than one route table for a particular VPC. -Select the private one that has a number of explicitly associated subnets. -==== -. Click on the *Route Propagation* tab. -. In the table that appears, you should see the virtual private gateway you -created previously. Check the value in the *Propagate column*. -.. If Propagate is set to *No*, click *Edit route propagation*, check the Propagate -checkbox next to the virtual private gateway's name and click *Save*. - -After you configure your VPN tunnel and AWS detects it as *Up*, your static or -BGP routes are automatically added to the route table. 
diff --git a/modules/dedicated-aws-vpn-troubleshooting.adoc b/modules/dedicated-aws-vpn-troubleshooting.adoc deleted file mode 100644 index 3db525ad6248..000000000000 --- a/modules/dedicated-aws-vpn-troubleshooting.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-vpn.adoc - -[id="dedicated-aws-vpn-troubleshooting"] -= Troubleshooting the VPN connection - -[discrete] -[id="dedicated-aws-vpn-tunnel-down"] -== Tunnel does not connect - -If the tunnel connection is still *Down*, there are several things you can verify: - -* The AWS tunnel will not initiate a VPN connection. The connection attempt must be initiated from the Customer Gateway. -* Ensure that your source traffic is coming from the same IP as the configured customer gateway. AWS will silently drop all traffic to the gateway whose source IP address does not match. -* Ensure that your configuration matches values link:https://docs.aws.amazon.com/vpc/latest/adminguide/Introduction.html#CGRequirements[supported by AWS]. This includes IKE versions, DH groups, IKE lifetime, and more. -* Recheck the route table for the VPC. Ensure that propagation is enabled and that there are entries in the route table that have the virtual private gateway you created earlier as a target. -* Confirm that you do not have any firewall rules that could be causing an interruption. -* Check if you are using a policy-based VPN as this can cause complications depending on how it is configured. -* Further troubleshooting steps can be found at the link:https://aws.amazon.com/premiumsupport/knowledge-center/vpn-tunnel-troubleshooting/[AWS Knowledge Center]. - -[discrete] -[id="dedicated-aws-vpn-tunnel-stay-connected"] -== Tunnel does not stay connected - -If the tunnel connection has trouble staying *Up* consistently, know that all -AWS tunnel connections must be initiated from your gateway. AWS tunnels -link:https://docs.aws.amazon.com/vpn/latest/s2svpn/VPC_VPN.html#CustomerGateway[do -not initiate tunneling]. - -Red Hat recommends setting up an SLA Monitor (Cisco ASA) or some device on your -side of the tunnel that constantly sends "interesting" traffic, for example -`ping`, `nc`, or `telnet`, at any IP address configured within the VPC CIDR -range. It does not matter whether the connection is successful, just that the -traffic is being directed at the tunnel. - -[discrete] -[id="dedicated-aws-vpn-secondary-tunnel-down"] -== Secondary tunnel in Down state - -When a VPN tunnel is created, AWS creates an additional failover tunnel. -Depending upon the gateway device, sometimes the secondary tunnel will be seen -as in the *Down* state. - -The AWS Notification is as follows: - ----- -You have new non-redundant VPN connections - -One or more of your vpn connections are not using both tunnels. This mode of -operation is not highly available and we strongly recommend you configure your -second tunnel. View your non-redundant VPN connections. 
----- diff --git a/modules/dedicated-aws-vpn-verifying.adoc b/modules/dedicated-aws-vpn-verifying.adoc deleted file mode 100644 index d0a766715fea..000000000000 --- a/modules/dedicated-aws-vpn-verifying.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-vpn.adoc - -:_content-type: PROCEDURE -[id="dedicated-aws-vpn-verifying"] -= Verifying the VPN connection - -After you have set up your side of the VPN tunnel, you can verify that the -tunnel is up in the AWS console and that connectivity across the tunnel is -working. - -.Prerequisites - -* Created a VPN connection. - -.Procedure - -. *Verify the tunnel is up in AWS.* - -.. From the VPC Dashboard, click on *VPN Connections*. -.. Select the VPN connection you created previously and click the *Tunnel Details* tab. -.. You should be able to see that at least one of the VPN tunnels is *Up*. - -. *Verify the connection.* -+ -To test network connectivity to an endpoint device, `nc` (or `netcat`) is a -helpful troubleshooting tool. It is included in the default image and provides -quick and clear output if a connection can be established: - -.. Create a temporary pod using the `busybox` image, which cleans up after itself: -+ ----- -$ oc run netcat-test \ - --image=busybox -i -t \ - --restart=Never --rm \ - -- /bin/sh ----- - -.. Check the connection using `nc`. -+ --- -* Example successful connection results: -+ ----- -/ nc -zvv 192.168.1.1 8080 -10.181.3.180 (10.181.3.180:8080) open -sent 0, rcvd 0 ----- - -* Example failed connection results: -+ ----- -/ nc -zvv 192.168.1.2 8080 -nc: 10.181.3.180 (10.181.3.180:8081): Connection refused -sent 0, rcvd 0 ----- --- - -.. Exit the container, which automatically deletes the Pod: -+ ----- -/ exit ----- diff --git a/modules/dedicated-cluster-admin-enable.adoc b/modules/dedicated-cluster-admin-enable.adoc deleted file mode 100644 index 00259b718368..000000000000 --- a/modules/dedicated-cluster-admin-enable.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/cluster-admin-role.adoc - -[id="dedicated-cluster-admin-enable"] -= Enabling the cluster-admin role for your cluster - -The cluster-admin role must be enabled at the cluster level before it can be assigned to a user. - -.Prerequisites -. Open a technical support case with Red Hat to request that `cluster-admin` be enabled for your cluster. - -.Procedure -. In {cluster-manager}, select the cluster you want to assign cluster-admin privileges. -. Under the *Actions* dropdown menu, select *Allow cluster-admin access*. diff --git a/modules/dedicated-cluster-admin-grant.adoc b/modules/dedicated-cluster-admin-grant.adoc deleted file mode 100644 index bd2cd2c01c17..000000000000 --- a/modules/dedicated-cluster-admin-grant.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/cluster-admin-role.adoc - -[id="dedicated-cluster-admin-grant"] -= Granting the cluster-admin role to users - -After enabling cluster-admin rights on your cluster, you can assign the role to users. - -.Prerequisites -* Cluster access with cluster owner permissions - -.Procedure -. In {cluster-manager}, select the cluster you want to assign cluster-admin privileges. -. Under the *Access Control* tab, locate the *Cluster Administrative Users* section. Click *Add user*. -. 
After determining an appropriate User ID, select *cluster-admin* from the *Group* selection, then click *Add user*. -+ -[NOTE] -==== -Cluster-admin user creation can take several minutes to complete. -==== -+ -[NOTE] -==== -Existing dedicated-admin users cannot elevate their role to cluster-admin. A new user must be created with the cluster-admin role assigned. -==== diff --git a/modules/dedicated-cluster-install-deploy.adoc b/modules/dedicated-cluster-install-deploy.adoc deleted file mode 100644 index f4fe1c3c2f66..000000000000 --- a/modules/dedicated-cluster-install-deploy.adoc +++ /dev/null @@ -1,143 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/dedicated-cluster-deploying.adoc - -[id="dedicated-cluster-install-deploy"] - -= Installing OpenShift Logging and OpenShift Elasticsearch Operators - -You can use the {product-title} console to install OpenShift Logging by deploying instances of -the OpenShift Logging and OpenShift Elasticsearch Operators. The Red Hat OpenShift Logging Operator -creates and manages the components of the logging stack. The OpenShift Elasticsearch Operator -creates and manages the Elasticsearch cluster used by OpenShift Logging. - -[NOTE] -==== -The OpenShift Logging solution requires that you install both the -Red Hat OpenShift Logging Operator and OpenShift Elasticsearch Operator. When you deploy an instance -of the Red Hat OpenShift Logging Operator, it also deploys an instance of the OpenShift Elasticsearch -Operator. -==== - -Your OpenShift Dedicated cluster includes 600 GiB of persistent storage that is -exclusively available for deploying Elasticsearch for OpenShift Logging. - -Elasticsearch is a memory-intensive application. Each Elasticsearch node needs -16Gi of memory for both memory requests and limits. Each Elasticsearch node can -operate with a lower memory setting, though this is not recommended for -production deployments. - -.Procedure - -. Install the OpenShift Elasticsearch Operator from the OperatorHub: - -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. - -.. Choose *OpenShift Elasticsearch Operator* from the list of available Operators, and click *Install*. - -.. On the *Install Operator* page, under *A specific namespace on the cluster* select *openshift-logging*. -Then, click *Install*. - -. Install the Red Hat OpenShift Logging Operator from the OperatorHub: - -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. - -.. Choose *Red Hat OpenShift Logging* from the list of available Operators, and click *Install*. - -.. On the *Install Operator* page, under *A specific namespace on the cluster* select *openshift-logging*. -Then, click *Install*. - -. Verify the operator installations: - -.. Switch to the *Operators* → *Installed Operators* page. - -.. Ensure that *Red Hat OpenShift Logging* and *OpenShift Elasticsearch* Operators are listed in the -*openshift-logging* project with a *Status* of *InstallSucceeded*. -+ -[NOTE] -==== -During installation an operator might display a *Failed* status. If the operator then installs with an *InstallSucceeded* message, -you can safely ignore the *Failed* message. -==== -+ -If either operator does not appear as installed, to troubleshoot further: -+ -* Switch to the *Operators* → *Installed Operators* page and inspect -the *Status* column for any errors or failures. -* Switch to the *Workloads* → *Pods* page and check the logs in each pod in the -`openshift-logging` project that is reporting issues. - -. 
Create and deploy an OpenShift Logging instance: - -.. Switch to the *Operators* → *Installed Operators* page. - -.. Click the installed *Red Hat OpenShift Logging* Operator. - -.. Under the *Details* tab, in the *Provided APIs* section, in the -*Cluster Logging* box, click *Create Instance* . Select *YAML View* and paste the following YAML definition into the window -that displays. -+ -.Cluster Logging custom resource (CR) -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - namespace: "openshift-logging" -spec: - managementState: "Managed" - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - storage: - storageClassName: gp2 - size: "200Gi" - redundancyPolicy: "SingleRedundancy" - nodeSelector: - node-role.kubernetes.io/worker: "" - resources: - limits: - memory: "16Gi" - requests: - memory: "16Gi" - visualization: - type: "kibana" - kibana: - replicas: 1 - nodeSelector: - node-role.kubernetes.io/worker: "" - collection: - logs: - type: "fluentd" - fluentd: {} - nodeSelector: - node-role.kubernetes.io/worker: "" ----- - -.. Click *Create* to deploy the logging instance, which creates the Cluster -Logging and Elasticsearch custom resources. - -. Verify that the pods for the OpenShift Logging instance deployed: - -.. Switch to the *Workloads* → *Pods* page. - -.. Select the *openshift-logging* project. -+ -You should see several pods for OpenShift Logging, Elasticsearch, Fluentd, and Kibana similar to the following list: -+ -* cluster-logging-operator-cb795f8dc-xkckc -* elasticsearch-cdm-b3nqzchd-1-5c6797-67kfz -* elasticsearch-cdm-b3nqzchd-2-6657f4-wtprv -* elasticsearch-cdm-b3nqzchd-3-588c65-clg7g -* fluentd-2c7dg -* fluentd-9z7kk -* fluentd-br7r2 -* fluentd-fn2sb -* fluentd-pb2f8 -* fluentd-zqgqx -* kibana-7fb4fd4cc9-bvt4p - -. Access the OpenShift Logging interface, *Kibana*, from the *Observe* → -*Logging* page of the {product-title} web console. diff --git a/modules/dedicated-configuring-your-application-routes.adoc b/modules/dedicated-configuring-your-application-routes.adoc deleted file mode 100644 index 72bf3725d16f..000000000000 --- a/modules/dedicated-configuring-your-application-routes.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * welcome/accessing-your-services.adoc - -[id="dedicated-configuring-your-application-routes_{context}"] -= Configuring your application routes - -When your cluster is provisioned, an Elastic Load Balancing (ELB) load balancer is created -to route application traffic into the cluster. The domain for your ELB is configured to route application traffic via -`http(s)://*.<cluster-id>.<shard-id>.p1.openshiftapps.com`. The `<shard-id>` is a -random four-character string assigned to your cluster at creation time. - -If you want to use custom domain names for your application routes, {product-title} supports -CNAME records in your DNS configuration that point to -`elb.apps.<cluster-id>.<shard-id>.p1.openshiftapps.com`. While `elb` is recommended as a -reminder for where this record is pointing, you can use any string for this -value. You can create these CNAME records for each custom route you have, or you -can create a wildcard CNAME record. For example: - -[source,text] ----- -*.openshift.example.com CNAME elb.apps.my-example.a1b2.p1.openshiftapps.com ----- - -This allows you to create routes like *_app1.openshift.example.com_* and -*_app2.openshift.example.com_* without having to update your DNS every time. 
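As an illustration of how such a custom hostname is consumed, the following sketch creates an edge-terminated route for an existing service and then checks that the hostname resolves through the wildcard CNAME record. The service name and hostnames are placeholders, not values taken from your cluster.

[source,terminal]
----
# Create a route that uses a custom hostname covered by the wildcard CNAME record
$ oc create route edge app1 \
    --service=httpd-example \
    --hostname=app1.openshift.example.com

# Confirm that the custom hostname resolves through the wildcard CNAME record
$ dig +short app1.openshift.example.com CNAME
----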
- -//// - -Customers with configured VPC peering or VPN connections have the option of -requesting a second ELB, so that application routes can be configured as -internal-only or externally available. The domain for this ELB will be identical -to the first, with a different `<shard-id>` value. By default, application -routes are handled by the internal-only router. To expose an application or -service externally, you must create a new route with a specific label, -`route=external`. - -To expose a new route for an existing service, apply the label `route=external` -and define a hostname that contains the secondary, public router shard ID: - ----- -$ oc expose service <service-name> -l route=external --name=<custom-route-name> --hostname=<custom-hostname>.<shard-id>.<cluster-id>.openshiftapps.com ----- - -Alternatively, you can use a custom domain: - ----- -$ oc expose service <service-name> -l route=external --name=<custom-route-name> --hostname=<custom-domain> ----- - -//// diff --git a/modules/dedicated-creating-your-cluster.adoc b/modules/dedicated-creating-your-cluster.adoc deleted file mode 100644 index fefa226154a9..000000000000 --- a/modules/dedicated-creating-your-cluster.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * getting_started/accessing-your-services.adoc - -[id="dedicated-creating-your-cluster_{context}"] -= Creating your cluster - -Use the following steps to create your {product-title} cluster. - -.Procedure - -. Log in to {cluster-manager-url}. - -. Select *Create Cluster* -> *Red Hat OpenShift Dedicated*. - -. Enter your *Cluster name*, number of *Compute nodes*, and select an *AWS Region*. - -. Select your *Node Type*. The number and types of nodes available to you depend -upon your {product-title} subscription. - -. If you want to configure your networking IP ranges under *Advanced Options*, the -following are the default ranges available to use: - -.. Node CIDR: 10.0.0.0/16 - -.. Service CIDR: 172.30.0.0/16 - -.. Pod CIDR: 10.128.0.0/14 - -. Add your Identity provider by clicking the *Add OAuth Configuration* link or using the *Access Control* tab. - -. Add a _Dedicated Admin_ user by clicking the *Access Control* tab, then *Add User*. - -. Input the user's name, then click *Add*. - -In the *Overview* tab under the *Details* heading will have a *Status* -indicator. This will indicate that your cluster is *Ready* for use. diff --git a/modules/dedicated-enable-private-cluster-existing.adoc b/modules/dedicated-enable-private-cluster-existing.adoc deleted file mode 100644 index 01edb0d53b12..000000000000 --- a/modules/dedicated-enable-private-cluster-existing.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-private-cluster.adoc - -[id="dedicated-enable-private-cluster-existing"] -= Enabling private cluster on an existing cluster - -You can enable private clusters after a cluster has been created: - -.Prerequisites - -* AWS VPC Peering, VPN, DirectConnect, or TransitGateway has been configured to allow private access. - -.Procedure - -. Access your cluster in {cluster-manager}. -. Navigate to the *Networking* tab. -. Select *Make API private* under *Master API endpoint* and click *Change settings*. -+ -[NOTE] -==== -Transitioning your cluster between private and public can take several minutes to complete. 
-==== diff --git a/modules/dedicated-enable-private-cluster-new.adoc b/modules/dedicated-enable-private-cluster-new.adoc deleted file mode 100644 index f711bea16ff4..000000000000 --- a/modules/dedicated-enable-private-cluster-new.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-private-cluster.adoc - -[id="dedicated-enable-private-cluster-new"] -= Enabling private cluster on a new cluster - -You can enable private cluster settings when creating a new cluster: - -.Prerequisites - -* AWS VPC Peering, VPN, DirectConnect, or TransitGateway has been configured to allow private access. - -.Procedure - -. In {cluster-manager-first}, click *Create cluster* and select *{product-title}*. -. Configure your cluster details, then select *Advanced* in the Networking section. -. Determine your CIDR requirements for your network and input the required fields. -+ -[IMPORTANT] -==== -CIDR configurations cannot be changed later. Confirm your selections with your network administrator before proceeding. -==== -. Under *Cluster Privacy*, select *Private*. diff --git a/modules/dedicated-enable-public-cluster.adoc b/modules/dedicated-enable-public-cluster.adoc deleted file mode 100644 index 9ac737c6ecbc..000000000000 --- a/modules/dedicated-enable-public-cluster.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-private-cluster.adoc - -[id="dedicated-enable-public-cluster"] -= Enabling public cluster on a private cluster - -You can set a private cluster to public facing: - -.Procedure - -. Access your cluster in {cluster-manager-first}. -. Navigate to the *Networking* tab. -. Deselect *Make API private* under *Master API endpoint* and click *Change settings*. -+ -[NOTE] -==== -Transitioning your cluster between private and public can take several minutes to complete. -==== diff --git a/modules/dedicated-exposing-TCP-services.adoc b/modules/dedicated-exposing-TCP-services.adoc deleted file mode 100644 index d514d1d68221..000000000000 --- a/modules/dedicated-exposing-TCP-services.adoc +++ /dev/null @@ -1,148 +0,0 @@ -// Module included in the following assemblies: -// -// * welcome/accessing-your-services.adoc - -[id="dedicated-exposing-TCP-services_{context}"] -= Exposing TCP services - -{product-title} routes expose applications by proxying traffic through -HTTP/HTTPS(SNI)/TLS(SNI) to pods and services. A -link:https://kubernetes.io/docs/concepts/services-networking/#loadbalancer[LoadBalancer] -service creates an Elastic Load Balancing (ELB) load balancer for your {product-title} -cluster, enabling direct TCP access to applications exposed by your LoadBalancer -service. - -[NOTE] -==== -LoadBalancer services require an additional purchase. Contact your sales team if -you are interested in using LoadBalancer services for your {product-title} -cluster. -==== - -== Checking your LoadBalancer Quota - -By purchasing LoadBalancer services, you are provided with a quota of -LoadBalancers available for your {product-title} cluster. - -[source,terminal] ----- -$ oc describe clusterresourcequota loadbalancer-quota ----- - -.Example output -[source,text] ----- -Name: loadbalancer-quota -Labels: <none> -... 
-Resource Used Hard --------- ---- ---- -services.loadbalancers 0 4 ----- - -== Exposing TCP service - -You can expose your applications over an external LoadBalancer service, enabling -access over the public internet. - -[source,terminal] ----- -$ oc expose dc httpd-example --type=LoadBalancer --name=lb-service ----- - -.Example output -[source,text] ----- -service/lb-service created ----- - -== Creating an internal-only TCP service - -You can alternatively expose your applications internally only, enabling access -only through AWS VPC Peering or a VPN connection. - -[source,terminal] ----- -$ oc expose dc httpd-example --type=LoadBalancer --name=internal-lb --dry-run -o yaml | awk '1;/metadata:/{ print " annotations:\n service.beta.kubernetes.io/aws-load-balancer-internal: \"true\"" }' | oc create -f - ----- - -.Example output -[source,terminal] ----- -service/internal-lb created ----- - -== Enabling LoadBalancer access logs - -You may, optionally, create an S3 bucket within your own AWS account, and configure the LoadBalancer service to send access logs to this S3 bucket at predefined intervals. - -=== Prerequisites - -You must first create the S3 bucket within your own AWS account, in the same AWS region that your {product-title} cluster is deployed. This S3 bucket can be configured with all public access blocked, including system permissions. After your S3 bucket is created, you must attach a policy to your bucket as https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html#attach-bucket-policy[outlined by AWS]. - -=== Configuring the LoadBalancer service - -Update and apply the following annotations to your service YAML definition, prior to creating the object in your cluster. - -[source,yaml] ----- -metadata: - name: my-service - annotations: - # Specifies whether access logs are enabled for the load balancer - service.beta.kubernetes.io/aws-load-balancer-access-log-enabled: "true" - # The interval for publishing the access logs. You can specify an interval of either 5 or 60 (minutes). - service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval: "60" - # The name of the Amazon S3 bucket where the access logs are stored - service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name: "my-bucket" - # The logical hierarchy you created for your Amazon S3 bucket, for example `my-bucket-prefix/prod` - # This must match the prefix specified in the S3 policy - service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix: "my-bucket-prefix/prod" ----- - -=== Creating the LoadBalancer service - -Once the annotations have been saved into a YAML file, you can create it from the command line: - -[source,terminal] ----- -$ oc create -f loadbalancer.yaml ----- - -.Example output -[source,text] ----- -service/my-service created ----- - -== Using your TCP Service - -Once your LoadBalancer service is created, you can access your service by using -the URL provided to you by {product-title}. The `LoadBalancer Ingress` value is -a URL unique to your service that remains static as long as the service is not -deleted. If you prefer to use a custom domain, you can create a CNAME DNS record -for this URL. 
- -[source,terminal] ----- -$ oc describe svc lb-service ----- - -.Example output -[source,text] ----- -Name: lb-service -Namespace: default -Labels: app=httpd-example -Annotations: <none> -Selector: name=httpd-example -Type: LoadBalancer -IP: 10.120.182.252 -LoadBalancer Ingress: a5387ba36201e11e9ba901267fd7abb0-1406434805.us-east-1.elb.amazonaws.com -Port: <unset> 8080/TCP -TargetPort: 8080/TCP -NodePort: <unset> 31409/TCP -Endpoints: <none> -Session Affinity: None -External Traffic Policy: Cluster ----- diff --git a/modules/dedicated-logging-in-and-verifying-permissions.adoc b/modules/dedicated-logging-in-and-verifying-permissions.adoc deleted file mode 100644 index cc077569a2cd..000000000000 --- a/modules/dedicated-logging-in-and-verifying-permissions.adoc +++ /dev/null @@ -1,84 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/dedicated-admin-role.adoc - -[id="dedicated-admin-logging-in-verifying-permissions_{context}"] -= Logging in and verifying permissions - -You can log in as an {product-title} cluster administration via the web console -or CLI, just as you would if you were an application developer. - -When you log in to the web console, all user-created projects across the cluster -are visible from the main *Projects* page. - -Use the standard `oc login` command to log in with the CLI: - -[source,terminal] ----- -$ oc login <your_instance_url> ----- - -All projects are visible using: - -[source,terminal] ----- -$ oc get projects ----- - -When your account has the `dedicated-admins-cluster` cluster role bound to it, -you are automatically bound to the `dedicated-admins-project` for any new -projects that are created by users in the cluster. - -To verify if your account has administrator privileges, run the following -command against a user-created project to view its default role bindings. If you -are a cluster administrator, you will see your account listed under subjects for -the `dedicated-admins-project-0` and `dedicated-admins-project-1` role bindings -for the project: - -[source,terminal] ----- -$ oc describe rolebinding.rbac -n <project_name> ----- - -.Example output -[source,text] ----- -Name: admin -Labels: <none> -Annotations: <none> -Role: - Kind: ClusterRole - Name: admin -Subjects: - Kind Name Namespace - ---- ---- --------- - User fred@example.com <1> - - -Name: dedicated-admins-project -Labels: <none> -Annotations: <none> -Role: - Kind: ClusterRole - Name: dedicated-admins-project -Subjects: - Kind Name Namespace - ---- ---- --------- - User alice@example.com <2> - User bob@example.com <2> -... ----- -<1> The `fred@example.com` user is a normal, project-scoped administrator for -this project. -<2> The `alice@example.com` and `bob@example.com` users are cluster -administrators. 
- -To view details on your increased permissions, and the sets of -verbs and resources associated with the `dedicated-admins-cluster` and -`dedicated-admins-project` roles, run the following: - -[source,terminal] ----- -$ oc describe clusterrole.rbac dedicated-admins-cluster -$ oc describe clusterrole.rbac dedicated-admins-project ----- diff --git a/modules/dedicated-managing-dedicated-administrators.adoc b/modules/dedicated-managing-dedicated-administrators.adoc deleted file mode 100644 index eb27a22a3c19..000000000000 --- a/modules/dedicated-managing-dedicated-administrators.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/dedicated-admin-role.adoc - -[id="dedicated-managing-dedicated-administrators_{context}"] -= Managing {product-title} administrators - -Administrator roles are managed using a `dedicated-admins` group on the cluster. Existing members of this group can edit membership via {cluster-manager-url}. - -[id="dedicated-administrators-adding-user_{context}"] -== Adding a user -. Navigate to the *Cluster Details* page and *Access Control* tab. -. Click *Add user*. (first user only) -. Enter the user name and select the group (*dedicated-admins*) -. Click *Add*. - -[id="dedicated-administrators-removing-user_{context}"] -== Removing a user -. Navigate to the *Cluster Details* page and *Access Control* tab. -. Click the 3 vertical dots to the right of the user / group combination to show a menu, then click on *Delete*. diff --git a/modules/dedicated-managing-quotas-and-limit-ranges.adoc b/modules/dedicated-managing-quotas-and-limit-ranges.adoc deleted file mode 100644 index 01b546be893a..000000000000 --- a/modules/dedicated-managing-quotas-and-limit-ranges.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/dedicated-admin-role.adoc - -[id="dedicated-managing-quotas-and-limit-ranges_{context}"] -= Managing quotas and limit ranges - -As an administrator, you are able to view, create, and modify quotas and limit -ranges on other projects. This allows you to better constrain how compute -resources and objects are consumed by users across the cluster. diff --git a/modules/dedicated-managing-service-accounts.adoc b/modules/dedicated-managing-service-accounts.adoc deleted file mode 100644 index 8a0264f21be4..000000000000 --- a/modules/dedicated-managing-service-accounts.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/dedicated-admin-role.adoc - -[id="dedicated-managing-service-accounts_{context}"] -= Managing service accounts - -Service accounts are API objects that exist within each project. To manage -service accounts, you can use the `oc` command with the `sa` or `serviceaccount` -object type or use the web console. - -The *dedicated-admin* service creates the *dedicated-admins* group. This group is -granted the roles at the cluster or individual project level. Users can be -assigned to this group and group membership defines who has OpenShift Dedicated -administrator access. However, by design, service accounts cannot be added to -regular groups. - -Instead, the dedicated-admin service creates a special project for this purpose -named *dedicated-admin*. The service account group for this project is granted -OpenShift Dedicated *admin* roles, granting OpenShift Dedicated administrator -access to all service accounts within the *dedicated-admin* project. 
These service -accounts can then be used to perform any actions that require OpenShift -Dedicated administrator access. - -Users that are members of the *dedicated-admins* group, and thus have been granted -the *dedicated-admin* role, have `edit` access to the *dedicated-admin* project. This -allows these users to manage the service accounts in this project and create new -ones as needed. - -To get a list of existing service accounts in the current project, run: - -[source,terminal] ----- -$ oc get sa ----- - -.Example output -[source,text] ----- -NAME SECRETS AGE -builder 2 2d -default 2 2d -deployer 2 2d ----- - -To create a new service account, run: - -[source,terminal] ----- -$ oc create sa <service-account-name> ----- - -As soon as a service account is created, two secrets are automatically added to -it: - -* an API token -* credentials for the OpenShift Container Registry - -These can be seen by describing the service account: - -[source,terminal] ----- -$ oc describe sa <service-account-name> ----- - -The system ensures that service accounts always have an API token and registry -credentials. - -The generated API token and registry credentials do not expire, but they can be -revoked by deleting the secret. When the secret is deleted, a new one is -automatically generated to take its place. diff --git a/modules/dedicated-scaling-your-cluster.adoc b/modules/dedicated-scaling-your-cluster.adoc deleted file mode 100644 index b2d7a5ce1101..000000000000 --- a/modules/dedicated-scaling-your-cluster.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * getting_started/scaling_your_cluster.adoc - -[id="dedicated-scaling-your-cluster_{context}"] -= Scaling your cluster - -To scale your {product-title} cluster: - -. From {cluster-manager-url}, click on the cluster you want to resize. - -. Click *Actions*, then *Scale Cluster*. - -. Select how many compute nodes are required, then click *Apply*. - -Scaling occurs automatically. In the *Overview* tab under the *Details* heading,the *Status* indicator shows that your cluster is *Ready* for use. diff --git a/modules/dedicated-storage-expanding-filesystem-pvc.adoc b/modules/dedicated-storage-expanding-filesystem-pvc.adoc deleted file mode 100644 index a8e88df9af5f..000000000000 --- a/modules/dedicated-storage-expanding-filesystem-pvc.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/expanding-persistent-volume.adoc - -[id="dedicated-storage-expanding-filesystem-pvc_{context}"] -= Expanding {product-title} Persistent Volume Claims (PVCs) - -Expanding PVCs based on volume types that need file system re-sizing, -such as AWS EBS, is a two-step process. -This process involves expanding volume objects in the cloud provider and -then expanding the file system on the actual node. These steps occur automatically -after the PVC object is edited and might require a pod restart to take effect. - -Expanding the file system on the node only happens when a new pod is started -with the volume. - -.Prerequisites - -* The controlling StorageClass must have `allowVolumeExpansion` set -to `true`. This is the default configuration in {product-title}. - -+ -[IMPORTANT] -==== -Decreasing the size of an Amazon Elastic Block Store (EBS) volume is not supported. However, you -can create a smaller volume and then migrate your data to it by using a -tool such as `oc rsync`. 
After modifying a volume, you must wait at least six hours before -making additional modifications to the same volume. -==== - -.Procedure - -. Edit the PVC and request a new size by editing the `spec.resources.requests.storage` -value. The following `oc patch` command will change the PVC's size: -+ ----- -$ oc patch pvc <pvc_name> -p '{"spec": {"resources": {"requests": {"storage": "8Gi"}}}}' ----- - -. After the cloud provider object has finished re-sizing, the PVC might be set to -`FileSystemResizePending`. The following command is used to check -the condition: -+ ----- -$ oc describe pvc mysql - -Name: mysql -Namespace: my-project -StorageClass: gp2 -Status: Bound -Volume: pvc-5fa3feb4-7115-4735-8652-8ebcfec91bb9 -Labels: app=cakephp-mysql-persistent - template=cakephp-mysql-persistent - template.openshift.io/template-instance-owner=6c7f7c56-1037-4105-8c08-55a6261c39ca -Annotations: pv.kubernetes.io/bind-completed: yes - pv.kubernetes.io/bound-by-controller: yes - volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/aws-ebs - volume.kubernetes.io/selected-node: ip-10-0-128-221.us-east-2.compute.internal - volume.kubernetes.io/storage-resizer: kubernetes.io/aws-ebs -Finalizers: [kubernetes.io/pvc-protection] -Capacity: 1Gi <1> -Access Modes: RWO -VolumeMode: Filesystem -Conditions: <2> - Type Status LastProbeTime LastTransitionTime Reason Message - ---- ------ ----------------- ------------------ ------ ------- - FileSystemResizePending True <Timestamp> <Timestamp> Waiting for user to (re-)start a Pod to - finish file system resize of volume on node. -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal WaitForFirstConsumer 36m persistentvolume-controller waiting for first consumer to be created before binding - Normal ProvisioningSucceeded 36m persistentvolume-controller Successfully provisioned volume - pvc-5fa3feb4-7115-4735-8652-8ebcfec91bb9 using - kubernetes.io/aws-ebs -Mounted By: mysql-1-q4nz7 <3> ----- -<1> The current capacity of the PVC. -<2> Any relevant conditions are displayed here. -<3> The pod that is currently mounting this volume - -. If the output of the previous command included a message to restart the pod, delete the mounting pod that it specified: -+ ----- -$ oc delete pod mysql-1-q4nz7 ----- - -. After the pod is running, the newly requested size is available and the -`FileSystemResizePending` condition is removed from the PVC. diff --git a/modules/defining-template-for-external-log-link.adoc b/modules/defining-template-for-external-log-link.adoc deleted file mode 100644 index d18d19988232..000000000000 --- a/modules/defining-template-for-external-log-link.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: PROCEDURE -[id="defining-template-for-external-log-links_{context}"] -= Defining a template for an external log link - -If you are connected to a service that helps you browse your logs, but you need -to generate URLs in a particular way, then you can define a template for your -link. - -.Prerequisites - -* You must have administrator privileges. - -.Procedure - -. From *Administration* -> *Custom Resource Definitions*, click on -*ConsoleExternalLogLink*. -. Select *Instances* tab -. 
Click *Create Console External Log Link* and edit the file: -+ -[source,yaml] ----- -apiVersion: console.openshift.io/v1 -kind: ConsoleExternalLogLink -metadata: - name: example -spec: - hrefTemplate: >- - https://example.com/logs?resourceName=${resourceName}&containerName=${containerName}&resourceNamespace=${resourceNamespace}&podLabels=${podLabels} - text: Example Logs ----- diff --git a/modules/delete-hosted-cluster.adoc b/modules/delete-hosted-cluster.adoc deleted file mode 100644 index 073e88185ba8..000000000000 --- a/modules/delete-hosted-cluster.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assembly: -// -// * hosted_control_planes/hcp-managing.adoc - -:_content-type: PROCEDURE -[id="delete-hosted-cluster_{context}"] -= Deleting a hosted cluster - -The steps to delete a hosted cluster differ depending on which provider you use. - -.Procedure - -* If the cluster is on AWS, follow the instructions in link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hypershift-cluster-destroy-aws[Destroying a hosted cluster on AWS]. - -* If the cluster is on bare metal, follow the instructions in link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hypershift-cluster-destroy-bm[Destroying a hosted cluster on bare metal]. - -* If the cluster is on {VirtProductName}, follow the instructions in link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hypershift-cluster-destroy-kubevirt[Destroying a hosted cluster on OpenShift Virtualization]. - -.Next steps - -If you want to disable the hosted control plane feature, see link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#disable-hosted-control-planes[Disabling the hosted control plane feature]. \ No newline at end of file diff --git a/modules/delete-kn-trigger.adoc b/modules/delete-kn-trigger.adoc deleted file mode 100644 index 784862981990..000000000000 --- a/modules/delete-kn-trigger.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * /serverless/eventing/triggers/delete-triggers-cli.adoc - -:_content-type: PROCEDURE -[id="delete-kn-trigger_{context}"] -= Deleting a trigger by using the Knative CLI - -You can use the `kn trigger delete` command to delete a trigger. - -.Prerequisites - -* The {ServerlessOperatorName} and Knative Eventing are installed on your {product-title} cluster. -* You have installed the Knative (`kn`) CLI. -* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {product-title}. - -.Procedure - -* Delete a trigger: -+ -[source,terminal] ----- -$ kn trigger delete <trigger_name> ----- - -.Verification - -. List existing triggers: -+ -[source,terminal] ----- -$ kn trigger list ----- - -. Verify that the trigger no longer exists: -+ -.Example output -[source,terminal] ----- -No triggers found. 
----- diff --git a/modules/deleting-a-namespace-using-the-web-console.adoc b/modules/deleting-a-namespace-using-the-web-console.adoc deleted file mode 100644 index b2d87dab26aa..000000000000 --- a/modules/deleting-a-namespace-using-the-web-console.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * virt/install/uninstalling-virt.adoc - -:_content-type: PROCEDURE -[id="deleting-a-namespace-using-the-web-console_{context}"] -= Deleting a namespace using the web console - -You can delete a namespace by using the {product-title} web console. - -.Prerequisites - -* You have access to an {product-title} cluster using an account with `cluster-admin` permissions. - -.Procedure - -. Navigate to *Administration* -> *Namespaces*. - -. Locate the namespace that you want to delete in the list of namespaces. - -. On the far right side of the namespace listing, select *Delete Namespace* from the -Options menu {kebab}. - -. When the *Delete Namespace* pane opens, enter the name of the namespace that -you want to delete in the field. - -. Click *Delete*. diff --git a/modules/deleting-a-project-using-the-CLI.adoc b/modules/deleting-a-project-using-the-CLI.adoc deleted file mode 100644 index 08308f10b8aa..000000000000 --- a/modules/deleting-a-project-using-the-CLI.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// applications/projects/working-with-projects.adoc - -:_content-type: PROCEDURE -[id="deleting-a-project-using-the-CLI_{context}"] -= Deleting a project using the CLI - -When you delete a project, the server updates the project status to -*Terminating* from *Active*. Then, the server clears all content from a project -that is in the *Terminating* state before finally removing the project. While a -project is in *Terminating* status, you cannot add new content to the project. -Projects can be deleted from the CLI or the web console. - -.Procedure - -. Run: -+ -[source,terminal] ----- -$ oc delete project <project_name> ----- diff --git a/modules/deleting-a-project-using-the-web-console.adoc b/modules/deleting-a-project-using-the-web-console.adoc deleted file mode 100644 index a06b7e2e7d1a..000000000000 --- a/modules/deleting-a-project-using-the-web-console.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/projects/working-with-projects.adoc - -:_content-type: PROCEDURE -[id="deleting-a-project-using-the-web-console_{context}"] -= Deleting a project using the web console - -You can delete a project by using the {product-title} web console. - -[NOTE] -==== -If you do not have permissions to delete the project, the *Delete Project* -option is not available. -==== - -.Procedure - -. Navigate to *Home* -> *Projects*. - -. Locate the project that you want to delete from the list of projects. - -. On the far right side of the project listing, select *Delete Project* from the -Options menu {kebab}. - -. When the *Delete Project* pane opens, enter the name of the project that -you want to delete in the field. - -. Click *Delete*. 
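If you also have `oc` CLI access, you can confirm that the deletion finished by querying the project. This is a minimal check; `<project_name>` is a placeholder.

[source,terminal]
----
$ oc get project <project_name>
----

While cleanup is in progress, the project status shows *Terminating*; after the deletion completes, the command returns a `NotFound` error.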
\ No newline at end of file diff --git a/modules/deleting-cluster.adoc b/modules/deleting-cluster.adoc deleted file mode 100644 index 57eb621134ba..000000000000 --- a/modules/deleting-cluster.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_install_access_delete_cluster/osd-deleting-a-cluster.adoc -// * osd_getting_started/osd-getting-started.adoc - -:_content-type: PROCEDURE -[id="deleting-cluster_{context}"] -= Deleting your cluster - -You can delete your {product-title} cluster in {cluster-manager-first}. - -* You logged in to {cluster-manager-url}. -* You created an {product-title} cluster. - -.Procedure - -. From {cluster-manager-url}, click on the cluster you want to delete. - -. Select *Delete cluster* from the *Actions* drop-down menu. - -. Type the name of the cluster highlighted in bold, then click *Delete*. Cluster deletion occurs automatically. diff --git a/modules/deleting-machine-pools-cli.adoc b/modules/deleting-machine-pools-cli.adoc deleted file mode 100644 index dc445f69d4fb..000000000000 --- a/modules/deleting-machine-pools-cli.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/rosa_nodes/rosa-managing-worker-nodes.adoc - -:_content-type: PROCEDURE -[id="deleting-machine-pools-cli{context}"] -= Deleting a machine pool using the ROSA CLI -You can delete a machine pool for your Red Hat OpenShift Service on AWS (ROSA) cluster by using the ROSA CLI. - -.Prerequisites - -ifdef::openshift-rosa[] -* You created a ROSA cluster. -* The cluster is in the ready state. -* You have an existing machine pool without any taints and with at least two instances for a single-AZ cluster or three instances for a multi-AZ cluster. -endif::openshift-rosa[] -ifndef::openshift-rosa[] -* You have created an {product-title} cluster. -endif::[] - -.Procedure -. From the ROSA CLI, run the following command: -+ -[source,terminal] ----- -$ rosa delete machinepool -c=<cluster_name> <machine_pool_ID> ----- -+ -.Example output -[source,terminal] ----- -? Are you sure you want to delete machine pool <machine_pool_ID> on cluster <cluster_name>? (y/N) ----- -. Enter 'y' to delete the machine pool. -+ -The selected machine pool is deleted. diff --git a/modules/deleting-machine-pools-ocm.adoc b/modules/deleting-machine-pools-ocm.adoc deleted file mode 100644 index e15a56d3320d..000000000000 --- a/modules/deleting-machine-pools-ocm.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/rosa_nodes/rosa-managing-worker-nodes.adoc -// * nodes/rosa-managing-worker-nodes.adoc -// * osd_cluster_admin/osd_nodes/osd-managing-worker-nodes.adoc - -:_content-type: PROCEDURE -[id="deleting-machine-pools-ocm{context}"] -ifndef::openshift-rosa[] -= Deleting a machine pool -endif::openshift-rosa[] -ifdef::openshift-rosa[] -= Deleting a machine pool using OpenShift Cluster Manager -endif::openshift-rosa[] - -You can delete a machine pool for your Red Hat OpenShift Service on AWS (ROSA) cluster by using OpenShift Cluster Manager. - -.Prerequisites - -ifdef::openshift-rosa[] -* You created a ROSA cluster. -* The cluster is in the ready state. -* You have an existing machine pool without any taints and with at least two instances for a single-AZ cluster or three instances for a multi-AZ cluster. -endif::openshift-rosa[] -ifndef::openshift-rosa[] -* You have created an {product-title} cluster. -* The newly created cluster is in the ready state. 
-endif::[] - -.Procedure -. From {cluster-manager-url}, navigate to the *Clusters* page and select the cluster that contains the machine pool that you want to delete. - -. On the selected cluster, select the *Machine pools* tab. - -. Under the *Machine pools* tab, click the options menu {kebab} for the machine pool that you want to delete. -. Click Delete. - -The selected machine pool is deleted. \ No newline at end of file diff --git a/modules/deleting-machine-pools.adoc b/modules/deleting-machine-pools.adoc deleted file mode 100644 index 472e1ceb87ad..000000000000 --- a/modules/deleting-machine-pools.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/rosa_nodes/rosa-managing-worker-nodes.adoc - -:_content-type: PROCEDURE -[id="deleting-machine-pools{context}"] -= Deleting a machine pool - -You can delete a machine pool in the event that your workload requirements have changed and your current machine pools no longer meet your needs. -// Over time, users may find that their workload needs have changed, and may want to modify the various machine pool settings. While many of these settings can be modified, certain settings (for example, instance types and availability zones) cannot be changed once a machine pool is created. If a user finds that these settings are no longer meeting their needs, they can delete the machine pool in question and create a new machine pool with the desired settings. -You can delete machine pools using the -ifdef::openshift-rosa[] -Openshift Cluster Manager or the ROSA CLI (`rosa`). -endif::openshift-rosa[] -ifndef::openshift-rosa[] -Openshift Cluster Manager. -endif::[] - -// Users that wish to delete the default machine pool that is automatically created during the installation of a {product-title} (ROSA) cluster can do so using the OCM or ROSA CLI. -// - -ifndef::openshift-rosa[] -.Prerequisites - -* You have created an {product-title} cluster. -* The cluster is in the ready state. -* You have an existing machine pool without any taints and with at least two instances for a single-AZ cluster or three instances for a multi-AZ cluster. - -.Procedure -. From {cluster-manager-url}, navigate to the *Clusters* page and select the cluster that contains the machine pool that you want to delete. - -. On the selected cluster, select the *Machine pools* tab. - -. Under the *Machine pools* tab, click the options menu {kebab} for the machine pool that you want to delete. -. Click *Delete*. - -The selected machine pool is deleted. -endif::openshift-rosa[] diff --git a/modules/deleting-service-cli.adoc b/modules/deleting-service-cli.adoc deleted file mode 100644 index 1d96f5e21be3..000000000000 --- a/modules/deleting-service-cli.adoc +++ /dev/null @@ -1,19 +0,0 @@ - -// Module included in the following assemblies: -// -// * assemblies/adding-service.adoc - -:_content-type: PROCEDURE -[id="deleting-service-cli_{context}"] -= Deleting an add-on service using the CLI - -You can delete an add-on service from your {product-title} cluster by using the {cluster-manager-first} CLI (`ocm`). 
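The delete command in the following procedure requires a cluster ID and an add-on ID. If you do not already have these values, you can typically look them up with the `ocm` CLI, as sketched below; the listing call uses the same API path that the delete command targets, and the exact output format can vary.

[source,terminal]
----
# Find the cluster ID
$ ocm list clusters

# List the add-ons installed on the cluster to find the add-on ID
$ ocm get api/clusters_mgmt/v1/clusters/<cluster_id>/addons
----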
- -.Procedure - -* To delete the add-on service from your cluster through {cluster-manager} CLI, enter the following command: -+ -[source,terminal] ----- -$ ocm delete api/clusters_mgmt/v1/clusters/<cluster_id>/addons/<addon_id> ----- diff --git a/modules/deleting-service.adoc b/modules/deleting-service.adoc deleted file mode 100644 index 5809ae44fbe2..000000000000 --- a/modules/deleting-service.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * assemblies/adding-service.adoc - -:_content-type: PROCEDURE -[id="deleting-service_{context}"] -= Deleting an add-on service using {cluster-manager-first} - -You can delete an add-on service from your {product-title} -ifdef::openshift-rosa[] -(ROSA) -endif::openshift-rosa[] -cluster by using {cluster-manager-first}. - -.Procedure - -. Navigate to the *Clusters* page in {cluster-manager-url}. - -. Click the cluster with the installed service that you want to delete. - -. Navigate to the *Add-ons* tab, and locate the installed service that you want to delete. - -. From the installed service option, click the menu and select *Uninstall add-on* from the drop-down menu. - -. You must type the name of the service that you want to delete in the confirmation message that appears. - -. Click *Uninstall*. You are returned to the *Add-ons* tab and an uninstalling state icon is present on the service option you deleted. diff --git a/modules/deleting-wmco-namespace.adoc b/modules/deleting-wmco-namespace.adoc deleted file mode 100644 index 14e230ab81d2..000000000000 --- a/modules/deleting-wmco-namespace.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * windows_containers/disabling-windows-container-workloads.adoc - -:_content-type: PROCEDURE -[id="deleting-wmco-namespace_{context}"] -= Deleting the Windows Machine Config Operator namespace - -You can delete the namespace that was generated for the Windows Machine Config Operator (WMCO) by default. - -.Prerequisites - -* The WMCO is removed from your cluster. - -.Procedure - -. Remove all Windows workloads that were created in the `openshift-windows-machine-config-operator` namespace: -+ -[source,terminal] ----- -$ oc delete --all pods --namespace=openshift-windows-machine-config-operator ----- - -. Verify that all pods in the `openshift-windows-machine-config-operator` namespace are deleted or are reporting a terminating state: -+ -[source,terminal] ----- -$ oc get pods --namespace openshift-windows-machine-config-operator ----- - -. Delete the `openshift-windows-machine-config-operator` namespace: -+ -[source,terminal] ----- -$ oc delete namespace openshift-windows-machine-config-operator ----- diff --git a/modules/deploy-app.adoc b/modules/deploy-app.adoc deleted file mode 100644 index 352ba6f15c4d..000000000000 --- a/modules/deploy-app.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_getting_started/rosa-getting-started.adoc -// * rosa_getting_started/rosa-quickstart-guide-ui.adoc -// * osd_getting_started/osd-getting-started.adoc - -:_content-type: PROCEDURE -[id="deploy-app_{context}"] -= Deploying an application from the Developer Catalog - -ifeval::["{context}" == "rosa-getting-started"] -:getting-started: -endif::[] -ifeval::["{context}" == "rosa-quickstart"] -:quickstart: -endif::[] - -From the {product-title} web console, you can deploy a test application from the Developer Catalog and expose it with a route. 
- -ifndef::quickstart[] -.Prerequisites - -* You logged in to {cluster-manager-url}. -* You created an {product-title} cluster. -* You configured an identity provider for your cluster. -* You added your user account to the configured identity provider. -endif::[] - -.Procedure - -. From the {cluster-manager} {hybrid-console-second}, click *Open console*. - -. In the *Administrator* perspective, select *Home* -> *Projects* -> *Create Project*. - -. Enter a name for your project and optionally add a *Display Name* and *Description*. - -. Click *Create* to create the project. - -. Switch to the *Developer* perspective and select *+Add*. Verify that the selected *Project* is the one that you just created. - -. In the *Developer Catalog* dialog, select *All services*. - -. In the *Developer Catalog* page, select *Languages* -> *JavaScript* from the menu. - -. Click *Node.js*, and then click *Create* to open the *Create Source-to-Image application* page. -+ -[NOTE] -==== -You might need to click *Clear All Filters* to display the *Node.js* option. -==== - -. In the *Git* section, click *Try sample*. - -. Add a unique name in the *Name* field. The value will be used to name the associated resources. - -. Confirm that *Deployment* and *Create a route* are selected. - -. Click *Create* to deploy the application. It will take a few minutes for the pods to deploy. - -. Optional: Check the status of the pods in the *Topology* pane by selecting your *nodejs* app and reviewing its sidebar. You must wait for the `nodejs` build to complete and for the `nodejs` pod to be in a *Running* state before continuing. - -. When the deployment is complete, click the route URL for the application, which has a format similar to the following: -+ ----- -https://nodejs-<project>.<cluster_name>.<hash>.<region>.openshiftapps.com/ ----- -+ -A new tab in your browser opens with a message similar to the following: -+ ----- -Welcome to your Node.js application on OpenShift ----- - -. Optional: Delete the application and clean up the resources that you created: -.. In the *Administrator* perspective, navigate to *Home* -> *Projects*. -.. Click the action menu for your project and select *Delete Project*. - -ifeval::["{context}" == "rosa-getting-started"] -:getting-started: -endif::[] -ifeval::["{context}" == "rosa-quickstart"] -:quickstart: -endif::[] diff --git a/modules/deploy-red-hat-openshift-container-storage.adoc b/modules/deploy-red-hat-openshift-container-storage.adoc deleted file mode 100644 index 089a18788feb..000000000000 --- a/modules/deploy-red-hat-openshift-container-storage.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/storage-configuration.adoc - -[options="header",cols="1,1"] -|=== - -|If you are looking for {rh-storage-first} information about... 
-|See the following {rh-storage-first} documentation: - -|What's new, known issues, notable bug fixes, and Technology Previews -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/4.12_release_notes[OpenShift Data Foundation 4.12 Release Notes] - -|Supported workloads, layouts, hardware and software requirements, sizing and scaling recommendations -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/planning_your_deployment[Planning your OpenShift Data Foundation 4.12 deployment] - -|Instructions on deploying {rh-storage} to use an external Red Hat Ceph Storage cluster -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/deploying_openshift_data_foundation_in_external_mode[Deploying OpenShift Data Foundation 4.12 in external mode] - -|Instructions on deploying {rh-storage} to local storage on bare metal infrastructure -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/deploying_openshift_data_foundation_using_bare_metal_infrastructure[Deploying OpenShift Data Foundation 4.12 using bare metal infrastructure] - -|Instructions on deploying {rh-storage} on Red Hat {product-title} VMware vSphere clusters -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/deploying_openshift_data_foundation_on_vmware_vsphere[Deploying OpenShift Data Foundation 4.12 on VMware vSphere] - -|Instructions on deploying {rh-storage} using Amazon Web Services for local or cloud storage -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/deploying_openshift_data_foundation_using_amazon_web_services[Deploying OpenShift Data Foundation 4.12 using Amazon Web Services] - -|Instructions on deploying and managing {rh-storage} on existing Red Hat {product-title} Google Cloud clusters -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/deploying_and_managing_openshift_data_foundation_using_google_cloud[Deploying and managing OpenShift Data Foundation 4.12 using Google Cloud] - -|Instructions on deploying and managing {rh-storage} on existing Red Hat {product-title} Azure clusters -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/deploying_openshift_data_foundation_using_microsoft_azure/index[Deploying and managing OpenShift Data Foundation 4.12 using Microsoft Azure] - -|Instructions on deploying {rh-storage} to use local storage on IBM Power infrastructure -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html-single/deploying_openshift_data_foundation_using_ibm_power/index[Deploying OpenShift Data Foundation on IBM Power] - -|Instructions on deploying {rh-storage} to use local storage on IBM Z infrastructure -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/deploying_openshift_data_foundation_using_ibm_z_infrastructure/index[Deploying OpenShift Data Foundation on IBM Z infrastructure] - -|Allocating storage to core services and hosted applications in {rh-storage-first}, including snapshot and clone -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/managing_and_allocating_storage_resources[Managing and allocating resources] - -|Managing storage resources across a hybrid cloud or multicloud environment using the Multicloud Object Gateway 
(NooBaa) -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/managing_hybrid_and_multicloud_resources[Managing hybrid and multicloud resources] - -|Safely replacing storage devices for {rh-storage-first} -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/replacing_devices[Replacing devices] - -|Safely replacing a node in a {rh-storage-first} cluster -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/replacing_nodes[Replacing nodes] - -|Scaling operations in {rh-storage-first} -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/scaling_storage[Scaling storage] - -|Monitoring a {rh-storage-first} 4.12 cluster -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/monitoring_openshift_data_foundation[Monitoring Red Hat OpenShift Data Foundation 4.12] - -|Resolve issues encountered during operations -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/troubleshooting_openshift_data_foundation[Troubleshooting OpenShift Data Foundation 4.12] - -|Migrating your {product-title} cluster from version 3 to version 4 -|link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.12/html/migrating_from_version_3_to_4/index[Migration] - -|=== diff --git a/modules/deploying-a-pod-that-includes-an-aws-sdk.adoc b/modules/deploying-a-pod-that-includes-an-aws-sdk.adoc deleted file mode 100644 index 93d51d709083..000000000000 --- a/modules/deploying-a-pod-that-includes-an-aws-sdk.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/assuming-an-aws-iam-role-for-a-service-account.adoc - -:_content-type: PROCEDURE -[id="deploying-a-pod-that-includes-an-aws-sdk_{context}"] -= Deploying a pod that includes an AWS SDK - -Deploy a pod in a user-defined project from a container image that includes an AWS SDK. In your pod configuration, specify the service account that includes the `eks.amazonaws.com/role-arn` annotation. - -With the service account reference in place for your pod, the pod identity webhook injects the AWS environment variables, the volume mount, and the token volume into your pod. The pod mutation enables the service account to automatically assume the AWS IAM role in the pod. - -.Prerequisites - -* You have created an AWS Identity and Access Management (IAM) role for your service account. For more information, see _Setting up an AWS IAM role for a service account_. -* You have access to a {product-title} cluster that uses the AWS Security Token Service (STS). Admin-level user privileges are not required. -* You have installed the OpenShift CLI (`oc`). -* You have created a service account in your project that includes an `eks.amazonaws.com/role-arn` annotation that references the Amazon Resource Name (ARN) for the IAM role that you want the service account to assume. -* You have a container image that includes an AWS SDK and the image is available to your cluster. For detailed steps, see _Creating an example AWS SDK container image_. -+ -[NOTE] -==== -In this example procedure, the AWS Boto3 SDK for Python is used. For more information about installing and using the AWS Boto3 SDK, see the link:https://boto3.amazonaws.com/v1/documentation/api/latest/index.html[AWS Boto3 documentation]. 
For details about other AWS SDKs, see link:https://docs.aws.amazon.com/sdkref/latest/guide/overview.html[AWS SDKs and Tools Reference Guide] in the AWS documentation. -==== - -.Procedure - -. Create a file named `awsboto3sdk-pod.yaml` with the following pod configuration: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - namespace: <project_name> <1> - name: awsboto3sdk <2> -spec: - serviceAccountName: <service_account_name> <3> - containers: - - name: awsboto3sdk - image: quay.io/<quay_username>/awsboto3sdk:latest <4> - command: - - /bin/bash - - "-c" - - "sleep 100000" <5> - terminationGracePeriodSeconds: 0 - restartPolicy: Never ----- -<1> Replace `<project_name>` with the name of your project. The name must match the project name that you specified in your AWS IAM role configuration. -<2> Specifies the name of the pod. -<3> Replace `<service_account_name>` with the name of the service account that is configured to assume the AWS IAM role. The name must match the service account name that you specified in your AWS IAM role configuration. -<4> Specifies the location of your `awsboto3sdk` container image. Replace `<quay_username>` with your Quay.io username. -<5> In this example pod configuration, this line keeps the pod running for 100000 seconds to enable verification testing in the pod directly. For detailed verification steps, see _Verifying the assumed IAM role in your pod_. - -. Deploy an `awsboto3sdk` pod: -+ -[source,terminal] ----- -$ oc create -f awsboto3sdk-pod.yaml ----- -+ -.Example output: -+ -[source,terminal] ----- -pod/awsboto3sdk created ----- diff --git a/modules/deploying-lvms-on-sno-cluster.adoc b/modules/deploying-lvms-on-sno-cluster.adoc deleted file mode 100644 index ae0cf6f97afe..000000000000 --- a/modules/deploying-lvms-on-sno-cluster.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: CONCEPT -[id="lvms-preface-sno-ran_{context}"] -= Deploying {lvms} on {sno} clusters - -You can deploy {lvms} on a {sno} bare-metal or user-provisioned infrastructure cluster and configure it to dynamically provision storage for your workloads. - -{lvms} creates a volume group using all the available unused disks and creates a single thin pool with a size of 90% of the volume group. -The remaining 10% of the volume group is left free to enable data recovery by expanding the thin pool when required. -You might need to manually perform such recovery. - -You can use persistent volume claims (PVCs) and volume snapshots provisioned by {lvms} to request storage and create volume snapshots. - -{lvms} configures a default overprovisioning limit of 10 to take advantage of the thin-provisioning feature. -The total size of the volumes and volume snapshots that can be created on the {sno} clusters is 10 times the size of the thin pool. - -You can deploy {lvms} on {sno} clusters using one of the following: - -* {rh-rhacm-first} -* {product-title} Web Console - -[id="lvms-deployment-requirements-for-sno-ran_{context}"] -== Requirements - -Before you begin deploying {lvms} on {sno} clusters, ensure that the following requirements are met: - -* You have installed {rh-rhacm-first} on an {product-title} cluster. -* Every managed {sno} cluster has dedicated disks that are used to provision storage. 
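As a sizing illustration of the defaults described above (an assumption-based example, not a requirement): a single 1 TiB unused disk yields a volume group of about 1 TiB, a thin pool of roughly 900 GiB (90% of the volume group), and, with the default overprovisioning limit of 10, up to approximately 9 TiB of combined volumes and volume snapshots.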
- -Before you deploy {lvms} on {sno} clusters, be aware of the following limitations: - -* You can only create a single instance of the `LVMCluster` custom resource (CR) on an {product-title} cluster. -* When a device becomes part of the `LVMCluster` CR, it cannot be removed. - -[id="lvms-deployment-limitations-for-sno-ran_{context}"] -== Limitations - -For deploying {sno}, LVM Storage has the following limitations: - -* The total storage size is limited by the size of the underlying Logical Volume Manager (LVM) thin pool and the overprovisioning factor. -* The size of the logical volume depends on the size of the Physical Extent (PE) and the Logical Extent (LE). -** It is possible to define the size of PE and LE during the physical and logical device creation. -** The default PE and LE size is 4 MB. -** If the size of the PE is increased, the maximum size of the LVM is determined by the kernel limits and your disk space. - -.Size limits for different architectures using the default PE and LE size -[cols="1,1,1,1,1", width="100%", options="header"] -|==== -|Architecture -|RHEL 5 -|RHEL 6 -|RHEL 7 -|RHEL 8 - -|32-bit -|16 TB -|16 TB -|- -|- - -|64-bit -|8 EB ^[1]^ - -|8 EB ^[1]^ - -100 TB ^[2]^ -|8 EB ^[1]^ - -500 TB ^[2]^ -|8 EB - -|==== -[.small] --- -1. Theoretical size. -2. Tested size. --- \ No newline at end of file diff --git a/modules/deploying-resource.adoc b/modules/deploying-resource.adoc deleted file mode 100644 index d1f7a0153087..000000000000 --- a/modules/deploying-resource.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Be sure to set the :FeatureName: and :FeatureResourceName: values in each assembly on the lines before -// the include statement for this module. For example, add the following lines to the assembly: -// :FeatureName: cluster autoscaler -// :FeatureResourceName: ClusterAutoscaler -// -// Module included in the following assemblies: -// -// * machine_management/applying-autoscaling.adoc -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: PROCEDURE -[id="{FeatureResourceName}-deploying_{context}"] -= Deploying the {FeatureName} - -To deploy the {FeatureName}, you create an instance of the `{FeatureResourceName}` resource. - -.Procedure - -. Create a YAML file for the `{FeatureResourceName}` resource that contains the customized resource definition. - -. Create the resource in the cluster: -+ -[source,terminal] ----- -$ oc create -f <filename>.yaml <1> ----- -<1> `<filename>` is the name of the resource file that you customized. - -// Undefine attributes, so that any mistakes are easily spotted -:!FeatureName: -:!FeatureResourceName: diff --git a/modules/deployment-plug-in-cluster.adoc b/modules/deployment-plug-in-cluster.adoc deleted file mode 100644 index 1e457239846d..000000000000 --- a/modules/deployment-plug-in-cluster.adoc +++ /dev/null @@ -1,95 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/dynamic-plugin/deploy-plugin-cluster.adoc - -:_content-type: PROCEDURE -[id="deploy-on-cluster_{context}"] -= Deploy your plugin on a cluster - -After pushing an image with your changes to a registry, you can deploy the plugin to a cluster. - -.Procedure - -. To deploy your plugin to a cluster, install a Helm chart with the name of the plugin as the Helm release name into a new namespace or an existing namespace as specified by the `-n` command-line option. 
Provide the location of the image within the `plugin.image` parameter by using the following command: - -+ -[source,terminal] ----- -$ helm upgrade -i my-plugin charts/openshift-console-plugin -n my-plugin-namespace --create-namespace --set plugin.image=my-plugin-image-location ----- -+ -Where: -+ --- -`n <my-plugin-namespace>`:: Specifies an existing namespace to deploy your plugin into. -`--create-namespace`:: Optional: If deploying to a new namespace, use this parameter. -`--set plugin.image=my-plugin-image-location`:: Specifies the location of the image within the `plugin.image` parameter. --- - -. Optional: You can specify any additional parameters by using the set of supported parameters in the `charts/openshift-console-plugin/values.yaml` file. -+ -[source,yaml] ----- -plugin: - name: "" - description: "" - image: "" - imagePullPolicy: IfNotPresent - replicas: 2 - port: 9443 - securityContext: - enabled: true - podSecurityContext: - enabled: true - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - containerSecurityContext: - enabled: true - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - resources: - requests: - cpu: 10m - memory: 50Mi - basePath: / - certificateSecretName: "" - serviceAccount: - create: true - annotations: {} - name: "" - patcherServiceAccount: - create: true - annotations: {} - name: "" - jobs: - patchConsoles: - enabled: true - image: "registry.redhat.io/openshift4/ose-tools-rhel8@sha256:e44074f21e0cca6464e50cb6ff934747e0bd11162ea01d522433a1a1ae116103" - podSecurityContext: - enabled: true - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - containerSecurityContext: - enabled: true - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - resources: - requests: - cpu: 10m - memory: 50Mi ----- - -.Verification -* View the list of enabled plugins by navigating from *Administration* -> *Cluster Settings* -> *Configuration* -> *Console* `operator.openshift.io` -> *Console plugins* or by visiting the *Overview* page. - - -[NOTE] -==== -It can take a few minutes for the new plugin configuration to appear. If you do not see your plugin, you might need to refresh your browser if the plugin was recently enabled. If you receive any errors at runtime, check the JS console in browser developer tools to look for any errors in your plugin code. -==== diff --git a/modules/deployments-ab-testing-lb.adoc b/modules/deployments-ab-testing-lb.adoc deleted file mode 100644 index 4c2006eaa31f..000000000000 --- a/modules/deployments-ab-testing-lb.adoc +++ /dev/null @@ -1,269 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/route-based-deployment-strategies.adoc - -:_content-type: PROCEDURE -[id="deployments-ab-testing-lb_{context}"] -= Load balancing for A/B testing - -The user sets up a route with multiple services. Each service handles a version of the application. - -Each service is assigned a `weight` and the portion of requests to each service is the `service_weight` divided by the `sum_of_weights`. The `weight` for each service is distributed to the service's endpoints so that the sum of the endpoint `weights` is the service `weight`. - -The route can have up to four services. The `weight` for the service can be between `0` and `256`. When the `weight` is `0`, the service does not participate in load-balancing but continues to serve existing persistent connections. When the service `weight` is not `0`, each endpoint has a minimum `weight` of `1`. 
Because of this, a service with a lot of endpoints can end up with higher `weight` than intended. In this case, reduce the number of pods to get the expected load balance `weight`. - -//// -See the -xref:../../architecture/networking/routes.adoc#alternateBackends[Alternate -Backends and Weights] section for more information. - -The web console allows users to set the weighting and show balance between them: - -weighting.png[Visualization of Alternate Back Ends in the Web Console] -//// - -.Procedure - -To set up the A/B environment: - -. Create the two applications and give them different names. Each creates a `Deployment` object. The applications are versions of the same program; one is usually the current production version and the other the proposed new version. -.. Create the first application. The following example creates an application called `ab-example-a`: -+ -[source,terminal] ----- -$ oc new-app openshift/deployment-example --name=ab-example-a ----- -+ -.. Create the second application: -+ -[source,terminal] ----- -$ oc new-app openshift/deployment-example:v2 --name=ab-example-b ----- -+ -Both applications are deployed and services are created. - -. Make the application available externally via a route. At this point, you can expose either. It can be convenient to expose the current production version first and later modify the route to add the new version. -+ -[source,terminal] ----- -$ oc expose svc/ab-example-a ----- -+ -Browse to the application at `ab-example-a.<project>.<router_domain>` to verify that you see the expected version. - -. When you deploy the route, the router balances the traffic according to the `weights` specified for the services. At this point, there is a single service with default `weight=1` so all requests go to it. Adding the other service as an `alternateBackends` and adjusting the `weights` brings the A/B setup to life. This can be done by the `oc set route-backends` command or by editing the route. -+ -Setting the `oc set route-backend` to `0` means the service does not participate in load-balancing, but continues to serve existing persistent connections. -+ -[NOTE] -==== -Changes to the route just change the portion of traffic to the various services. You might have to scale the deployment to adjust the number of pods to handle the anticipated loads. -==== -+ -To edit the route, run: -+ -[source,terminal] ----- -$ oc edit route <route_name> ----- -+ -.Example output -[source,terminal] ----- -... -metadata: - name: route-alternate-service - annotations: - haproxy.router.openshift.io/balance: roundrobin -spec: - host: ab-example.my-project.my-domain - to: - kind: Service - name: ab-example-a - weight: 10 - alternateBackends: - - kind: Service - name: ab-example-b - weight: 15 -... ----- - -[id="deployments-ab-testing-lb-web_{context}"] -== Managing weights of an existing route using the web console - -.Procedure - -. Navigate to the *Networking* -> *Routes* page. - -. Click the Actions menu {kebab} next to the route you want to edit and select *Edit Route*. - -. Edit the YAML file. Update the `weight` to be an integer between `0` and `256` that specifies the relative weight of the target against other target reference objects. The value `0` suppresses requests to this back end. The default is `100`. Run `oc explain routes.spec.alternateBackends` for more information about the options. - -. Click *Save*. - -[id="deployments-ab-testing-lb-web-new-route_{context}"] -== Managing weights of an new route using the web console - -. 
Navigate to the *Networking* -> *Routes* page. - -. Click *Create Route*. - -. Enter the route *Name*. - -. Select the *Service*. - -. Click *Add Alternate Service*. - -. Enter a value for *Weight* and *Alternate Service Weight*. Enter a number between `0` and `255` that depicts relative weight compared with other targets. The default is `100`. - -. Select the *Target Port*. - -. Click *Create*. - -[id="deployments-ab-testing-lb-cli_{context}"] -== Managing weights using the CLI - -.Procedure - -. To manage the services and corresponding weights load balanced by the route, use the `oc set route-backends` command: -+ -[source,terminal] ----- -$ oc set route-backends ROUTENAME \ - [--zero|--equal] [--adjust] SERVICE=WEIGHT[%] [...] [options] ----- -+ -For example, the following sets `ab-example-a` as the primary service with `weight=198` and `ab-example-b` as the first alternate service with a `weight=2`: -+ -[source,terminal] ----- -$ oc set route-backends ab-example ab-example-a=198 ab-example-b=2 ----- -+ -This means 99% of traffic is sent to service `ab-example-a` and 1% to service `ab-example-b`. -+ -This command does not scale the deployment. You might be required to do so to have enough pods to handle the request load. - -. Run the command with no flags to verify the current configuration: -+ -[source,terminal] ----- -$ oc set route-backends ab-example ----- -+ -.Example output -[source,terminal] ----- -NAME KIND TO WEIGHT -routes/ab-example Service ab-example-a 198 (99%) -routes/ab-example Service ab-example-b 2 (1%) ----- - -. To alter the weight of an individual service relative to itself or to the primary service, use the `--adjust` flag. Specifying a percentage adjusts the service relative to either the primary or the first alternate (if you specify the primary). If there are other backends, their weights are kept proportional to the changed. -+ -The following example alters the weight of `ab-example-a` and `ab-example-b` services: -+ -[source,terminal] ----- -$ oc set route-backends ab-example --adjust ab-example-a=200 ab-example-b=10 ----- -+ -Alternatively, alter the weight of a service by specifying a percentage: -+ -[source,terminal] ----- -$ oc set route-backends ab-example --adjust ab-example-b=5% ----- -+ -By specifying `+` before the percentage declaration, you can adjust a weighting relative to the current setting. For example: -+ -[source,terminal] ----- -$ oc set route-backends ab-example --adjust ab-example-b=+15% ----- -+ -The `--equal` flag sets the `weight` of all services to `100`: -+ -[source,terminal] ----- -$ oc set route-backends ab-example --equal ----- -+ -The `--zero` flag sets the `weight` of all services to `0`. All requests then return with a 503 error. -+ -[NOTE] -==== -Not all routers may support multiple or weighted backends. -==== - -[id="deployments-ab-one-service-multi-dc_{context}"] -== One service, multiple `Deployment` objects - -.Procedure - -. Create a new application, adding a label `ab-example=true` that will be common to all shards: -+ -[source,terminal] ----- -$ oc new-app openshift/deployment-example --name=ab-example-a --as-deployment-config=true --labels=ab-example=true --env=SUBTITLE\=shardA -$ oc delete svc/ab-example-a ----- -+ -The application is deployed and a service is created. This is the first shard. - -. 
Make the application available via a route, or use the service IP directly: -+ -[source,terminal] ----- -$ oc expose deployment ab-example-a --name=ab-example --selector=ab-example\=true -$ oc expose service ab-example ----- - -. Browse to the application at `ab-example-<project_name>.<router_domain>` to verify you see the `v1` image. - -. Create a second shard based on the same source image and label as the first shard, but with a different tagged version and unique environment variables: -+ -[source,terminal] ----- -$ oc new-app openshift/deployment-example:v2 \ - --name=ab-example-b --labels=ab-example=true \ - SUBTITLE="shard B" COLOR="red" --as-deployment-config=true -$ oc delete svc/ab-example-b ----- - -. At this point, both sets of pods are being served under the route. However, because both browsers (by leaving a connection open) and the router (by default, through a cookie) attempt to preserve your connection to a back-end server, you might not see both shards being returned to you. -+ -To force your browser to one or the other shard: - -.. Use the `oc scale` command to reduce replicas of `ab-example-a` to `0`. -+ -[source,terminal] ----- -$ oc scale dc/ab-example-a --replicas=0 ----- -+ -Refresh your browser to show `v2` and `shard B` (in red). - -.. Scale `ab-example-a` to `1` replica and `ab-example-b` to `0`: -+ -[source,terminal] ----- -$ oc scale dc/ab-example-a --replicas=1; oc scale dc/ab-example-b --replicas=0 ----- -+ -Refresh your browser to show `v1` and `shard A` (in blue). - -. If you trigger a deployment on either shard, only the pods in that shard are affected. You can trigger a deployment by changing the `SUBTITLE` environment variable in either `Deployment` object: -+ -[source,terminal] ----- -$ oc edit dc/ab-example-a ----- -+ -or -+ -[source,terminal] ----- -$ oc edit dc/ab-example-b ----- diff --git a/modules/deployments-ab-testing.adoc b/modules/deployments-ab-testing.adoc deleted file mode 100644 index f0f4ea28202a..000000000000 --- a/modules/deployments-ab-testing.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/route-based-deployment-strategies.adoc - -[id="deployments-ab-testing_{context}"] -= A/B deployments - -The A/B deployment strategy lets you try a new version of the application in a -limited way in the production environment. You can specify that the production -version gets most of the user requests while a limited fraction of requests go -to the new version. - -Because you control the portion of requests to each version, as testing -progresses you can increase the fraction of requests to the new version and -ultimately stop using the previous version. As you adjust the request load on -each version, the number of pods in each service might have to be scaled as well -to provide the expected performance. - -In addition to upgrading software, you can use this feature to experiment with -versions of the user interface. Since some users get the old version and some -the new, you can evaluate the user's reaction to the different versions to -inform design decisions. - -For this to be effective, both the old and new versions must be similar enough -that both can run at the same time. This is common with bug fix releases and -when new features do not interfere with the old. The versions require N-1 -compatibility to properly work together. - -{product-title} supports N-1 compatibility through the web console as well as -the CLI. 
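As testing progresses, you can move the split toward the new version in steps by re-running `oc set route-backends` with new weights. The following sketch reuses the `ab-example` route and services from the procedure above; the intermediate weights are illustrative:

[source,terminal]
----
$ oc set route-backends ab-example ab-example-a=90 ab-example-b=10
$ oc set route-backends ab-example ab-example-a=50 ab-example-b=50
$ oc set route-backends ab-example ab-example-a=0 ab-example-b=100
----

Each command replaces the previous weighting, so the share of traffic reaching `ab-example-b` grows from 10% to 50% to 100%. Remember to scale the deployments as the load on each version changes.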
diff --git a/modules/deployments-accessing-private-repos.adoc b/modules/deployments-accessing-private-repos.adoc deleted file mode 100644 index e20d9a1a6a07..000000000000 --- a/modules/deployments-accessing-private-repos.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-accessing-private-repos_{context}"] -= Accessing private repositories from DeploymentConfig objects - -You can add a secret to your `DeploymentConfig` object so that it can access images from a private repository. This procedure shows the {product-title} web console method. - -.Procedure - -. Create a new project. - -. From the *Workloads* page, create a secret that contains credentials for accessing a private image repository. - -. Create a `DeploymentConfig` object. - -. On the `DeploymentConfig` object editor page, set the *Pull Secret* and save your changes. diff --git a/modules/deployments-assigning-pods-to-nodes.adoc b/modules/deployments-assigning-pods-to-nodes.adoc deleted file mode 100644 index 1f7d7706ae87..000000000000 --- a/modules/deployments-assigning-pods-to-nodes.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-assigning-pods-to-nodes_{context}"] -= Assigning pods to specific nodes - -You can use node selectors in conjunction with labeled nodes to control pod -placement. - -Cluster administrators can set the default node selector for a project in order -to restrict pod placement to specific nodes. As a developer, you can set a node -selector on a `Pod` configuration to restrict nodes even further. - -.Procedure - -. To add a node selector when creating a pod, edit the `Pod` configuration, and add -the `nodeSelector` value. This can be added to a single `Pod` configuration, or in -a `Pod` template: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -spec: - nodeSelector: - disktype: ssd -... ----- -+ -Pods created when the node selector is in place are assigned to nodes with the -specified labels. The labels specified here are used in conjunction with the -labels added by a cluster administrator. -+ -For example, if a project has the `type=user-node` and `region=east` labels -added to a project by the cluster administrator, and you add the above -`disktype: ssd` label to a pod, the pod is only ever scheduled on nodes that -have all three labels. -+ -[NOTE] -==== -Labels can only be set to one value, so setting a node selector of `region=west` -in a `Pod` configuration that has `region=east` as the administrator-set default, -results in a pod that will never be scheduled. -==== diff --git a/modules/deployments-blue-green.adoc b/modules/deployments-blue-green.adoc deleted file mode 100644 index e73648eb4200..000000000000 --- a/modules/deployments-blue-green.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/route-based-deployment-strategies.adoc - -:_content-type: PROCEDURE -[id="deployments-blue-green_{context}"] -= Blue-green deployments - -Blue-green deployments involve running two versions of an application at the same time and moving traffic from the in-production version (the blue version) to the newer version (the green version). You can use a rolling strategy or switch services in a route. 
- -Because many applications depend on persistent data, you must have an application that supports _N-1 compatibility_, which means it shares data and implements live migration between the database, store, or disk by creating two copies of the data layer. - -Consider the data used in testing the new version. If it is the production data, a bug in the new version can break the production version. - -[id="deployments-blue-green-setting-up_{context}"] -== Setting up a blue-green deployment - -Blue-green deployments use two `Deployment` objects. Both are running, and the one in production depends on the service the route specifies, with each `Deployment` object exposed to a different service. - -[NOTE] -==== -Routes are intended for web (HTTP and HTTPS) traffic, so this technique is best suited for web applications. -==== - -You can create a new route to the new version and test it. When ready, change the service in the production route to point to the new service and the new (green) version is live. - -If necessary, you can roll back to the older (blue) version by switching the service back to the previous version. - -.Procedure - -. Create two independent application components. -.. Create a copy of the example application running the `v1` image under the `example-blue` service: -+ -[source,terminal] ----- -$ oc new-app openshift/deployment-example:v1 --name=example-blue ----- -+ -.. Create a second copy that uses the `v2` image under the `example-green` service: -+ -[source,terminal] ----- -$ oc new-app openshift/deployment-example:v2 --name=example-green ----- - -. Create a route that points to the old service: -+ -[source,terminal] ----- -$ oc expose svc/example-blue --name=bluegreen-example ----- - -. Browse to the application at `bluegreen-example-<project>.<router_domain>` to verify you see the `v1` image. - -. Edit the route and change the service name to `example-green`: -+ -[source,terminal] ----- -$ oc patch route/bluegreen-example -p '{"spec":{"to":{"name":"example-green"}}}' ----- - -. To verify that the route has changed, refresh the browser until you see the `v2` image. diff --git a/modules/deployments-canary-deployments.adoc b/modules/deployments-canary-deployments.adoc deleted file mode 100644 index 6ac3331f5571..000000000000 --- a/modules/deployments-canary-deployments.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/deployment-strategies.adoc - -[id="deployments-canary-deployments_{context}"] -= Canary deployments - -All rolling deployments in {product-title} are _canary deployments_; a new version (the canary) is tested before all of the old instances are replaced. If the readiness check never succeeds, the canary instance is removed and the `DeploymentConfig` object will be automatically rolled back. - -The readiness check is part of the application code and can be as sophisticated as necessary to ensure the new instance is ready to be used. If you must implement more complex checks of the application (such as sending real user workloads to the new instance), consider implementing a custom deployment or using a blue-green deployment strategy. 
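In practice, the readiness check is surfaced to the platform as a readiness probe on the pod template, so the canary receives traffic only after the probe succeeds. The following is a minimal sketch; the container name, image, health endpoint, port, and timings are illustrative assumptions:

[source,yaml]
----
spec:
  containers:
  - name: app
    image: quay.io/<username>/app:new # hypothetical image for the new version
    readinessProbe: # the rollout waits for this probe to pass before scaling down old pods
      httpGet:
        path: /healthz # assumed health endpoint exposed by the application
        port: 8080
      initialDelaySeconds: 5
      periodSeconds: 10
----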
diff --git a/modules/deployments-comparing-deploymentconfigs.adoc b/modules/deployments-comparing-deploymentconfigs.adoc deleted file mode 100644 index 3c8231353dfa..000000000000 --- a/modules/deployments-comparing-deploymentconfigs.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/what-deployments-are.adoc - -[id="deployments-comparing-deploymentconfigs_{context}"] -= Comparing Deployment and DeploymentConfig objects - -Both Kubernetes `Deployment` objects and {product-title}-provided `DeploymentConfig` objects are supported in {product-title}; however, it is recommended to use `Deployment` objects unless you need a specific feature or behavior provided by `DeploymentConfig` objects. - -The following sections go into more detail on the differences between the two object types to further help you decide which type to use. - -[id="deployments-design_{context}"] -== Design - -One important difference between `Deployment` and `DeploymentConfig` objects is the properties of the link:https://en.wikipedia.org/wiki/CAP_theorem[CAP theorem] that each design has chosen for the rollout process. `DeploymentConfig` objects prefer consistency, whereas `Deployments` objects take availability over consistency. - -For `DeploymentConfig` objects, if a node running a deployer pod goes down, it will not get replaced. The process waits until the node comes back online or is manually deleted. Manually deleting the node also deletes the corresponding pod. This means that you can not delete the pod to unstick the rollout, as the kubelet is responsible for deleting the associated pod. - -However, deployment rollouts are driven from a controller manager. The controller manager runs in high availability mode on masters and uses leader election algorithms to value availability over consistency. During a failure it is possible for other masters to act on the same deployment at the same time, but this issue will be reconciled shortly after the failure occurs. - -[id="delpoymentconfigs-specific-features_{context}"] -== DeploymentConfig object-specific features - -[discrete] -==== Automatic rollbacks - -Currently, deployments do not support automatically rolling back to the last successfully deployed replica set in case of a failure. - -[discrete] -==== Triggers - -Deployments have an implicit config change trigger in that every change in the pod template of a deployment automatically triggers a new rollout. -If you do not want new rollouts on pod template changes, pause the deployment: - -[source,terminal] ----- -$ oc rollout pause deployments/<name> ----- - -[discrete] -==== Lifecycle hooks - -Deployments do not yet support any lifecycle hooks. - -[discrete] -==== Custom strategies - -Deployments do not support user-specified custom deployment strategies yet. - -[id="delpoyments-specific-features_{context}"] -== Deployment-specific features - -[discrete] -==== Rollover - -The deployment process for `Deployment` objects is driven by a controller loop, in contrast to `DeploymentConfig` objects which use deployer pods for every new rollout. This means that the `Deployment` object can have as many active replica sets as possible, and eventually the deployment controller will scale down all old replica sets and scale up the newest one. - -`DeploymentConfig` objects can have at most one deployer pod running, otherwise multiple deployers end up conflicting while trying to scale up what they think should be the newest replication controller. 
Because of this, only two replication controllers can be active at any point in time. Ultimately, this translates to faster rapid rollouts for `Deployment` objects. - -[discrete] -==== Proportional scaling - -Because the deployment controller is the sole source of truth for the sizes of new and old replica sets owned by a `Deployment` object, it is able to scale ongoing rollouts. Additional replicas are distributed proportionally based on the size of each replica set. - -`DeploymentConfig` objects cannot be scaled when a rollout is ongoing because the controller will end up having issues with the deployer process about the size of the new replication controller. - -[discrete] -==== Pausing mid-rollout - -Deployments can be paused at any point in time, meaning you can also pause ongoing rollouts. On the other hand, you cannot pause deployer pods -currently, so if you try to pause a deployment in the middle of a rollout, the deployer process will not be affected and will continue until it finishes. diff --git a/modules/deployments-custom-strategy.adoc b/modules/deployments-custom-strategy.adoc deleted file mode 100644 index 66dcb82cc8b6..000000000000 --- a/modules/deployments-custom-strategy.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/deployment-strategies.adoc - -[id="deployments-custom-strategy_{context}"] -= Custom strategy - -The custom strategy allows you to provide your own deployment behavior. - -.Example custom strategy definition -[source,yaml] ----- -strategy: - type: Custom - customParams: - image: organization/strategy - command: [ "command", "arg1" ] - environment: - - name: ENV_1 - value: VALUE_1 ----- - -In the above example, the `organization/strategy` container image provides the deployment behavior. The optional `command` array overrides any `CMD` directive specified in the image's `Dockerfile`. The optional environment variables provided are added to the execution environment of the strategy process. - -Additionally, {product-title} provides the following environment variables to the deployment process: - -[cols="4,8",options="header"] -|=== -|Environment variable |Description - -.^|`OPENSHIFT_DEPLOYMENT_NAME` -|The name of the new deployment, a replication controller. - -.^|`OPENSHIFT_DEPLOYMENT_NAMESPACE` -|The name space of the new deployment. -|=== - -The replica count of the new deployment will initially be zero. The responsibility of the strategy is to make the new deployment active using the -logic that best serves the needs of the user. - -Alternatively, use the `customParams` object to inject the custom deployment logic into the existing deployment strategies. Provide a custom shell script logic and call the `openshift-deploy` binary. 
Users do not have to supply their custom deployer container image; in this case, the default {product-title} deployer image is used instead: - -[source,yaml] ----- -strategy: - type: Rolling - customParams: - command: - - /bin/sh - - -c - - | - set -e - openshift-deploy --until=50% - echo Halfway there - openshift-deploy - echo Complete ----- - -This results in following deployment: - -[source,terminal] ----- -Started deployment #2 ---> Scaling up custom-deployment-2 from 0 to 2, scaling down custom-deployment-1 from 2 to 0 (keep 2 pods available, don't exceed 3 pods) - Scaling custom-deployment-2 up to 1 ---> Reached 50% (currently 50%) -Halfway there ---> Scaling up custom-deployment-2 from 1 to 2, scaling down custom-deployment-1 from 2 to 0 (keep 2 pods available, don't exceed 3 pods) - Scaling custom-deployment-1 down to 1 - Scaling custom-deployment-2 up to 2 - Scaling custom-deployment-1 down to 0 ---> Success -Complete ----- - -If the custom deployment strategy process requires access to the {product-title} API or the Kubernetes API the container that executes the strategy can use the service account token available inside the container for authentication. diff --git a/modules/deployments-deploymentconfigs.adoc b/modules/deployments-deploymentconfigs.adoc deleted file mode 100644 index 63f5468a15b9..000000000000 --- a/modules/deployments-deploymentconfigs.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/what-deployments-are.adoc - -[id="deployments-and-deploymentconfigs_{context}"] -= DeploymentConfig objects - -Building on replication controllers, {product-title} adds expanded support for the software development and deployment lifecycle with the concept of `DeploymentConfig` objects. In the simplest case, a `DeploymentConfig` object creates a new replication controller and lets it start up pods. - -However, {product-title} deployments from `DeploymentConfig` objects also provide the ability to transition from an existing deployment of an image to a new one and also define hooks to be run before or after creating the replication controller. - -The `DeploymentConfig` deployment system provides the following capabilities: - -* A `DeploymentConfig` object, which is a template for running applications. -* Triggers that drive automated deployments in response to events. -* User-customizable deployment strategies to transition from the previous version to the new version. A strategy runs inside a pod commonly referred as the deployment process. -* A set of hooks (lifecycle hooks) for executing custom behavior in different points during the lifecycle of a deployment. -* Versioning of your application to support rollbacks either manually or automatically in case of deployment failure. -* Manual replication scaling and autoscaling. - -When you create a `DeploymentConfig` object, a replication controller is created representing the `DeploymentConfig` object's pod template. If the deployment changes, a new replication controller is created with the latest pod template, and a deployment process runs to scale down the old replication controller and scale up the new one. - -Instances of your application are automatically added and removed from both service load balancers and routers as they are created. As long as your application supports graceful shutdown when it receives the `TERM` signal, you can ensure that running user connections are given a chance to complete normally. 
- -The {product-title} `DeploymentConfig` object defines the following details: - -. The elements of a `ReplicationController` definition. -. Triggers for creating a new deployment automatically. -. The strategy for transitioning between deployments. -. Lifecycle hooks. - -Each time a deployment is triggered, whether manually or automatically, a deployer pod manages the deployment (including scaling down the old -replication controller, scaling up the new one, and running hooks). The deployment pod remains for an indefinite amount of time after it completes the deployment to retain its logs of the deployment. When a deployment is superseded by another, the previous replication controller is retained to enable easy rollback if needed. - -.Example `DeploymentConfig` definition -[source,yaml] ----- -apiVersion: apps.openshift.io/v1 -kind: DeploymentConfig -metadata: - name: frontend -spec: - replicas: 5 - selector: - name: frontend - template: { ... } - triggers: - - type: ConfigChange <1> - - imageChangeParams: - automatic: true - containerNames: - - helloworld - from: - kind: ImageStreamTag - name: hello-openshift:latest - type: ImageChange <2> - strategy: - type: Rolling <3> ----- -<1> A configuration change trigger results in a new replication controller whenever changes are detected in the pod template of the deployment configuration. -<2> An image change trigger causes a new deployment to be created each time a new version of the backing image is available in the named image stream. -<3> The default `Rolling` strategy makes a downtime-free transition between deployments. diff --git a/modules/deployments-exec-cmd-in-container.adoc b/modules/deployments-exec-cmd-in-container.adoc deleted file mode 100644 index 5c6edc97af18..000000000000 --- a/modules/deployments-exec-cmd-in-container.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-exe-cmd-in-container_{context}"] -= Executing commands inside a container - -You can add a command to a container, which modifies the container's startup behavior by overruling the image's `ENTRYPOINT`. This is different from a lifecycle hook, which instead can be run once per deployment at a specified time. - -.Procedure - -. Add the `command` parameters to the `spec` field of the `DeploymentConfig` object. You can also add an `args` field, which modifies the `command` (or the `ENTRYPOINT` if `command` does not exist). 
-+ -[source,yaml] ----- -spec: - containers: - - name: <container_name> - image: 'image' - command: - - '<command>' - args: - - '<argument_1>' - - '<argument_2>' - - '<argument_3>' ----- -+ -For example, to execute the `java` command with the `-jar` and `/opt/app-root/springboots2idemo.jar` arguments: -+ -[source,yaml] ----- -spec: - containers: - - name: example-spring-boot - image: 'image' - command: - - java - args: - - '-jar' - - /opt/app-root/springboots2idemo.jar ----- diff --git a/modules/deployments-graceful-termination.adoc deleted file mode 100644 index ba10afd42803..000000000000 --- a/modules/deployments-graceful-termination.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/route-based-deployment-strategies.adoc - -[id="deployments-graceful-termination_{context}"] -= Graceful termination - -{product-title} and Kubernetes give application instances time to shut down before removing them from load balancing rotations. However, applications must ensure they cleanly terminate user connections as well before they exit. - -On shutdown, {product-title} sends a `TERM` signal to the processes in the container. Application code, on receiving `SIGTERM`, stops accepting new connections. This ensures that load balancers route traffic to other active instances. The application code then waits until all open connections are closed, or gracefully terminates individual connections at the next opportunity, before exiting. - -After the graceful termination period expires, a process that has not exited is sent the `KILL` signal, which immediately ends the process. The -`terminationGracePeriodSeconds` attribute of a pod or pod template controls the graceful termination period (default 30 seconds) and can be customized per application as necessary. diff --git a/modules/deployments-kube-deployments.adoc deleted file mode 100644 index 831d85a783f8..000000000000 --- a/modules/deployments-kube-deployments.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/what-deployments-are.adoc - -[id="deployments-kube-deployments_{context}"] -= Deployments - -Kubernetes provides a first-class, native API object type in {product-title} called `Deployment`. `Deployment` objects serve as a descendant of the {product-title}-specific `DeploymentConfig` object. - -Like `DeploymentConfig` objects, `Deployment` objects describe the desired state of a particular component of an application as a pod template. Deployments create replica sets, which orchestrate pod lifecycles.
- -For example, the following deployment definition creates a replica set to bring up one `hello-openshift` pod: - -.Deployment definition -[source,yaml] ----- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: hello-openshift -spec: - replicas: 1 - selector: - matchLabels: - app: hello-openshift - template: - metadata: - labels: - app: hello-openshift - spec: - containers: - - name: hello-openshift - image: openshift/hello-openshift:latest - ports: - - containerPort: 80 ----- diff --git a/modules/deployments-lifecycle-hooks.adoc b/modules/deployments-lifecycle-hooks.adoc deleted file mode 100644 index a373a6866b68..000000000000 --- a/modules/deployments-lifecycle-hooks.adoc +++ /dev/null @@ -1,102 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/deployment-strategies.adoc - -:_content-type: PROCEDURE -[id="deployments-lifecycle-hooks_{context}"] -= Lifecycle hooks - -The rolling and recreate strategies support _lifecycle hooks_, or deployment hooks, which allow behavior to be injected into the deployment process at predefined points within the strategy: - -.Example `pre` lifecycle hook -[source,yaml] ----- -pre: - failurePolicy: Abort - execNewPod: {} <1> ----- -<1> `execNewPod` is a pod-based lifecycle hook. - -Every hook has a _failure policy_, which defines the action the strategy should take when a hook failure is encountered: - -[cols="2,8"] -|=== - -.^|`Abort` -|The deployment process will be considered a failure if the hook fails. - -.^|`Retry` -|The hook execution should be retried until it succeeds. - -.^|`Ignore` -|Any hook failure should be ignored and the deployment should proceed. -|=== - -Hooks have a type-specific field that describes how to execute the hook. Currently, pod-based hooks are the only supported hook type, specified by the `execNewPod` field. - -[discrete] -==== Pod-based lifecycle hook - -Pod-based lifecycle hooks execute hook code in a new pod derived from the template in a `DeploymentConfig` object. - -The following simplified example deployment uses the rolling strategy. Triggers and some other minor details are omitted for brevity: - -[source,yaml] ----- -kind: DeploymentConfig -apiVersion: apps.openshift.io/v1 -metadata: - name: frontend -spec: - template: - metadata: - labels: - name: frontend - spec: - containers: - - name: helloworld - image: openshift/origin-ruby-sample - replicas: 5 - selector: - name: frontend - strategy: - type: Rolling - rollingParams: - pre: - failurePolicy: Abort - execNewPod: - containerName: helloworld <1> - command: [ "/usr/bin/command", "arg1", "arg2" ] <2> - env: <3> - - name: CUSTOM_VAR1 - value: custom_value1 - volumes: - - data <4> ----- -<1> The `helloworld` name refers to `spec.template.spec.containers[0].name`. -<2> This `command` overrides any `ENTRYPOINT` defined by the `openshift/origin-ruby-sample` image. -<3> `env` is an optional set of environment variables for the hook container. -<4> `volumes` is an optional set of volume references for the hook container. - -In this example, the `pre` hook will be executed in a new pod using the `openshift/origin-ruby-sample` image from the `helloworld` container. The hook pod has the following properties: - -* The hook command is `/usr/bin/command arg1 arg2`. -* The hook container has the `CUSTOM_VAR1=custom_value1` environment variable. -* The hook failure policy is `Abort`, meaning the deployment process fails if the hook fails. -* The hook pod inherits the `data` volume from the `DeploymentConfig` object pod. 
- -[id="deployments-setting-lifecycle-hooks_{context}"] -== Setting lifecycle hooks - -You can set lifecycle hooks, or deployment hooks, for a deployment using the CLI. - -.Procedure - -. Use the `oc set deployment-hook` command to set the type of hook you want: `--pre`, `--mid`, or `--post`. For example, to set a pre-deployment hook: -+ -[source,terminal] ----- -$ oc set deployment-hook dc/frontend \ - --pre -c helloworld -e CUSTOM_VAR1=custom_value1 \ - --volumes data --failure-policy=abort -- /usr/bin/command arg1 arg2 ----- diff --git a/modules/deployments-n1-compatibility.adoc b/modules/deployments-n1-compatibility.adoc deleted file mode 100644 index 52ad646196d2..000000000000 --- a/modules/deployments-n1-compatibility.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/route-based-deployment-strategies.adoc - -[id="deployments-n1-compatibility_{context}"] -= N-1 compatibility - -Applications that have new code and old code running at the same time must be -careful to ensure that data written by the new code can be read and handled (or -gracefully ignored) by the old version of the code. This is sometimes called -_schema evolution_ and is a complex problem. - -This can take many forms: data stored on disk, in a database, in a temporary -cache, or that is part of a user's browser session. While most web applications -can support rolling deployments, it is important to test and design your -application to handle it. - -For some applications, the period of time that old code and new code is running -side by side is short, so bugs or some failed user transactions are acceptable. -For others, the failure pattern may result in the entire application becoming -non-functional. - -One way to validate N-1 compatibility is to use an A/B deployment: run the old -code and new code at the same time in a controlled way in a test environment, -and verify that traffic that flows to the new deployment does not cause failures -in the old deployment. diff --git a/modules/deployments-proxy-shards.adoc b/modules/deployments-proxy-shards.adoc deleted file mode 100644 index 9a0cc7dc89b2..000000000000 --- a/modules/deployments-proxy-shards.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/route-based-deployment-strategies.adoc - -[id="deployments-proxy-shard_{context}"] -= Proxy shards and traffic splitting - -In production environments, you can precisely control the distribution of -traffic that lands on a particular shard. When dealing with large numbers of -instances, you can use the relative scale of individual shards to implement -percentage based traffic. That combines well with a _proxy shard_, which -forwards or splits the traffic it receives to a separate service or application -running elsewhere. - -In the simplest configuration, the proxy forwards requests unchanged. In -more complex setups, you can duplicate the incoming requests and send to -both a separate cluster as well as to a local instance of the application, and -compare the result. Other patterns include keeping the caches of a DR -installation warm, or sampling incoming traffic for analysis purposes. - -Any TCP (or UDP) proxy could be run under the desired shard. Use the `oc scale` -command to alter the relative number of instances serving requests under the -proxy shard. For more complex traffic management, consider customizing the -{product-title} router with proportional balancing capabilities. 
diff --git a/modules/deployments-recreate-strategy.adoc b/modules/deployments-recreate-strategy.adoc deleted file mode 100644 index dca822276508..000000000000 --- a/modules/deployments-recreate-strategy.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/deployment-strategies.adoc - -[id="deployments-recreate-strategy_{context}"] -= Recreate strategy - -The recreate strategy has basic rollout behavior and supports lifecycle hooks for injecting code into the deployment process. - -.Example recreate strategy definition -[source,yaml] ----- -strategy: - type: Recreate - recreateParams: <1> - pre: {} <2> - mid: {} - post: {} ----- - -<1> `recreateParams` are optional. -<2> `pre`, `mid`, and `post` are lifecycle hooks. - -The recreate strategy: - -. Executes any `pre` lifecycle hook. -. Scales down the previous deployment to zero. -. Executes any `mid` lifecycle hook. -. Scales up the new deployment. -. Executes any `post` lifecycle hook. - -[IMPORTANT] -==== -During scale up, if the replica count of the deployment is greater than one, the first replica of the deployment will be validated for readiness before fully scaling up the deployment. If the validation of the first replica fails, the deployment will be considered a failure. -==== - -*When to use a recreate deployment:* - -- When you must run migrations or other data transformations before your new code starts. -- When you do not support having new and old versions of your application code running at the same time. -- When you want to use a RWO volume, which is not supported being shared between multiple replicas. - -A recreate deployment incurs downtime because, for a brief period, no instances of your application are running. However, your old code and new code do not run at the same time. diff --git a/modules/deployments-replicasets.adoc b/modules/deployments-replicasets.adoc deleted file mode 100644 index fe2bbb4820c6..000000000000 --- a/modules/deployments-replicasets.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/what-deployments-are.adoc - -[id="deployments-repliasets_{context}"] -= Replica sets - -Similar to a replication controller, a `ReplicaSet` is a native Kubernetes API object that ensures a specified number of pod replicas are running at any given time. The difference between a replica set and a replication controller is that a replica set supports set-based selector requirements whereas a replication controller only supports equality-based selector requirements. - -[NOTE] -==== -Only use replica sets if you require custom update orchestration or do not require updates at all. Otherwise, use deployments. Replica sets can be used independently, but are used by deployments to orchestrate pod creation, deletion, and updates. Deployments manage their replica sets automatically, provide declarative updates to pods, and do not have to manually manage the replica sets that they create. 
-==== - -The following is an example `ReplicaSet` definition: - -[source,yaml] ----- -apiVersion: apps/v1 -kind: ReplicaSet -metadata: - name: frontend-1 - labels: - tier: frontend -spec: - replicas: 3 - selector: <1> - matchLabels: <2> - tier: frontend - matchExpressions: <3> - - {key: tier, operator: In, values: [frontend]} - template: - metadata: - labels: - tier: frontend - spec: - containers: - - image: openshift/hello-openshift - name: helloworld - ports: - - containerPort: 8080 - protocol: TCP - restartPolicy: Always ----- -<1> A label query over a set of resources. The result of `matchLabels` and `matchExpressions` are logically conjoined. -<2> Equality-based selector to specify resources with labels that match the selector. -<3> Set-based selector to filter keys. This selects all resources with key equal to `tier` and value equal to `frontend`. diff --git a/modules/deployments-replicationcontrollers.adoc b/modules/deployments-replicationcontrollers.adoc deleted file mode 100644 index eef11545854f..000000000000 --- a/modules/deployments-replicationcontrollers.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/what-deployments-are.adoc - -[id="deployments-replicationcontrollers_{context}"] -= Replication controllers - -A replication controller ensures that a specified number of replicas of a pod are running at all times. If pods exit or are deleted, the replication controller acts to instantiate more up to the defined number. Likewise, if there are more running than desired, it deletes as many as necessary to match the defined amount. - -A replication controller configuration consists of: - -* The number of replicas desired, which can be adjusted at run time. -* A `Pod` definition to use when creating a replicated pod. -* A selector for identifying managed pods. - -A selector is a set of labels assigned to the pods that are managed by the replication controller. These labels are included in the `Pod` definition that the replication controller instantiates. The replication controller uses the selector to determine how many instances of the pod are already running in order to adjust as needed. - -The replication controller does not perform auto-scaling based on load or traffic, as it does not track either. Rather, this requires its replica -count to be adjusted by an external auto-scaler. - -The following is an example definition of a replication controller: - -[source,yaml] ----- -apiVersion: v1 -kind: ReplicationController -metadata: - name: frontend-1 -spec: - replicas: 1 <1> - selector: <2> - name: frontend - template: <3> - metadata: - labels: <4> - name: frontend <5> - spec: - containers: - - image: openshift/hello-openshift - name: helloworld - ports: - - containerPort: 8080 - protocol: TCP - restartPolicy: Always ----- -<1> The number of copies of the pod to run. -<2> The label selector of the pod to run. -<3> A template for the pod the controller creates. -<4> Labels on the pod should include those from the label selector. -<5> The maximum name length after expanding any parameters is 63 characters. 
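Because the replication controller does not scale itself, an external actor, such as an administrator or an autoscaler, adjusts the `replicas` value. A minimal illustration using the `frontend-1` controller defined above:

[source,terminal]
----
$ oc scale rc/frontend-1 --replicas=3
----

The controller then creates or deletes pods until three replicas matching the selector are running.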
diff --git a/modules/deployments-retrying-deployment.adoc b/modules/deployments-retrying-deployment.adoc deleted file mode 100644 index c2f04a91d147..000000000000 --- a/modules/deployments-retrying-deployment.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-retrying-deployment_{context}"] -= Retrying a deployment - -If the current revision of your `DeploymentConfig` object failed to deploy, you can restart the deployment process. - -.Procedure - -. To restart a failed deployment process: -+ -[source,terminal] ----- -$ oc rollout retry dc/<name> ----- -+ -If the latest revision of it was deployed successfully, the command displays a message and the deployment process is not retried. -+ -[NOTE] -==== -Retrying a deployment restarts the deployment process and does not create a new deployment revision. The restarted replication controller has the same configuration it had when it failed. -==== diff --git a/modules/deployments-rolling-back.adoc b/modules/deployments-rolling-back.adoc deleted file mode 100644 index b8845944f77f..000000000000 --- a/modules/deployments-rolling-back.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-rolling-back_{context}"] -= Rolling back a deployment - -Rollbacks revert an application back to a previous revision and can be performed using the REST API, the CLI, or the web console. - -.Procedure - -. To rollback to the last successful deployed revision of your configuration: -+ -[source,terminal] ----- -$ oc rollout undo dc/<name> ----- -+ -The `DeploymentConfig` object's template is reverted to match the deployment revision specified in the undo command, and a new replication controller is started. If no revision is specified with `--to-revision`, then the last successfully deployed revision is used. - -. Image change triggers on the `DeploymentConfig` object are disabled as part of the rollback to prevent accidentally starting a new deployment process soon after the rollback is complete. -+ -To re-enable the image change triggers: -+ -[source,terminal] ----- -$ oc set triggers dc/<name> --auto ----- - -[NOTE] -==== -Deployment configs also support automatically rolling back to the last successful revision of the configuration in case the latest deployment process fails. In that case, the latest template that failed to deploy stays intact by the system and it is up to users to fix their configurations. -==== diff --git a/modules/deployments-rolling-strategy.adoc b/modules/deployments-rolling-strategy.adoc deleted file mode 100644 index 3105608436d2..000000000000 --- a/modules/deployments-rolling-strategy.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/deployment-strategies.adoc - -[id="deployments-rolling-strategy_{context}"] -= Rolling strategy - -A rolling deployment slowly replaces instances of the previous version of an application with instances of the new version of the application. The rolling strategy is the default deployment strategy used if no strategy is specified on a `DeploymentConfig` object. - -A rolling deployment typically waits for new pods to become `ready` via a readiness check before scaling down the old components. 
If a significant issue occurs, the rolling deployment can be aborted. - -*When to use a rolling deployment:* - -- When you want no downtime during an application update. -- When your application supports having old code and new code running at the same time. - -A rolling deployment means you have both old and new versions of your code running at the same time. This typically requires that your application handle N-1 compatibility. - -.Example rolling strategy definition -[source,yaml] ----- -strategy: - type: Rolling - rollingParams: - updatePeriodSeconds: 1 <1> - intervalSeconds: 1 <2> - timeoutSeconds: 120 <3> - maxSurge: "20%" <4> - maxUnavailable: "10%" <5> - pre: {} <6> - post: {} ----- -<1> The time to wait between individual pod updates. If unspecified, this value defaults to `1`. -<2> The time to wait between polling the deployment status after update. If unspecified, this value defaults to `1`. -<3> The time to wait for a scaling event before giving up. Optional; the default is `600`. Here, _giving up_ means automatically rolling back to the previous complete deployment. -<4> `maxSurge` is optional and defaults to `25%` if not specified. See the information below. -<5> `maxUnavailable` is optional and defaults to `25%` if not specified. See the information below. -<6> `pre` and `post` are both lifecycle hooks. - -The rolling strategy: - -. Executes any `pre` lifecycle hook. -. Scales up the new replication controller based on the surge count. -. Scales down the old replication controller based on the max unavailable count. -. Repeats this scaling until the new replication controller has reached the desired replica count and the old replication controller has been scaled to zero. -. Executes any `post` lifecycle hook. - -[IMPORTANT] -==== -When scaling down, the rolling strategy waits for pods to become ready so it can decide whether further scaling would affect availability. If scaled-up pods never become ready, the deployment process eventually times out and results in a deployment failure. -==== - -The `maxUnavailable` parameter is the maximum number of pods that can be unavailable during the update. The `maxSurge` parameter is the maximum number of pods that can be scheduled above the original number of pods. Both parameters can be set to either a percentage (for example, `10%`) or an absolute value (for example, `2`). The default value for both is `25%`. - -These parameters allow the deployment to be tuned for availability and speed. For example: - -- `maxUnavailable=0` and `maxSurge=20%` maintains full capacity during the update and scales up rapidly. -- `maxUnavailable=10%` and `maxSurge=0` performs an update using no extra capacity (an in-place update). -- `maxUnavailable=10%` and `maxSurge=10%` scales up and down quickly with some potential for capacity loss. - -Generally, if you want fast rollouts, use `maxSurge`. If you must account for resource quota and can accept partial unavailability, use `maxUnavailable`.
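To make the first trade-off in the list above concrete, the following hedged sketch shows how the `rollingParams` from the earlier example could be tuned so that full capacity is maintained while still allowing a rapid scale-up. The values are illustrative only; they simply restate the `maxUnavailable=0` and `maxSurge=20%` combination described above.

[source,yaml]
----
strategy:
  type: Rolling
  rollingParams:
    maxSurge: "20%"       # schedule up to 20% extra pods during the update
    maxUnavailable: "0%"  # never drop below the original replica count
----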
diff --git a/modules/deployments-running-pod-svc-acct.adoc b/modules/deployments-running-pod-svc-acct.adoc deleted file mode 100644 index b02377390a70..000000000000 --- a/modules/deployments-running-pod-svc-acct.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-running-pod-svc-acct_{context}"] -= Running a pod with a different service account - -You can run a pod with a service account other than the default. - -.Procedure - -. Edit the `DeploymentConfig` object: -+ -[source,terminal] ----- -$ oc edit dc/<deployment_config> ----- - -. Add the `serviceAccount` and `serviceAccountName` parameters to the `spec` field, and specify the service account you want to use: -+ -[source,yaml] ----- -spec: - securityContext: {} - serviceAccount: <service_account> - serviceAccountName: <service_account> ----- diff --git a/modules/deployments-scaling-manually.adoc b/modules/deployments-scaling-manually.adoc deleted file mode 100644 index 4df16c9cca92..000000000000 --- a/modules/deployments-scaling-manually.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-scaling-manually_{context}"] -= Scaling manually - -In addition to rollbacks, you can exercise fine-grained control over the number of replicas by manually scaling them. - -[NOTE] -==== -Pods can also be auto-scaled using the `oc autoscale` command. -==== - -.Procedure - -. To manually scale a `DeploymentConfig` object, use the `oc scale` command. For example, the following command sets the replicas in the `frontend` `DeploymentConfig` object to `3`. -+ -[source,terminal] ----- -$ oc scale dc frontend --replicas=3 ----- -+ -The number of replicas eventually propagates to the desired and current state of the deployment configured by the `DeploymentConfig` object `frontend`. diff --git a/modules/deployments-setting-resources.adoc b/modules/deployments-setting-resources.adoc deleted file mode 100644 index 2f2bbd2a01ae..000000000000 --- a/modules/deployments-setting-resources.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-setting-resources_{context}"] -= Setting deployment resources - -A deployment is completed by a pod that consumes resources (memory, CPU, and ephemeral storage) on a node. By default, pods consume unbounded node resources. However, if a project specifies default container limits, then pods consume resources up to those limits. - -[NOTE] -==== -The minimum memory limit for a deployment is 12 MB. If a container fails to start due to a `Cannot allocate memory` pod event, the memory limit is too low. Either increase or remove the memory limit. Removing the limit allows pods to consume unbounded node resources. -==== - -You can also limit resource use by specifying resource limits as part of the deployment strategy. Deployment resources can be used with the recreate, rolling, or custom deployment strategies. - -.Procedure - -. 
In the following example, each of `resources`, `cpu`, `memory`, and `ephemeral-storage` is optional: -+ -[source,yaml] ----- -type: "Recreate" -resources: - limits: - cpu: "100m" <1> - memory: "256Mi" <2> - ephemeral-storage: "1Gi" <3> ----- -<1> `cpu` is in CPU units: `100m` represents 0.1 CPU units (100 * 1e-3). -<2> `memory` is in bytes: `256Mi` represents 268435456 bytes (256 * 2 ^ 20). -<3> `ephemeral-storage` is in bytes: `1Gi` represents 1073741824 bytes (2 ^ 30). -+ -However, if a quota has been defined for your project, one of the following two items is required: -+ --- -- A `resources` section set with an explicit `requests`: -+ -[source,yaml] ----- - type: "Recreate" - resources: - requests: <1> - cpu: "100m" - memory: "256Mi" - ephemeral-storage: "1Gi" ----- -<1> The `requests` object contains the list of resources that correspond to the list of resources in the quota. - -- A limit range defined in your project, where the defaults from the `LimitRange` object apply to pods created during the deployment process. --- -+ -To set deployment resources, choose one of the above options. Otherwise, deploy pod creation fails, citing a failure to satisfy quota. diff --git a/modules/deployments-setting-triggers.adoc b/modules/deployments-setting-triggers.adoc deleted file mode 100644 index b31f1b6cc9cc..000000000000 --- a/modules/deployments-setting-triggers.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-setting-triggers_{context}"] -= Setting deployment triggers - -.Procedure - -. You can set deployment triggers for a `DeploymentConfig` object using the `oc set triggers` command. For example, to set a image change trigger, use the following command: -+ -[source,terminal] ----- -$ oc set triggers dc/<dc_name> \ - --from-image=<project>/<image>:<tag> -c <container_name> ----- diff --git a/modules/deployments-starting-deployment.adoc b/modules/deployments-starting-deployment.adoc deleted file mode 100644 index 5bbce6150665..000000000000 --- a/modules/deployments-starting-deployment.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-starting-a-deployment_{context}"] -= Starting a deployment - -You can start a rollout to begin the deployment process of your application. - -.Procedure - -. To start a new deployment process from an existing `DeploymentConfig` object, run the following command: -+ -[source,terminal] ----- -$ oc rollout latest dc/<name> ----- -+ -[NOTE] -==== -If a deployment process is already in progress, the command displays a message and a new replication controller will not be deployed. -==== diff --git a/modules/deployments-triggers.adoc b/modules/deployments-triggers.adoc deleted file mode 100644 index 5140677f9507..000000000000 --- a/modules/deployments-triggers.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -[id="deployments-triggers_{context}"] -= Deployment triggers - -A `DeploymentConfig` object can contain triggers, which drive the creation of new deployment processes in response to events inside the cluster. - -[WARNING] -==== -If no triggers are defined on a `DeploymentConfig` object, a config change trigger is added by default. 
If triggers are defined as an empty field, deployments must be started manually. -==== - -[discrete] -[id="deployments-configchange-trigger_{context}"] -=== Config change deployment triggers - -The config change trigger results in a new replication controller whenever configuration changes are detected in the pod template of the `DeploymentConfig` object. - -[NOTE] -==== -If a config change trigger is defined on a `DeploymentConfig` object, the first replication controller is automatically created soon after the `DeploymentConfig` object itself is created and it is not paused. -==== - -.Config change deployment trigger -[source,yaml] ----- -triggers: - - type: "ConfigChange" ----- - -[discrete] -[id="deployments-imagechange-trigger_{context}"] -=== Image change deployment triggers - -The image change trigger results in a new replication controller whenever the content of an image stream tag changes (when a new version of the image is pushed). - -.Image change deployment trigger -[source,yaml] ----- -triggers: - - type: "ImageChange" - imageChangeParams: - automatic: true <1> - from: - kind: "ImageStreamTag" - name: "origin-ruby-sample:latest" - namespace: "myproject" - containerNames: - - "helloworld" ----- -<1> If the `imageChangeParams.automatic` field is set to `false`, the trigger is disabled. - -With the above example, when the `latest` tag value of the `origin-ruby-sample` image stream changes and the new image value differs from the current image specified in the `DeploymentConfig` object's `helloworld` container, a new replication controller is created using the new image for the `helloworld` container. - -[NOTE] -==== -If an image change trigger is defined on a `DeploymentConfig` object (with a config change trigger and `automatic=false`, or with `automatic=true`) and the image stream tag pointed by the image change trigger does not exist yet, the initial deployment process will automatically start as soon as an image is imported or pushed by a build to the image stream tag. -==== diff --git a/modules/deployments-viewing-deployment.adoc b/modules/deployments-viewing-deployment.adoc deleted file mode 100644 index 15ac06a8168a..000000000000 --- a/modules/deployments-viewing-deployment.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-viewing-a-deployment_{context}"] -= Viewing a deployment - -You can view a deployment to get basic information about all the available revisions of your application. - -.Procedure - -. To show details about all recently created replication controllers for the provided `DeploymentConfig` object, including any currently running deployment process, run the following command: -+ -[source,terminal] ----- -$ oc rollout history dc/<name> ----- - -. To view details specific to a revision, add the `--revision` flag: -+ -[source,terminal] ----- -$ oc rollout history dc/<name> --revision=1 ----- - -. 
For more detailed information about a `DeploymentConfig` object and its latest revision, use the `oc describe` command: -+ -[source,terminal] ----- -$ oc describe dc <name> ----- diff --git a/modules/deployments-viewing-logs.adoc b/modules/deployments-viewing-logs.adoc deleted file mode 100644 index 417ea9833a8f..000000000000 --- a/modules/deployments-viewing-logs.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-viewing-logs_{context}"] -= Viewing deployment logs - -.Procedure - -. To stream the logs of the latest revision for a given `DeploymentConfig` object: -+ -[source,terminal] ----- -$ oc logs -f dc/<name> ----- -+ -If the latest revision is running or failed, the command returns the logs of the process that is responsible for deploying your pods. If it is successful, it returns the logs from a pod of your application. - -. You can also view logs from older failed deployment processes, if and only if these processes (old replication controllers and their deployer pods) exist and have not been pruned or deleted manually: -+ -[source,terminal] ----- -$ oc logs --version=1 dc/<name> ----- diff --git a/modules/deprecated-feature.adoc b/modules/deprecated-feature.adoc deleted file mode 100644 index 5e6f5b7e3445..000000000000 --- a/modules/deprecated-feature.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// When including this file, ensure that {FeatureName} is set immediately before -// the include. Otherwise it will result in an incorrect replacement. - -[IMPORTANT] -==== -[subs="attributes+"] -{FeatureName} is a deprecated feature. Deprecated functionality is still included in {product-title} and continues to be supported; however, it will be removed in a future release of this product and is not recommended for new deployments. - -For the most recent list of major functionality that has been deprecated or removed within {product-title}, refer to the _Deprecated and removed features_ section of the {product-title} release notes. -==== -// Undefine {FeatureName} attribute, so that any mistakes are easily spotted -:!FeatureName: diff --git a/modules/describe-function-kn.adoc b/modules/describe-function-kn.adoc deleted file mode 100644 index d86daf880a8e..000000000000 --- a/modules/describe-function-kn.adoc +++ /dev/null @@ -1,35 +0,0 @@ -:_content-type: PROCEDURE -[id="describe-function-kn_{context}"] -= Describing a function - -The `kn func info` command prints information about a deployed function, such as the function name, image, namespace, Knative service information, route information, and event subscriptions. 
- -.Procedure - -* Describe a function: -+ -[source,terminal] ----- -$ kn func info [-f <format> -n <namespace> -p <path>] ----- -+ -.Example command -[source,terminal] ----- -$ kn func info -p function/example-function ----- -+ -.Example output -[source,terminal] ----- -Function name: - example-function -Function is built in image: - docker.io/user/example-function:latest -Function is deployed as Knative Service: - example-function -Function is deployed in namespace: - default -Routes: - http://example-function.default.apps.ci-ln-g9f36hb-d5d6b.origin-ci-int-aws.dev.rhcloud.com ----- diff --git a/modules/determining-upgrade-viability-conditiontype.adoc b/modules/determining-upgrade-viability-conditiontype.adoc deleted file mode 100644 index 775b6e771261..000000000000 --- a/modules/determining-upgrade-viability-conditiontype.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/index.adoc - -:_content-type: CONCEPT -[id="understanding_clusteroperator_conditiontypes_{context}"] -= Understanding cluster Operator condition types - -The status of cluster Operators includes their condition type, which informs you of the current state of your Operator's health. The following definitions describe some common ClusterOperator condition types. Operators that have additional condition types and use Operator-specific language have been omitted. - -The Cluster Version Operator (CVO) is responsible for collecting the status conditions from cluster Operators so that cluster administrators can better understand the state of the {product-title} cluster. - -//Condition types, as well as additional information about your operator, can be retrieved in either YAML or JSON format through the `oc get clusterversion -o` command: - -//[source,terminal] -//---- -//$ oc get clusterversion -o yaml -//---- - - -* Available: -The condition type `Available` indicates that an Operator is functional and available in the cluster. If the status is `False`, at least one part of the operand is non-functional and the condition requires an administrator to intervene. - -* Progressing: -The condition type `Progressing` indicates that an Operator is actively rolling out new code, propagating configuration changes, or otherwise moving from one steady state to another. -+ -Operators do not report the condition type `Progressing` as `True` when they are reconciling a previous known state. If the observed cluster state has changed and the Operator is reacting to it, then the status reports back as `True`, since it is moving from one steady state to another. -+ -* Degraded: -The condition type `Degraded` indicates that an Operator has a current state that does not match its required state over a period of time. The period of time can vary by component, but a `Degraded` status represents persistent observation of an Operator's condition. As a result, an Operator does not fluctuate in and out of the `Degraded` state. -+ -There might be a different condition type if the transition from one state to another does not persist over a long enough period to report `Degraded`. -An Operator does not report `Degraded` during the course of a normal update. An Operator may report `Degraded` in response to a persistent infrastructure failure that requires eventual administrator intervention. -+ -[NOTE] -==== -This condition type is only an indication that something may need investigation and adjustment.
As long as the Operator is available, the `Degraded` condition does not cause user workload failure or application downtime. -==== -+ -* Upgradeable: -The condition type `Upgradeable` indicates whether the Operator is safe to update based on the current cluster state. The message field contains a human-readable description of what the administrator needs to do for the cluster to successfully update. The CVO allows updates when this condition is `True`, `Unknown` or missing. -+ -When the `Upgradeable` status is `False`, only minor updates are impacted, and the CVO prevents the cluster from performing impacted updates unless forced. diff --git a/modules/determining-upgrade-viability-cv-conditiontype.adoc b/modules/determining-upgrade-viability-cv-conditiontype.adoc deleted file mode 100644 index 17d7b7a08457..000000000000 --- a/modules/determining-upgrade-viability-cv-conditiontype.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/index.adoc - -:_content-type: CONCEPT -[id="understanding-clusterversion-conditiontypes_{context}"] -= Understanding cluster version condition types - -The Cluster Version Operator (CVO) monitors cluster Operators and other components, and is responsible for collecting the status of both the cluster version and its Operators. This status includes the condition type, which informs you of the health and current state of the {product-title} cluster. - -In addition to `Available`, `Progressing`, and `Upgradeable`, there are condition types that affect cluster versions and Operators. - -* Failing: -The cluster version condition type `Failing` indicates that a cluster cannot reach its desired state, is unhealthy, and requires an administrator to intervene. - -* Invalid: -The cluster version condition type `Invalid` indicates that the cluster version has an error that prevents the server from taking action. The CVO only reconciles the current state as long as this condition is set. - -* RetrievedUpdates: -The cluster version condition type `RetrievedUpdates` indicates whether or not available updates have been retrieved from the upstream update server. The condition is `Unknown` before retrieval, `False` if the updates either recently failed or could not be retrieved, or `True` if the `availableUpdates` field is both recent and accurate. - -* ReleaseAccepted: -The cluster version condition type `ReleaseAccepted` with a `True` status indicates that the requested release payload was successfully loaded without failure during image verification and precondition checking. - -* ImplicitlyEnabledCapabilities: -The cluster version condition type `ImplicitlyEnabledCapabilities` with a `True` status indicates that there are enabled capabilities that the user is not currently requesting through `spec.capabilities`. The CVO does not support disabling capabilities if any associated resources were previously managed by the CVO. 
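Although the original module does not include a verification step, one way to inspect these conditions yourself is to query the `ClusterVersion` and `ClusterOperator` resources directly. The commands below are a minimal sketch; they assume the default `ClusterVersion` resource name `version`, which is the standard name on {product-title} clusters.

[source,terminal]
----
# Show the full cluster version status, including the conditions discussed above.
$ oc get clusterversion version -o yaml

# Summarize the Available, Progressing, and Degraded conditions for each cluster Operator.
$ oc get clusteroperators
----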
- - diff --git a/modules/determining-where-installation-issues-occur.adoc b/modules/determining-where-installation-issues-occur.adoc deleted file mode 100644 index 2df257792878..000000000000 --- a/modules/determining-where-installation-issues-occur.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-installations.adoc - -[id="determining-where-installation-issues-occur_{context}"] -= Determining where installation issues occur - -When troubleshooting {product-title} installation issues, you can monitor installation logs to determine at which stage issues occur. Then, retrieve diagnostic data relevant to that stage. - -{product-title} installation proceeds through the following stages: - -. Ignition configuration files are created. - -. The bootstrap machine boots and starts hosting the remote resources required for the control plane machines to boot. - -. The control plane machines fetch the remote resources from the bootstrap machine and finish booting. - -. The control plane machines use the bootstrap machine to form an etcd cluster. - -. The bootstrap machine starts a temporary Kubernetes control plane using the new etcd cluster. - -. The temporary control plane schedules the production control plane to the control plane machines. - -. The temporary control plane shuts down and passes control to the production control plane. - -. The bootstrap machine adds {product-title} components into the production control plane. - -. The installation program shuts down the bootstrap machine. - -. The control plane sets up the worker nodes. - -. The control plane installs additional services in the form of a set of Operators. - -. The cluster downloads and configures the remaining components needed for day-to-day operation, including the creation of worker machines in supported environments. diff --git a/modules/developer-cli-odo-about-devfiles-in-odo.adoc b/modules/developer-cli-odo-about-devfiles-in-odo.adoc deleted file mode 100644 index e4e97bc98203..000000000000 --- a/modules/developer-cli-odo-about-devfiles-in-odo.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: CONCEPT -[id="about-the-devfile-in-odo"] -= About the devfile in {odo-title} - -The devfile is a portable file that describes your development environment. -With a devfile, you can define a portable development environment without the need for reconfiguration. - -In a devfile, you describe your development environment, such as the source code, IDE tools, application runtimes, and predefined commands. To learn more about the devfile, see link:https://redhat-developer.github.io/devfile/[the devfile documentation]. - -With `{odo-title}`, you can create components from devfiles. When creating a component by using a devfile, `{odo-title}` transforms the devfile into a workspace consisting of multiple containers that run on {product-title}, Kubernetes, or Docker. -`{odo-title}` automatically uses the default devfile registry, but you can add your own registries.
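The last sentence above mentions adding your own registries. As a hedged example, `odo` 2.x provides an `odo registry` command group for this; the registry name and URL below are placeholders rather than real endpoints.

[source,terminal]
----
# Add a custom devfile registry (placeholder name and URL).
$ odo registry add my-registry https://my-registry.example.com

# Confirm which registries odo searches for devfiles.
$ odo registry list
----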
diff --git a/modules/developer-cli-odo-adding-a-custom-builder-to-specify-a-build-image.adoc b/modules/developer-cli-odo-adding-a-custom-builder-to-specify-a-build-image.adoc deleted file mode 100644 index 290da8336df9..000000000000 --- a/modules/developer-cli-odo-adding-a-custom-builder-to-specify-a-build-image.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-single-component-application-with-odo.adoc - -:_content-type: PROCEDURE -[id="adding-a-custom-builder-to-specify-a-build-image_{context}"] -= Adding a custom builder to specify a build image - -With {product-title}, you can add a custom image to bridge the gap between the creation of custom images. - -The following example demonstrates the successful import and use of the `redhat-openjdk-18` image: - -.Prerequisites -* The OpenShift CLI (oc) is installed. - -.Procedure - -. Import the image into {product-title}: -+ -[source,terminal] ----- -$ oc import-image openjdk18 \ ---from=registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift \ ---confirm ----- -. Tag the image to make it accessible to {odo-title}: -+ -[source,terminal] ----- -$ oc annotate istag/openjdk18:latest tags=builder ----- -. Deploy the image with {odo-title}: -+ -[source,terminal] ----- -$ odo create openjdk18 --git \ -https://github.com/openshift-evangelists/Wild-West-Backend ----- diff --git a/modules/developer-cli-odo-adding-storage-to-a-specific-container.adoc b/modules/developer-cli-odo-adding-storage-to-a-specific-container.adoc deleted file mode 100644 index 7ae9158d6d8f..000000000000 --- a/modules/developer-cli-odo-adding-storage-to-a-specific-container.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -//creating_and_deploying_applications_with_odo/working-with-storage.adoc - -:_content-type: PROCEDURE -[id="adding-storage-to-a-specific-container_{context}"] -= Adding storage to a specific container - -If your devfile has multiple containers, you can use the `--container` flag to specify the container you want to attach storage to. - -.Procedure - -. Create a devfile with multiple containers: -+ -[source,yaml] ----- -components: - - name: runtime <1> - container: - image: registry.access.redhat.com/ubi8/nodejs-12:1-36 - memoryLimit: 1024Mi - endpoints: - - name: "3000-tcp" - targetPort: 3000 - mountSources: true - - name: funtime <2> - container: - image: registry.access.redhat.com/ubi8/nodejs-12:1-36 - memoryLimit: 1024Mi ----- -<1> The `runtime` container. -<2> The `funtime` container. - -. To create storage for the `runtime` container: -+ -[source,terminal] ----- -$ odo storage create store --path /data --size 1Gi --container runtime ----- -+ -.Output of the command: -+ -[source,terminal] ----- -✓ Added storage store to nodejs-testing-xnfg - Please use `odo push` command to make the storage accessible to the component ----- - -. Verify that the storage is now attached to your component by listing all storage in the component: -+ -[source,terminal] ----- -$ odo storage list ----- -+ -.Example output -+ -[source,terminal] ----- -The component 'nodejs-testing-xnfg' has the following storage attached: - NAME SIZE PATH CONTAINER STATE - store 1Gi /data runtime Not Pushed ----- - -. 
Push the changes to the cluster: -+ -[source,terminal] ----- -$ odo push ----- diff --git a/modules/developer-cli-odo-adding-storage-to-the-application-components.adoc b/modules/developer-cli-odo-adding-storage-to-the-application-components.adoc deleted file mode 100644 index 8237f11d6462..000000000000 --- a/modules/developer-cli-odo-adding-storage-to-the-application-components.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// *cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-single-component-application-with-odo.adoc -// *cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-storage.adoc - -:_content-type: PROCEDURE -[id="adding-storage-to-the-application-components_{context}"] -= Adding storage to the application components - -Use the `odo storage` command to add persistent data to your application. Examples of data that must persist include database files, dependencies, and build artifacts, such as a `.m2` Maven directory. - -.Procedure - -. Add the storage to your component: -+ -[source,terminal] ----- -$ odo storage create <storage_name> --path=<path_to_the_directory> --size=<size> ----- - -. Push the storage to the cluster: -+ -[source,terminal] ----- -$ odo push ----- - -. Verify that the storage is now attached to your component by listing all storage in the component: -+ -[source,terminal] ----- -$ odo storage list ----- -+ -.Example output -+ -[source,terminal] ----- -The component 'nodejs' has the following storage attached: -NAME SIZE PATH STATE -mystorage 1Gi /data Pushed ----- - -. Delete the storage from your component: -+ -[source,terminal] ----- -$ odo storage delete <storage_name> ----- - -. List all storage to verify that the storage state is `Locally Deleted`: -+ -[source,terminal] ----- -$ odo storage list ----- -+ -.Example output -+ -[source,terminal] ----- -The component 'nodejs' has the following storage attached: -NAME SIZE PATH STATE -mystorage 1Gi /data Locally Deleted ----- - -. Push the changes to the cluster: -+ -[source,terminal] ----- -$ odo push ----- diff --git a/modules/developer-cli-odo-configuring-debugging-parameters.adoc b/modules/developer-cli-odo-configuring-debugging-parameters.adoc deleted file mode 100644 index 51c14736c32b..000000000000 --- a/modules/developer-cli-odo-configuring-debugging-parameters.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/creating_and_deploying_applications_with_odo/debugging-applications-in-odo.adoc - -:_content-type: PROCEDURE -[id="configuring-debugging-parameters_{context}"] - -= Configuring debugging parameters - -You can specify a remote port with `odo config` command and a local port with the `odo debug` command. - -.Procedure - -* To set a remote port on which the debugging agent should run, run: -+ -[source,terminal] ----- -$ odo config set DebugPort 9292 ----- -+ -[NOTE] -==== -You must redeploy your component for this value to be reflected on the component. -==== - -* To set a local port to port forward, run: -+ -[source,terminal] ----- -$ odo debug port-forward --local-port 9292 ----- -+ -[NOTE] -==== -The local port value does not persist. You must provide it every time you need to change the port. 
-==== diff --git a/modules/developer-cli-odo-connecting-a-java-application-to-mysql-database.adoc b/modules/developer-cli-odo-connecting-a-java-application-to-mysql-database.adoc deleted file mode 100644 index 4835230bfbf4..000000000000 --- a/modules/developer-cli-odo-connecting-a-java-application-to-mysql-database.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-java-application-with-a-database - -:_content-type: PROCEDURE -[id="connecting-a-java-application-to-a-database_{context}"] -= Connecting a Java application to a database - -To connect your Java application to the database, use the `odo link` command. - -.Procedure - -. Display the list of services: -+ -[source,terminal] ----- -$ odo service list ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -Database/sampledatabase 6m31s ----- - -. Connect the database to your application: -+ -[source,terminal] ----- -$ odo link Database/sampledatabase ----- - -. Push the changes to your cluster: -+ -[source,terminal] ----- -$ odo push ----- -+ -After the link has been created and pushed, a secret that contains the database connection data is created. - -. Check the component for values injected from the database service: -+ -[source,sh] ----- -$ odo exec -- bash -c 'env | grep DATABASE' -declare -x DATABASE_CLUSTERIP="10.106.182.173" -declare -x DATABASE_DB_NAME="sampledb" -declare -x DATABASE_DB_PASSWORD="samplepwd" -declare -x DATABASE_DB_USER="sampleuser" ----- - -. Open the URL of your Java application and navigate to the `CreatePerson.xhtml` data entry page. Enter a username and age by using the form. Click *Save*. -+ -Note that now you can see the data in the database by clicking the *View Persons Record List* link. -+ -You can also use a CLI tool such as `psql` to manipulate the database. diff --git a/modules/developer-cli-odo-connecting-the-database.adoc b/modules/developer-cli-odo-connecting-the-database.adoc deleted file mode 100644 index b60d9afcef5e..000000000000 --- a/modules/developer-cli-odo-connecting-the-database.adoc +++ /dev/null @@ -1,86 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-multicomponent-application-with-odo.adoc -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc - -[id="Connecting-the-database-to-the-front-end-application_{context}"] -= Connecting the database to the front-end application - -. Link the database to the front-end service: -+ -[source,terminal] ----- -$ odo link mongodb-persistent ----- -+ -.Example output -[source,terminal] ----- - ✓ Service mongodb-persistent has been successfully linked from the component nodejs-nodejs-ex-mhbb - -Following environment variables were added to nodejs-nodejs-ex-mhbb component: -- database_name -- password -- uri -- username -- admin_password ----- - -. See the environment variables of the application and the database in the pod: - -.. Get the pod name: -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -mongodb-1-gsznc 1/1 Running 0 28m -nodejs-nodejs-ex-mhbb-app-4-vkn9l 1/1 Running 0 1m ----- - -.. Connect to the pod: -+ -[source,terminal] ----- -$ oc rsh nodejs-nodejs-ex-mhbb-app-4-vkn9l ----- - -.. 
Check the environment variables: -+ -[source,terminal] ----- -sh-4.2$ env ----- -+ -.Example output -[source,terminal] ----- -uri=mongodb://172.30.126.3:27017 -password=dHIOpYneSkX3rTLn -database_name=sampledb -username=user43U -admin_password=NCn41tqmx7RIqmfv ----- - -. Open the URL in the browser and notice the database configuration in the bottom right: -+ -[source,terminal] ----- -$ odo url list ----- -+ -.Example output -[source,terminal] ----- -Request information -Page view count: 24 - -DB Connection Info: -Type: MongoDB -URL: mongodb://172.30.126.3:27017/sampledb ----- diff --git a/modules/developer-cli-odo-connecting-your-application-to-multiple-services-using-openshift-service-catalog.adoc b/modules/developer-cli-odo-connecting-your-application-to-multiple-services-using-openshift-service-catalog.adoc deleted file mode 100644 index 8351710b60c3..000000000000 --- a/modules/developer-cli-odo-connecting-your-application-to-multiple-services-using-openshift-service-catalog.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-single-component-application-with-odo.adoc - -:_content-type: PROCEDURE -[id="connecting-your-application-to-multiple-services-using-openshift-service-catalog_{context}"] - -= Connecting your application to multiple services using OpenShift Service Catalog - -The OpenShift service catalog is an implementation of the Open Service Broker API (OSB API) for Kubernetes. You can use it to connect applications deployed in {product-title} to a variety of services. - -.Prerequisites - -* You have a running {product-title} cluster. -* The service catalog is installed and enabled on your cluster. - -.Procedure - -* To list the services: -+ -[source,terminal] ----- -$ odo catalog list services ----- - -* To use service catalog-related operations: -+ -[source,terminal] ----- -$ odo service <verb> <service_name> ----- diff --git a/modules/developer-cli-odo-converting-an-s2i-component-into-a-devfile-component.adoc b/modules/developer-cli-odo-converting-an-s2i-component-into-a-devfile-component.adoc deleted file mode 100644 index c88d9727c68e..000000000000 --- a/modules/developer-cli-odo-converting-an-s2i-component-into-a-devfile-component.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module is included in the following assemblies: -// -// cli_reference/developer_cli_odo/using-devfiles-in-odo.adoc - -:_content-type: PROCEDURE -[id="converting-an-s2i-component-into-a-devfile-component_{context}"] -= Converting an S2I component into a devfile component - -With `{odo-title}`, you can create both Source-to-Image (S2I) and devfile components. If you have an existing S2I component, you can convert it into a devfile component using the `odo utils` command. - -.Procedure - -Run all the commands from the S2I component directory. - -. Run the `odo utils convert-to-devfile` command, which creates `devfile.yaml` and `env.yaml` based on your component: -+ -[source,terminal] ----- -$ odo utils convert-to-devfile ----- - -. Push the component to your cluster: -+ -[source,terminal] ----- -$ odo push ----- -+ -[NOTE] -==== -If the devfile component deployment failed, delete it by running: `odo delete -a` -==== -+ - -. Verify that the devfile component deployed successfully: -+ -[source,terminal] ----- -$ odo list ----- - -. 
Delete the S2I component: -+ -[source,terminal] ----- -$ odo delete --s2i ----- diff --git a/modules/developer-cli-odo-creating-a-database-with-odo.adoc b/modules/developer-cli-odo-creating-a-database-with-odo.adoc deleted file mode 100644 index e1da628aab9f..000000000000 --- a/modules/developer-cli-odo-creating-a-database-with-odo.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: - -:_content-type: PROCEDURE -[id="creating-a-database-with-odo_{context}"] - -= Creating a database with `odo` - -To create a database, you must have access to a database Operator. This example uses the Dev4Devs PostgreSQL Operator. - -.Procedure - - -. View the list of services in your project: -+ -[source,terminal] ----- -$ odo catalog list services ----- -+ -.Example output -[source,terminal] ----- -Operators available in the cluster -NAME CRDs -postgresql-operator.v0.1.1 Backup, Database ----- - -. Store the YAML of the service in a file: -+ -[source,terminal] ----- -$ odo service create postgresql-operator.v0.1.1/Database --dry-run > db.yaml ----- - -. Add the following values under the `metadata:` section in the `db.yaml` file: -+ -[source,yaml] ----- - name: sampledatabase - annotations: - service.binding/db.name: 'path={.spec.databaseName}' - service.binding/db.password: 'path={.spec.databasePassword}' - service.binding/db.user: 'path={.spec.databaseUser}' ----- -+ -This configuration ensures that when a database service is started, appropriate annotations are added to it. The annotations help the Service Binding Operator inject the values for `databaseName`, `databasePassword`, and `databaseUser` into the application. - -. Change the following values under the `spec:` section of the YAML file: -+ -[source,yaml] ----- - databaseName: "<database_name>" - databasePassword: "<password>" - databaseUser: "<username>" ----- - -. Create a database from the YAML file: -+ -[source,terminal] ----- -$ odo service create --from-file db.yaml ----- -+ -A database instance is now present in your project. diff --git a/modules/developer-cli-odo-creating-a-java-application-by-using-a-devfile-in-a-disconnected-cluster.adoc b/modules/developer-cli-odo-creating-a-java-application-by-using-a-devfile-in-a-disconnected-cluster.adoc deleted file mode 100644 index b5ec4fd3bb21..000000000000 --- a/modules/developer-cli-odo-creating-a-java-application-by-using-a-devfile-in-a-disconnected-cluster.adoc +++ /dev/null @@ -1,194 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/ - -:_content-type: PROCEDURE -[id="developer-cli-odo-creating-a-java-application-by-using-a-devfile-in-a-disconnected-cluster_{context}"] - -= Creating a Java application by using a devfile in a disconnected cluster - -[WARNING] -==== -This procedure uses external dependencies, such as the `quay.io/eclipse/che-java11-maven:nightly` image and the `springboot-ex` example application, that are not maintained by Red Hat. These dependencies are not maintained with the documentation and their functionality cannot be guaranteed. -==== - -.Prerequisites -* You have created and logged into a disconnected cluster. -* You have added the `quay.io`, `registry.access.redhat.com`, `apache.org`, and `quayio-production-s3.s3.amazonaws.com` URLs to your proxy configuration. - -.Procedure - -. 
Define your Java application in a devfile: -+ -.Example of a devfile -[source,yaml] ----- -schemaVersion: 2.0.0 -metadata: - name: java-maven - version: 1.1.0 -starterProjects: - - name: springbootproject - git: - remotes: - origin: "https://github.com/odo-devfiles/springboot-ex.git" -components: - - name: tools - container: - image: quay.io/eclipse/che-java11-maven:nightly - memoryLimit: 512Mi - mountSources: true - endpoints: - - name: 'http-8080' - targetPort: 8080 - volumeMounts: - - name: m2 - path: /home/user/.m2 - - name: m2 - volume: {} -commands: - - id: mvn-package - exec: - component: tools - commandLine: "mvn -Dmaven.repo.local=/home/user/.m2/repository -Dhttp.proxyHost=<proxy-host> -Dhttp.proxyPort=<proxy-port> -Dhttps.proxyHost=<proxy-host> -Dhttps.proxyPort=<proxy-port> package" - group: - kind: build - isDefault: true - - id: run - exec: - component: tools - commandLine: "java -jar target/*.jar" - group: - kind: run - isDefault: true - - id: debug - exec: - component: tools - commandLine: "java -Xdebug -Xrunjdwp:server=y,transport=dt_socket,address=${DEBUG_PORT},suspend=n -jar target/*.jar" - group: - kind: debug - isDefault: true ----- - -. Create a Java application: -+ -[source,terminal] ----- -$ odo create java-maven --devfile <path-to-your-devfile> --starter ----- -+ -.Example output -[source,terminal] ----- -Validation - ✓ Checking devfile existence [87716ns] - ✓ Creating a devfile component from registry: DefaultDevfileRegistry [107247ns] - ✓ Validating devfile component [396971ns] - - Starter Project - ✓ Downloading starter project springbootproject from https://github.com/odo-devfiles/springboot-ex.git [2s] - -Please use `odo push` command to create the component with source deployed ----- - -. Push the changes to the cluster: -+ -[source,terminal] ----- -$ odo push ----- -+ -.Example output -[source,terminal] ----- -I0224 14:43:18.802512 34741 util.go:727] HTTPGetRequest: https://raw.githubusercontent.com/openshift/odo/master/build/VERSION -I0224 14:43:18.833631 34741 context.go:115] absolute devfile path: '/Users/pkumari/go/src/github.com/openshift/odo/testim/devfile.yaml' -[...] -Downloaded from central: https://repo.maven.apache.org/maven2/org/codehaus/plexus/plexus-utils/3.2.1/plexus-utils-3.2.1.jar (262 kB at 813 kB/s) -[INFO] Replacing main artifact with repackaged archive -[INFO] ------------------------------------------------------------------------ -[INFO] BUILD SUCCESS -[INFO] ------------------------------------------------------------------------ -[INFO] Total time: 19.638 s -[INFO] Finished at: 2021-02-24T08:59:30Z -[INFO] ------------------------------------------------------------------------ - ✓ Executing mvn-package command "mvn -Dmaven.repo.local=/home/user/.m2/repository -Dhttp.proxyHost=<proxy-host> -Dhttp.proxyPort=<proxy-port> -Dhttps.proxyHost=<proxy-host> -Dhttps.proxyPort=<proxy-port> package" [23s] - • Executing run command "java -jar target/*.jar" ... -I0224 14:29:30.557676 34426 exec.go:27] Executing command [/opt/odo/bin/supervisord ctl start devrun] for pod: java-maven-5b8f99fcdb-9dnk6 in container: tools -devrun: started - ✓ Executing run command "java -jar target/*.jar" [3s] - -Pushing devfile component java-maven - ✓ Changes successfully pushed to component ----- - -. 
Display the logs to verify that the application has started: -+ -[source,terminal] ----- -$ odo log ----- -+ -.Example output -[source,terminal] ----- -time="2021-02-24T08:58:58Z" level=info msg="create process:devrun" -time="2021-02-24T08:58:58Z" level=info msg="create process:debugrun" -time="2021-02-24T08:59:32Z" level=debug msg="no auth required" -time="2021-02-24T08:59:32Z" level=debug msg="succeed to find process:devrun" -time="2021-02-24T08:59:32Z" level=info msg="try to start program" program=devrun -time="2021-02-24T08:59:32Z" level=info msg="success to start program" program=devrun -ODO_COMMAND_RUN is java -jar target/*.jar -Executing command java -jar target/*.jar -[...] ----- - -. Create storage for your application: -+ -[source,terminal] ----- -$ odo storage create storage-name --path /data --size 5Gi ----- -+ -.Example output -[source,terminal] ----- -✓ Added storage storage-name to java-maven - -Please use `odo push` command to make the storage accessible to the component ----- - -. Push the changes to the cluster: -+ -[source,terminal] ----- -$ odo push ----- -+ -.Output -[source,terminal] ----- -✓ Waiting for component to start [310ms] - -Validation - ✓ Validating the devfile [100798ns] - -Creating Kubernetes resources for component java-maven - ✓ Waiting for component to start [30s] - ✓ Waiting for component to start [303ms] - -Applying URL changes - ✓ URLs are synced with the cluster, no changes are required. - -Syncing to component java-maven - ✓ Checking file changes for pushing [5ms] - ✓ Syncing files to the component [4s] - -Executing devfile commands for component java-maven - ✓ Waiting for component to start [526ms] - ✓ Executing mvn-package command "mvn -Dmaven.repo.local=/home/user/.m2/repository -Dhttp.proxyHost=<proxy-host> -Dhttp.proxyPort=<proxy-port> -Dhttps.proxyHost=<proxy-host> -Dhttps.proxyPort=<proxy-port> package" [10s] - ✓ Executing run command "java -jar target/*.jar" [3s] - -Pushing devfile component java-maven - ✓ Changes successfully pushed to component ----- diff --git a/modules/developer-cli-odo-creating-a-java-microservice-jpa-application.adoc b/modules/developer-cli-odo-creating-a-java-microservice-jpa-application.adoc deleted file mode 100644 index f604baea521a..000000000000 --- a/modules/developer-cli-odo-creating-a-java-microservice-jpa-application.adoc +++ /dev/null @@ -1,110 +0,0 @@ -= Creating a Java MicroServices JPA application - -With `odo`, you can create and manage a sample Java MicroServices JPA application. - -.Procedure - -. Clone the sample application: -+ -[source,terminal] ----- -$ git clone -b jpa-sample https://github.com/redhat-developer/application-stack-samples.git ----- - -. Navigate to the application directory: -+ -[source,terminal] ----- -$ cd ./application-stack-samples/jpa ----- - -. Initialize the project: -+ -[source,terminal] ----- -$ odo create java-openliberty java-application ----- - -. Push the application to the cluster: -+ -[source,terminal] ----- -$ odo push ----- -+ -The application is now deployed to the cluster. - -. View the status of the cluster by streaming the {product-title} logs to the terminal: -+ -[source,terminal] ----- -$ odo log ----- -+ -Notice the test failures and `UnknownDatabaseHostException` error. 
This is because your application does not have a database yet: -+ -[source,terminal] ----- -[INFO] [err] java.net.UnknownHostException: ${DATABASE_CLUSTERIP} -[INFO] [err] at java.base/java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:220) -[INFO] [err] at java.base/java.net.SocksSocketImpl.connect(SocksSocketImpl.java:403) -[INFO] [err] at java.base/java.net.Socket.connect(Socket.java:609) -[INFO] [err] at org.postgresql.core.PGStream.<init>(PGStream.java:68) -[INFO] [err] at org.postgresql.core.v3.ConnectionFactoryImpl.openConnectionImpl(ConnectionFactoryImpl.java:144) -[INFO] [err] ... 86 more -[ERROR] Tests run: 2, Failures: 1, Errors: 1, Skipped: 0, Time elapsed: 0.706 s <<< FAILURE! - in org.example.app.it.DatabaseIT -[ERROR] testGetAllPeople Time elapsed: 0.33 s <<< FAILURE! -org.opentest4j.AssertionFailedError: Expected at least 2 people to be registered, but there were only: [] ==> expected: <true> but was: <false> - at org.example.app.it.DatabaseIT.testGetAllPeople(DatabaseIT.java:57) - -[ERROR] testGetPerson Time elapsed: 0.047 s <<< ERROR! -java.lang.NullPointerException - at org.example.app.it.DatabaseIT.testGetPerson(DatabaseIT.java:41) - -[INFO] -[INFO] Results: -[INFO] -[ERROR] Failures: -[ERROR] DatabaseIT.testGetAllPeople:57 Expected at least 2 people to be registered, but there were only: [] ==> expected: <true> but was: <false> -[ERROR] Errors: -[ERROR] DatabaseIT.testGetPerson:41 NullPointer -[INFO] -[ERROR] Tests run: 2, Failures: 1, Errors: 1, Skipped: 0 -[INFO] -[ERROR] Integration tests failed: There are test failures. ----- - -. Create an ingress URL to access the application: -+ -[source,terminal] ----- -$ odo url create --port 8080 ----- - -. Push the changes to your cluster: -+ -[source,terminal] ----- -$ odo push ----- - -. Display the created URL: -+ -[source,terminal] ----- -$ odo url list ----- -+ -.Example output -[source,terminal] ----- -Found the following URLs for component mysboproj -NAME STATE URL PORT SECURE KIND -java-application-8080 Pushed http://java-application-8080.apps-crc.testing 8080 false ingress ----- -+ -The application is now deployed to the cluster and you can access it by using the URL that is created. - -. Use the URL to navigate to the `CreatePerson.xhtml` data entry page and enter a username and age by using the form. Click *Save*. -+ -Note that you cannot see the data by clicking the *View Persons Record List* link since your application does not have a database connected yet. diff --git a/modules/developer-cli-odo-creating-a-nodejs-application-by-using-a-devfile-in-a-disconnected-cluster.adoc b/modules/developer-cli-odo-creating-a-nodejs-application-by-using-a-devfile-in-a-disconnected-cluster.adoc deleted file mode 100644 index 266c404ec41b..000000000000 --- a/modules/developer-cli-odo-creating-a-nodejs-application-by-using-a-devfile-in-a-disconnected-cluster.adoc +++ /dev/null @@ -1,147 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/ - -:_content-type: PROCEDURE -[id="developer-cli-odo-creating-a-nodejs-application-by-using-a-devfile-in-a-disconnected-cluster_{context}"] - -= Creating a NodeJS application by using a devfile in a disconnected cluster - -[WARNING] -==== -This procedure is using external dependencies such as `nodejs-ex.git` application that are not maintained by Red Hat. These dependencies are not maintained with the documentation and their functionality cannot be guaranteed. 
-==== - -.Prerequisites -* You have created and logged into a disconnected cluster. -* You have added `raw.githubusercontent.com`, `registry.access.redhat.com`, and `registry.npmjs.org` URLs in your proxy. - -.Procedure - -. Define your NodeJS application in a devfile: -+ -.Example of a devfile -[source,yaml] ----- -schemaVersion: 2.0.0 -metadata: -name: nodejs -starterProjects: -- name: nodejs-starter - git: - remotes: - origin: "https://github.com/odo-devfiles/nodejs-ex.git" -components: -- name: runtime - container: - image: registry.access.redhat.com/ubi8/nodejs-12:1-36 - memoryLimit: 1024Mi - endpoints: - - name: "3000/tcp" - targetPort: 3000 - env: - - name: HTTP_PROXY - value: http://<proxy-host>:<proxy-port> - - name: HTTPS_PROXY - value: http://<proxy-host>:<proxy-port> - mountSources: true -commands: -- id: devbuild - exec: - component: runtime - commandLine: npm install - workingDir: ${PROJECTS_ROOT} - group: - kind: build - isDefault: true -- id: build - exec: - component: runtime - commandLine: npm install - workingDir: ${PROJECTS_ROOT} - group: - kind: build -- id: devrun - exec: - component: runtime - commandLine: npm start - workingDir: ${PROJECTS_ROOT} - group: - kind: run - isDefault: true -- id: run - exec: - component: runtime - commandLine: npm start - workingDir: ${PROJECTS_ROOT} - group: - kind: run ----- - -. Create the application and push the changes to the cluster: -+ -[source,terminal] ----- -$ odo create nodejs --devfile <path-to-your-devfile> --starter $$ odo push ----- -+ -.Example output -[source,terminal] ----- -[...] -Pushing devfile component nodejs - ✓ Changes successfully pushed to component ----- - -. Create a URL to access your application and push it to the cluster: -+ -[source,terminal] ----- -$ odo url create url1 --port 3000 --host example.com --ingress && odo push ----- -+ -.Example output -[source,terminal] ----- -Validation - ✓ Validating the devfile [145374ns] - -Creating Kubernetes resources for component nodejs - ✓ Waiting for component to start [14s] - -Applying URL changes - ✓ URL url1: http://url1.abcdr.com/ created - -Syncing to component nodejs - ✓ Checking file changes for pushing [2ms] - ✓ Syncing files to the component [3s] - -Executing devfile commands for component nodejs - ✓ Executing devbuild command "npm install" [4s] - ✓ Executing devrun command "npm start" [3s] - -Pushing devfile component nodejs - ✓ Changes successfully pushed to component ----- - -. Add the storage to your application -+ -[source,terminal] ----- -$ odo storage create <storage-name> --path /data --size 5Gi ----- -+ -.Example output -[source,terminal] ----- -✓ Added storage abcde to nodejs - -Please use `odo push` command to make the storage accessible to the component ----- - -. 
Push the changes to the cluster: -+ -[source,terminal] ----- -$ odo push ----- diff --git a/modules/developer-cli-odo-creating-a-project.adoc b/modules/developer-cli-odo-creating-a-project.adoc deleted file mode 100644 index acf1ffc53218..000000000000 --- a/modules/developer-cli-odo-creating-a-project.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-single-component-application-with-odo.adoc -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-multicomponent-application-with-odo.adoc -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-projects - -:_content-type: PROCEDURE -[id="creating-a-project_{context}"] -= Creating a project - -Create a project to keep your source code, tests, and libraries organized in a separate single unit. - -.Procedure - -. Log in to an {product-title} cluster: -+ -[source,terminal] ----- -$ odo login -u developer -p developer ----- - -. Create a project: -+ -[source,terminal] ----- -$ odo project create myproject ----- -+ -.Example output -[source,terminal] ----- - ✓ Project 'myproject' is ready for use - ✓ New project created and now using project : myproject ----- diff --git a/modules/developer-cli-odo-creating-a-service-from-an-operator.adoc b/modules/developer-cli-odo-creating-a-service-from-an-operator.adoc deleted file mode 100644 index debfd096a114..000000000000 --- a/modules/developer-cli-odo-creating-a-service-from-an-operator.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-the-operators.adoc - -[id="creating-a-service-from-an-operator_{context}"] - -= Creating a service from an Operator - -If an Operator has valid values defined in its `metadata` to start the requested service, you can use the service with `odo service create`. - -. Print the YAML of the service as a file on your local drive: -+ -[source,terminal] ----- -$ oc get csv/etcdoperator.v0.9.4 -o yaml ----- - -. Verify that the values of the service are valid: -+ -[source,terminal] ----- -apiVersion: etcd.database.coreos.com/v1beta2 -kind: EtcdCluster -metadata: - name: example -spec: - size: 3 - version: 3.2.13 ----- - -. Start an `EtcdCluster` service from the `etcdoperator.v0.9.4` Operator: -+ -[source,terminal] ----- -$ odo service create etcdoperator.v0.9.4 EtcdCluster ----- - -. 
Verify that a service has started: -+ -[source,terminal] ----- -$ oc get EtcdCluster ----- diff --git a/modules/developer-cli-odo-creating-and-deploying-a-nodejs-application-with-odo.adoc b/modules/developer-cli-odo-creating-and-deploying-a-nodejs-application-with-odo.adoc deleted file mode 100644 index 77b3746a88ad..000000000000 --- a/modules/developer-cli-odo-creating-and-deploying-a-nodejs-application-with-odo.adoc +++ /dev/null @@ -1,110 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-single-component-application-with-odo -// * cli_reference/developer_cli_odo/using-odo-in-a-restricted-environment/creating-and-deploying-a-component-to-the-restricted-cluster -ifeval::["{context}" == "creating-a-single-component-application-with-odo"] -:single: -endif::[] -ifeval::["{context}" == "creating-a-multicomponent-application-with-odo"] -:multi: -endif::[] - -:_content-type: PROCEDURE -[id="creating-a-nodejs-application-with-odo_{context}"] -= Creating a Node.js application with {odo-title} - -To create a Node.js component, download the Node.js application and push the source code to your cluster with `{odo-title}`. - -.Procedure - -ifdef::single[] - -. Create a directory for your components: -+ -[source,terminal] ----- -$ mkdir my_components && cd my_components ----- - -. Download the example Node.js application: -+ -[source,terminal] ----- -$ git clone https://github.com/openshift/nodejs-ex ----- -endif::single[] - -ifdef::multi[] - -. Download the example front-end application: -+ -[source,terminal] ----- -$ git clone https://github.com/openshift-evangelists/Wild-West-Frontend frontend ----- - -endif::multi[] - - -. Change the current directory to the directory with your application: -+ -[source,terminal] ----- -$ cd <directory_name> ----- - -. Add a component of the type Node.js to your application: -+ -[source,terminal] ----- -$ odo create nodejs ----- -+ -[NOTE] -==== -By default, the latest image is used. You can also explicitly specify an image version by using `odo create openshift/nodejs:8`. -==== - -. Push the initial source code to the component: -+ -[source,terminal] ----- -$ odo push ----- -+ -Your component is now deployed to {product-title}. - -. Create a URL and add an entry in the local configuration file as follows: -+ -[source,terminal] ----- -$ odo url create --port 8080 ----- -+ -. Push the changes. This creates a URL on the cluster. -+ -[source,terminal] ----- -$ odo push ----- -+ -. List the URLs to check the desired URL for the component. -+ -[source,terminal] ----- -$ odo url list ----- -+ -. View your deployed application using the generated URL. 
-+ -[source,terminal] ----- -$ curl <url> ----- - -ifeval::["{context}" == "creating-a-single-component-application-with-odo"] -:!single: -endif::[] -ifeval::["{context}" == "creating-a-multicomponent-application-with-odo"] -:!multi: -endif::[] diff --git a/modules/developer-cli-odo-creating-services-from-yaml-files.adoc b/modules/developer-cli-odo-creating-services-from-yaml-files.adoc deleted file mode 100644 index f33b7a9e91e0..000000000000 --- a/modules/developer-cli-odo-creating-services-from-yaml-files.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-the-operators.adoc - -[id="creating-services-from-yaml-files_{context}"] - -= Creating services from YAML files - -If the YAML definition of the service or custom resource (CR) has invalid or placeholder data, you can use the `--dry-run` flag to get the YAML definition, specify the correct values, and start the service using the corrected YAML definition. -Printing and modifying the YAML used to start a service -`{odo-title}` provides the feature to print the YAML definition of the service or CR provided by the Operator before starting a service. - -. To display the YAML of the service, run: -+ -[source,terminal] ----- -$ odo service create <operator-name> --dry-run ----- -+ -For example, to print YAML definition of `EtcdCluster` provided by the `etcdoperator.v0.9.4` Operator, run: -+ -[source,terminal] ----- -$ odo service create etcdoperator.v0.9.4 --dry-run ----- -+ -The YAML is saved as the `etcd.yaml` file. - -. Modify the `etcd.yaml` file: -+ -[source,yaml] ----- -apiVersion: etcd.database.coreos.com/v1beta2 -kind: EtcdCluster -metadata: - name: my-etcd-cluster // <1> -spec: - size: 1 // <2> - version: 3.2.13 ----- -+ -<1> Change the name from `example` to `my-etcd-cluster` -<2> Reduce the size from `3` to `1` - -. Start a service from the YAML file: -+ -[source,terminal] ----- -$ odo service create --from-file etcd.yaml ----- - -. Verify that the `EtcdCluster` service has started with one pod instead of the pre-configured three pods: -+ -[source,terminal] ----- -$ oc get pods | grep my-etcd-cluster ----- diff --git a/modules/developer-cli-odo-debugging-an-application.adoc b/modules/developer-cli-odo-debugging-an-application.adoc deleted file mode 100644 index a4fd9cc5d0ba..000000000000 --- a/modules/developer-cli-odo-debugging-an-application.adoc +++ /dev/null @@ -1,103 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/creating_and_deploying_applications_with_odo/debugging-applications-in-odo.adoc - -:_content-type: PROCEDURE -[id="debugging-an-application_{context}"] - -= Debugging an application - -You can debug your application in `odo` with the `odo debug` command. - -.Procedure - -. Download the sample application that contains the necessary `debugrun` step within its devfile: -+ -[source,terminal] ----- -$ odo create nodejs --starter ----- -+ -.Example output -[source,terminal] ----- -Validation - ✓ Checking devfile existence [11498ns] - ✓ Checking devfile compatibility [15714ns] - ✓ Creating a devfile component from registry: DefaultDevfileRegistry [17565ns] - ✓ Validating devfile component [113876ns] - -Starter Project - ✓ Downloading starter project nodejs-starter from https://github.com/odo-devfiles/nodejs-ex.git [428ms] - -Please use `odo push` command to create the component with source deployed ----- - -. 
Push the application with the `--debug` flag, which is required for all debugging deployments: -+ -[source,terminal] ----- -$ odo push --debug ----- -+ -.Example output -[source,terminal] ----- -Validation - ✓ Validating the devfile [29916ns] - -Creating Kubernetes resources for component nodejs - ✓ Waiting for component to start [38ms] - -Applying URL changes - ✓ URLs are synced with the cluster, no changes are required. - -Syncing to component nodejs - ✓ Checking file changes for pushing [1ms] - ✓ Syncing files to the component [778ms] - -Executing devfile commands for component nodejs - ✓ Executing install command "npm install" [2s] - ✓ Executing debug command "npm run debug" [1s] - -Pushing devfile component nodejs - ✓ Changes successfully pushed to component ----- -+ -[NOTE] -==== -You can specify a custom debug command by using the `--debug-command="custom-step"` flag. -==== - -. Port forward to the local port to access the debugging interface: -+ -[source,terminal] ----- -$ odo debug port-forward ----- -+ -.Example output -[source,terminal] ----- -Started port forwarding at ports - 5858:5858 ----- -+ -[NOTE] -==== -You can specify a port by using the `--local-port` flag. -==== - -. Check that the debug session is running in a separate terminal window: -+ -[source,terminal] ----- -$ odo debug info ----- -+ -.Example output -[source,terminal] ----- -Debug is running for the component on the local port : 5858 ----- - -. Attach the debugger that is bundled in your IDE of choice. Instructions vary depending on your IDE, for example: link:https://code.visualstudio.com/docs/nodejs/nodejs-debugging#_remote-debugging[VSCode debugging interface]. diff --git a/modules/developer-cli-odo-deleting-an-application.adoc b/modules/developer-cli-odo-deleting-an-application.adoc deleted file mode 100644 index 3427953a2747..000000000000 --- a/modules/developer-cli-odo-deleting-an-application.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-single-component-application-with-odo.adoc -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-multicomponent-application-with-odo.adoc - -:_content-type: PROCEDURE -[id="deleting-an-application_{context}"] -= Deleting an application - -Use the `odo app delete` command to delete your application. - -.Procedure - -. List the applications in the current project: -+ -[source,terminal] ----- -$ odo app list ----- -+ -.Example output -[source,terminal] ----- - The project '<project_name>' has the following applications: - NAME - app ----- - -. List the components associated with the applications. These components will be deleted with the application: -+ -[source,terminal] ----- -$ odo component list ----- -+ -.Example output -[source,terminal] ----- - APP NAME TYPE SOURCE STATE - app nodejs-nodejs-ex-elyf nodejs file://./ Pushed ----- - -. Delete the application: -+ -[source,terminal] ----- -$ odo app delete <application_name> ----- -+ -.Example output -[source,terminal] ----- - ? Are you sure you want to delete the application: <application_name> from project: <project_name> ----- -. Confirm the deletion with `Y`. You can suppress the confirmation prompt using the `-f` flag. 
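For example (a minimal sketch that reuses the `<application_name>` placeholder from the steps above), the deletion can be run non-interactively by combining the command with the `-f` flag described in this procedure:

[source,terminal]
----
$ odo app delete <application_name> -f
----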
diff --git a/modules/developer-cli-odo-deploying-a-database-in-interactive-mode.adoc b/modules/developer-cli-odo-deploying-a-database-in-interactive-mode.adoc deleted file mode 100644 index 7459638ad96e..000000000000 --- a/modules/developer-cli-odo-deploying-a-database-in-interactive-mode.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc - -:_content-type: PROCEDURE -[id="deploying-a-database-in-interactive-mode_{context}"] -= Deploying a database in interactive mode - -{odo-title} provides a command-line interactive mode which simplifies deployment. - -.Procedure - -* Run the interactive mode and answer the prompts: -+ -[source,terminal] ----- -$ odo service create ----- -+ -.Example output -[source,terminal] ----- -? Which kind of service do you wish to create database -? Which database service class should we use mongodb-persistent -? Enter a value for string property DATABASE_SERVICE_NAME (Database Service Name): mongodb -? Enter a value for string property MEMORY_LIMIT (Memory Limit): 512Mi -? Enter a value for string property MONGODB_DATABASE (MongoDB Database Name): sampledb -? Enter a value for string property MONGODB_VERSION (Version of MongoDB Image): 3.2 -? Enter a value for string property VOLUME_CAPACITY (Volume Capacity): 1Gi -? Provide values for non-required properties No -? How should we name your service mongodb-persistent -? Output the non-interactive version of the selected options No -? Wait for the service to be ready No - ✓ Creating service [32ms] - ✓ Service 'mongodb-persistent' was created -Progress of the provisioning will not be reported and might take a long time. -You can see the current status by executing 'odo service list' ----- - -[NOTE] -==== -Your password or username will be passed to the front-end application as environment variables. -==== diff --git a/modules/developer-cli-odo-deploying-a-database-manually.adoc b/modules/developer-cli-odo-deploying-a-database-manually.adoc deleted file mode 100644 index 8a2f83462279..000000000000 --- a/modules/developer-cli-odo-deploying-a-database-manually.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc - -[id="deploying-a-database-manually_{context}"] -= Deploying a database manually - -. List the available services: -+ -[source,terminal] ----- -$ odo catalog list services ----- -+ -.Example output -[source,terminal] ----- -NAME PLANS -django-psql-persistent default -jenkins-ephemeral default -jenkins-pipeline-example default -mariadb-persistent default -mongodb-persistent default -mysql-persistent default -nodejs-mongo-persistent default -postgresql-persistent default -rails-pgsql-persistent default ----- - -. 
Choose the `mongodb-persistent` type of service and see the required parameters: -+ -[source,terminal] ----- -$ odo catalog describe service mongodb-persistent ----- -+ -.Example output -[source,terminal] ----- - *********************** | ***************************************************** - Name | default - ----------------- | ----------------- - Display Name | - ----------------- | ----------------- - Short Description | Default plan - ----------------- | ----------------- - Required Params without a | - default value | - ----------------- | ----------------- - Required Params with a default | DATABASE_SERVICE_NAME - value | (default: 'mongodb'), - | MEMORY_LIMIT (default: - | '512Mi'), MONGODB_VERSION - | (default: '3.2'), - | MONGODB_DATABASE (default: - | 'sampledb'), VOLUME_CAPACITY - | (default: '1Gi') - ----------------- | ----------------- - Optional Params | MONGODB_ADMIN_PASSWORD, - | NAMESPACE, MONGODB_PASSWORD, - | MONGODB_USER ----- - -. Pass the required parameters as flags and wait for the deployment of the database: -+ -[source,terminal] ----- -$ odo service create mongodb-persistent --plan default --wait -p DATABASE_SERVICE_NAME=mongodb -p MEMORY_LIMIT=512Mi -p MONGODB_DATABASE=sampledb -p VOLUME_CAPACITY=1Gi ----- diff --git a/modules/developer-cli-odo-deploying-a-java-application-using-a-devfile.adoc b/modules/developer-cli-odo-deploying-a-java-application-using-a-devfile.adoc deleted file mode 100644 index 6e5bd97d0c8b..000000000000 --- a/modules/developer-cli-odo-deploying-a-java-application-using-a-devfile.adoc +++ /dev/null @@ -1,129 +0,0 @@ -// Module included in the following assemblies: -// -// cli_reference/developer_cli_odo/creating-a-java-application-using-devfile - -:_content-type: PROCEDURE -[id="deploying-a-java-application-using-a-devfile_{context}"] -= Deploying a Java application using a devfile - -In this section, you will learn how to deploy a sample Java project that uses Maven and Java 8 JDK using a devfile. - -.Procedure - -. Create a directory to store the source code of your component: -+ -[source,terminal] ----- -$ mkdir <directory-name> ----- - -. Create a component configuration of Spring Boot component type named `myspring` and download its sample project: -+ -[source,terminal] ----- -$ odo create java-springboot myspring --starter ----- -+ -The previous command produces the following output: -+ -[source,terminal] ----- -Validation -✓ Checking devfile compatibility [195728ns] -✓ Creating a devfile component from registry: DefaultDevfileRegistry [170275ns] -✓ Validating devfile component [281940ns] - -Please use `odo push` command to create the component with source deployed ----- -+ -The `odo create` command downloads the associated `devfile.yaml` file from the recorded devfile registries. - -. List the contents of the directory to confirm that the devfile and the sample Java application were downloaded: -+ -[source,terminal] ----- -$ ls ----- -+ -The previous command produces the following output: -+ -[source,terminal] ----- -README.md devfile.yaml pom.xml src ----- - -. Create a URL to access the deployed component: -+ -[source,terminal] ----- -$ odo url create --host apps-crc.testing ----- -+ -The previous command produces the following output: -+ -[source,terminal] ----- -✓ URL myspring-8080.apps-crc.testing created for component: myspring - -To apply the URL configuration changes, please use odo push ----- -+ -[NOTE] -==== -You must use your cluster host domain name when creating the URL. -==== - -. 
Push the component to the cluster: -+ -[source,terminal] ----- -$ odo push ----- -+ -The previous command produces the following output: -+ -[source,terminal] ----- -Validation - ✓ Validating the devfile [81808ns] - -Creating Kubernetes resources for component myspring - ✓ Waiting for component to start [5s] - -Applying URL changes - ✓ URL myspring-8080: http://myspring-8080.apps-crc.testing created - -Syncing to component myspring - ✓ Checking files for pushing [2ms] - ✓ Syncing files to the component [1s] - -Executing devfile commands for component myspring - ✓ Executing devbuild command "/artifacts/bin/build-container-full.sh" [1m] - ✓ Executing devrun command "/artifacts/bin/start-server.sh" [2s] - -Pushing devfile component myspring - ✓ Changes successfully pushed to component ----- - -. List the URLs of the component to verify that the component was pushed successfully: -+ -[source,terminal] ----- -$ odo url list ----- -+ -The previous command produces the following output: -+ -[source,terminal] ----- -Found the following URLs for component myspring -NAME URL PORT SECURE -myspring-8080 http://myspring-8080.apps-crc.testing 8080 false ----- - -. View your deployed application by using the generated URL: -+ -[source,terminal] ----- -$ curl http://myspring-8080.apps-crc.testing ----- diff --git a/modules/developer-cli-odo-deploying-the-back-end-component.adoc b/modules/developer-cli-odo-deploying-the-back-end-component.adoc deleted file mode 100644 index 99e01e88ef9c..000000000000 --- a/modules/developer-cli-odo-deploying-the-back-end-component.adoc +++ /dev/null @@ -1,214 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-multicomponent-application-with-odo.adoc - -:_content-type: PROCEDURE -[id="deploying-the-back-end-component_{context}"] - -= Deploying the back-end component - -To create a Java component, import the Java builder image, download the Java application and push the source code to your cluster with `{odo-title}`. - -.Procedure - -. Import `openjdk18` into the cluster: -+ -[source,terminal] ----- -$ oc import-image openjdk18 \ ---from=registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift --confirm ----- - -. Tag the image as `builder` to make it accessible for {odo-title}: -+ -[source,terminal] ----- -$ oc annotate istag/openjdk18:latest tags=builder ----- - -. 
Run `odo catalog list components` to see the created image: -+ -[source,terminal] ----- -$ odo catalog list components ----- -+ -.Example output -[source,terminal] ----- -Odo Devfile Components: -NAME DESCRIPTION REGISTRY -java-maven Upstream Maven and OpenJDK 11 DefaultDevfileRegistry -java-openliberty Open Liberty microservice in Java DefaultDevfileRegistry -java-quarkus Upstream Quarkus with Java+GraalVM DefaultDevfileRegistry -java-springboot Spring Boot® using Java DefaultDevfileRegistry -nodejs Stack with NodeJS 12 DefaultDevfileRegistry - -Odo OpenShift Components: -NAME PROJECT TAGS SUPPORTED -java openshift 11,8,latest YES -dotnet openshift 2.1,3.1,latest NO -golang openshift 1.13.4-ubi7,1.13.4-ubi8,latest NO -httpd openshift 2.4-el7,2.4-el8,latest NO -nginx openshift 1.14-el7,1.14-el8,1.16-el7,1.16-el8,latest NO -nodejs openshift 10-ubi7,10-ubi8,12-ubi7,12-ubi8,latest NO -perl openshift 5.26-el7,5.26-ubi8,5.30-el7,latest NO -php openshift 7.2-ubi7,7.2-ubi8,7.3-ubi7,7.3-ubi8,latest NO -python openshift 2.7-ubi7,2.7-ubi8,3.6-ubi7,3.6-ubi8,3.8-ubi7,3.8-ubi8,latest NO -ruby openshift 2.5-ubi7,2.5-ubi8,2.6-ubi7,2.6-ubi8,2.7-ubi7,latest NO -wildfly openshift 10.0,10.1,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0,8.1,9.0,latest NO ----- - -. Create a directory for your components: -+ -[source,terminal] ----- -$ mkdir my_components && cd my_components ----- - -. Download the example back-end application: -+ -[source,terminal] ----- -$ git clone https://github.com/openshift-evangelists/Wild-West-Backend backend ----- - -. Change to the back-end source directory: -+ -[source,terminal] ----- -$ cd backend ----- - -. Check that you have the correct files in the directory: -+ -[source,terminal] ----- -$ ls ----- -+ -.Example output -[source,terminal] ----- -debug.sh pom.xml src ----- - -. Build the back-end source files with Maven to create a JAR file: -+ -[source,terminal] ----- -$ mvn package ----- -+ -.Example output -[source,terminal] ----- -... -[INFO] -------------------------------------- -[INFO] BUILD SUCCESS -[INFO] -------------------------------------- -[INFO] Total time: 2.635 s -[INFO] Finished at: 2019-09-30T16:11:11-04:00 -[INFO] Final Memory: 30M/91M -[INFO] -------------------------------------- ----- - -. Create a component configuration of Java component-type named `backend`: -+ -[source,terminal] ----- -$ odo create --s2i openjdk18 backend --binary target/wildwest-1.0.jar ----- -+ -.Example output -[source,terminal] ----- - ✓ Validating component [1ms] - Please use `odo push` command to create the component with source deployed ----- -+ -Now the configuration file `config.yaml` is in the local directory of the back-end component that contains information about the component for deployment. - -. Check the configuration settings of the back-end component in the `config.yaml` file using: -+ -[source,terminal] ----- -$ odo config view ----- -+ -.Example output -[source,terminal] ----- -COMPONENT SETTINGS ------------------------------------------------- -PARAMETER CURRENT_VALUE -Type openjdk18 -Application app -Project myproject -SourceType binary -Ref -SourceLocation target/wildwest-1.0.jar -Ports 8080/TCP,8443/TCP,8778/TCP -Name backend -MinMemory -MaxMemory -DebugPort -Ignore -MinCPU -MaxCPU ----- - -. Push the component to the {product-title} cluster. 
-+ -[source,terminal] ----- -$ odo push ----- -+ -.Example output -[source,terminal] ----- -Validation - ✓ Checking component [6ms] - -Configuration changes - ✓ Initializing component - ✓ Creating component [124ms] - -Pushing to component backend of type binary - ✓ Checking files for pushing [1ms] - ✓ Waiting for component to start [48s] - ✓ Syncing files to the component [811ms] - ✓ Building component [3s] ----- -+ -Using `odo push`, {product-title} creates a container to host the back-end component, deploys the container into a pod running on the {product-title} cluster, and starts the `backend` component. - -. Validate: - -* The status of the action in {odo-title}: -+ -[source,terminal] ----- -$ odo log -f ----- -+ -.Example output -[source,terminal] ----- -2019-09-30 20:14:19.738 INFO 444 --- [ main] c.o.wildwest.WildWestApplication : Starting WildWestApplication v1.0 onbackend-app-1-9tnhc with PID 444 (/deployments/wildwest-1.0.jar started by jboss in /deployments) ----- - -* The status of the back-end component: -+ -[source,terminal] ----- -$ odo list ----- -+ -.Example output -[source,terminal] ----- -APP NAME TYPE SOURCE STATE -app backend openjdk18 file://target/wildwest-1.0.jar Pushed ----- diff --git a/modules/developer-cli-odo-deploying-the-front-end-component.adoc b/modules/developer-cli-odo-deploying-the-front-end-component.adoc deleted file mode 100644 index 269e3da609ff..000000000000 --- a/modules/developer-cli-odo-deploying-the-front-end-component.adoc +++ /dev/null @@ -1,144 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-multicomponent-application-with-odo.adoc -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc - -ifeval::["{context}" == "creating-a-multicomponent-application-with-odo"] -:multi: -endif::[] -ifeval::["{context}" == "creating-an-application-with-a-database"] -:database: -endif::[] - -:_content-type: PROCEDURE -[id="deploying-the-front-end-component_{context}"] - -= Deploying the front-end component - -To create and deploy a front-end component, download the Node.js application and push the source code to your cluster with `{odo-title}`. - -.Procedure - -. Download the example front-end application: -+ -[source,terminal] ----- -$ git clone https://github.com/openshift/nodejs-ex frontend ----- - -. Change the current directory to the front-end directory: -+ -[source,terminal] ----- -$ cd frontend ----- - -. List the contents of the directory to see that the front end is a Node.js application. -+ -[source,terminal] ----- -$ ls ----- -+ -.Example output -[source,terminal] ----- -README.md openshift server.js views -helm package.json tests ----- -+ -[NOTE] -==== -The front-end component is written in an interpreted language (Node.js); it does not need to be built. -==== - -. Create a component configuration of Node.js component-type named `frontend`: -+ -[source,terminal] ----- -$ odo create --s2i nodejs frontend ----- -+ -.Example output -[source,terminal] ----- - ✓ Validating component [5ms] -Please use `odo push` command to create the component with source deployed ----- - -ifdef::database[] -. Create a URL to access the frontend interface. -+ -[source,terminal] ----- -$ odo url create myurl ----- -+ -.Example output -[source,terminal] ----- - ✓ URL myurl created for component: nodejs-nodejs-ex-pmdp ----- - -. Push the component to the {product-title} cluster. 
-+ -[source,terminal] ----- -$ odo push ----- -+ -.Example output -[source,terminal] ----- -Validation - ✓ Checking component [7ms] - - Configuration changes - ✓ Initializing component - ✓ Creating component [134ms] - - Applying URL changes - ✓ URL myurl: http://myurl-app-myproject.192.168.42.79.nip.io created - - Pushing to component nodejs-nodejs-ex-mhbb of type local - ✓ Checking files for pushing [657850ns] - ✓ Waiting for component to start [6s] - ✓ Syncing files to the component [408ms] - ✓ Building component [7s] - ✓ Changes successfully pushed to component ----- -endif::database[] - -ifdef::multi[] -. Push the component to a running container. -+ -[source,terminal] ----- -$ odo push ----- -+ -.Example output -[source,terminal] ----- -Validation - ✓ Checking component [8ms] - -Configuration changes - ✓ Initializing component - ✓ Creating component [83ms] - -Pushing to component frontend of type local - ✓ Checking files for pushing [2ms] - ✓ Waiting for component to start [45s] - ✓ Syncing files to the component [3s] - ✓ Building component [18s] - ✓ Changes successfully pushed to component ----- -endif::multi[] - -ifeval::["{context}" == "creating-a-multicomponent-application-with-odo"] -:!multi: -endif::[] -ifeval::["{context}" == "creating-an-application-with-a-database"] -:!database: -endif::[] diff --git a/modules/developer-cli-odo-developer-setup.adoc b/modules/developer-cli-odo-developer-setup.adoc deleted file mode 100644 index 6f12c5abc80e..000000000000 --- a/modules/developer-cli-odo-developer-setup.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/odo-architecture.adoc - -[id="developer-setup_{context}"] - -= Developer setup - -With {odo-title} you can create and deploy application on {product-title} clusters from a terminal. Code editor plugins use {odo-title} which allows users to interact with {product-title} clusters from their IDE terminals. Examples of plugins that use {odo-title}: VS Code OpenShift Connector, OpenShift Connector for Intellij, Codewind for Eclipse Che. - -{odo-title} works on Windows, macOS, and Linux operating systems and from any terminal. {odo-title} provides autocompletion for bash and zsh command line shells. - -{odo-title} supports Node.js and Java components. diff --git a/modules/developer-cli-odo-exposing-the-components-to-the-public.adoc b/modules/developer-cli-odo-exposing-the-components-to-the-public.adoc deleted file mode 100644 index 3478a5de5959..000000000000 --- a/modules/developer-cli-odo-exposing-the-components-to-the-public.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-multicomponent-application-with-odo.adoc - -:_content-type: PROCEDURE -[id="exposing-the-components-to-the-public_{context}"] - -= Exposing components to the public - -.Procedure - -. Navigate to the `frontend` directory: -+ -[source,terminal] ----- -$ cd frontend ----- - -. Create an external URL for the application: -+ -[source,terminal] ----- -$ odo url create frontend --port 8080 ----- -+ -.Example output -[source,terminal] ----- - ✓ URL frontend created for component: frontend - -To create URL on the OpenShift cluster, use `odo push` ----- - -. 
Apply the changes: -+ -[source,terminal] ----- -$ odo push ----- -+ -.Example output -[source,terminal] ----- -Validation - ✓ Checking component [21ms] - -Configuration changes - ✓ Retrieving component data [35ms] - ✓ Applying configuration [29ms] - -Applying URL changes - ✓ URL frontend: http://frontend-app-myproject.192.168.42.79.nip.io created - -Pushing to component frontend of type local - ✓ Checking file changes for pushing [1ms] - ✓ No file changes detected, skipping build. Use the '-f' flag to force the build. ----- - -. Open the URL in a browser to view the application. - -[NOTE] -==== -If an application requires permissions to the active service account to access the {product-title} namespace and delete active pods, the following error may occur when looking at `odo log` from the back-end component: - -`Message: Forbidden!Configured service account doesn't have access. Service account may have been revoked` - -To resolve this error, add permissions for the service account role: - -[source,terminal] ----- -$ oc policy add-role-to-group view system:serviceaccounts -n <project> ----- - -[source,terminal] ----- -$ oc policy add-role-to-group edit system:serviceaccounts -n <project> ----- - -Do not do this on a production cluster. -==== diff --git a/modules/developer-cli-odo-ignoring-files-or-patterns.adoc b/modules/developer-cli-odo-ignoring-files-or-patterns.adoc deleted file mode 100644 index 66b6f6808461..000000000000 --- a/modules/developer-cli-odo-ignoring-files-or-patterns.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc - -[id="ignoring-files-or-patterns_{context}"] -= Ignoring files or patterns - -You can configure a list of files or patterns to ignore by modifying the `.odoignore` file in the root directory of your application. This applies to both `odo push` and `odo watch`. - -If the `.odoignore` file does _not_ exist, the `.gitignore` file is used instead for ignoring specific files and folders. - -To ignore `.git` files, any files with the `.js` extension, and the folder `tests`, add the following to either the `.odoignore` or the `.gitignore` file: - ----- -.git -*.js -tests/ ----- - -The `.odoignore` file allows any glob expressions. \ No newline at end of file diff --git a/modules/developer-cli-odo-installing-odo-on-linux-on-ibm-power.adoc b/modules/developer-cli-odo-installing-odo-on-linux-on-ibm-power.adoc deleted file mode 100644 index 34fd462aec70..000000000000 --- a/modules/developer-cli-odo-installing-odo-on-linux-on-ibm-power.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/installing-odo.adoc - -[id="installing-odo-on-linux-on-ibm-power_{context}"] -= Installing {odo-title} on Linux on IBM Power - -[id="installing-odo-on-linux-on-ibm-power-binary_{context}"] -== Binary installation - -.Procedure - -. Obtain the binary: -+ -[source,terminal] ----- -# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/odo/latest/odo-linux-ppc64le -o /usr/local/bin/odo ----- - -. Change the permissions on the file: -+ -[source,terminal] ----- -# chmod +x /usr/local/bin/odo ----- - -[id="installing-odo-on-linux-on-ibm-power-tarball_{context}"] -== Tarball installation - -.Procedure - -. Obtain the tarball: -+ -[source,terminal] ----- -# sh -c 'curl -L https://mirror.openshift.com/pub/openshift-v4/clients/odo/latest/odo-linux-ppc64le.tar.gz | gzip -d > /usr/local/bin/odo' ----- - -. 
Change the permissions on the file: -+ -[source,terminal] ----- -# chmod +x /usr/local/bin/odo ----- diff --git a/modules/developer-cli-odo-installing-odo-on-linux-on-ibm-z.adoc b/modules/developer-cli-odo-installing-odo-on-linux-on-ibm-z.adoc deleted file mode 100644 index 38ab5abb40a1..000000000000 --- a/modules/developer-cli-odo-installing-odo-on-linux-on-ibm-z.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/installing-odo.adoc - -[id="installing-odo-on-linux-on-ibm-z"] - -= Installing {odo-title} on Linux on {ibmzProductName} and {linuxoneProductName} - -== Binary installation - -.Procedure - -. Obtain the binary: -+ -[source,terminal] ----- -# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/odo/latest/odo-linux-s390x -o /usr/local/bin/odo ----- - -. Change the permissions on the file: -+ -[source,terminal] ----- -# chmod +x /usr/local/bin/odo ----- - -== Tarball installation - -.Procedure - -. Obtain the tarball: -+ -[source,terminal] ----- -# sh -c 'curl -L https://mirror.openshift.com/pub/openshift-v4/clients/odo/latest/odo-linux-s390x.tar.gz | gzip -d > /usr/local/bin/odo' ----- - -. Change the permissions on the file: -+ -[source,terminal] ----- -# chmod +x /usr/local/bin/odo ----- diff --git a/modules/developer-cli-odo-installing-odo-on-linux-rpm.adoc b/modules/developer-cli-odo-installing-odo-on-linux-rpm.adoc deleted file mode 100644 index 0f7a5ab53451..000000000000 --- a/modules/developer-cli-odo-installing-odo-on-linux-rpm.adoc +++ /dev/null @@ -1,57 +0,0 @@ -:_content-type: PROCEDURE -[id="installing-odo-on-linux-rpm_{context}"] - -= Installing {odo-title} on {op-system-base-full} using an RPM - -For {op-system-base-full}, you can install the `{odo-title}` CLI as an RPM. - -.Procedure - -. Register with Red Hat Subscription Manager: -+ -[source,terminal] ----- -# subscription-manager register ----- - -. Pull the latest subscription data: -+ -[source,terminal] ----- -# subscription-manager refresh ----- - -. List the available subscriptions: -+ -[source,terminal] ----- -# subscription-manager list --available --matches '*OpenShift Developer Tools and Services*' ----- - -. In the output of the previous command, find the `Pool ID` field for your {product-title} subscription and attach the subscription to the registered system: -+ -[source,terminal] ----- -# subscription-manager attach --pool=<pool_id> ----- - -. Enable the repositories required by `{odo-title}`: -+ -[source,terminal,subs="attributes+"] ----- -# subscription-manager repos --enable="ocp-tools-{product-version}-for-rhel-8-x86_64-rpms" ----- - -. Install the `{odo-title}` package: -+ -[source,terminal] ----- -# yum install odo ----- - -. 
Verify that `{odo-title}` is now available on your system: -+ -[source,terminal] ----- -$ odo version ----- diff --git a/modules/developer-cli-odo-installing-odo-on-linux.adoc b/modules/developer-cli-odo-installing-odo-on-linux.adoc deleted file mode 100644 index f1598be3fc7d..000000000000 --- a/modules/developer-cli-odo-installing-odo-on-linux.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/installing-odo.adoc - -:_content-type: PROCEDURE -[id="installing-odo-on-linux_{context}"] - -= Installing {odo-title} on Linux - -The `{odo-title}` CLI is available to download as a binary and as a tarball for multiple operating systems and architectures including: - -[cols="2,1,1",options="header"] -|=== -|Operating System|Binary|Tarball -|Linux|link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-linux-amd64[odo-linux-amd64] |link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-linux-amd64.tar.gz[odo-linux-amd64.tar.gz] -|Linux on {ibmpowerProductName}|link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-linux-ppc64le[odo-linux-ppc64le] |link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-linux-ppc64le.tar.gz[odo-linux-ppc64le.tar.gz] -|Linux on {ibmzProductName} and {linuxoneProductName}|link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-linux-s390x[odo-linux-s390x] |link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-linux-s390x.tar.gz[odo-linux-s390x.tar.gz] -|=== - - -.Procedure - -. Navigate to the link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/[content gateway] and download the appropriate file for your operating system and architecture. -** If you download the binary, rename it to `odo`: -+ -[source,terminal] ----- -$ curl -L https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-linux-amd64 -o odo ----- -** If you download the tarball, extract the binary: -+ -[source,terminal] ----- -$ curl -L https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-linux-amd64.tar.gz -o odo.tar.gz -$ tar xvzf odo.tar.gz ----- -. Change the permissions on the binary: -+ -[source,terminal] ----- -$ chmod +x <filename> ----- -. Place the `{odo-title}` binary in a directory that is on your `PATH`. -+ -To check your `PATH`, execute the following command: -+ -[source,terminal] ----- -$ echo $PATH ----- -. Verify that `{odo-title}` is now available on your system: -+ -[source,terminal] ----- -$ odo version ----- diff --git a/modules/developer-cli-odo-installing-odo-on-macos.adoc b/modules/developer-cli-odo-installing-odo-on-macos.adoc deleted file mode 100644 index eac77e49908d..000000000000 --- a/modules/developer-cli-odo-installing-odo-on-macos.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/installing-odo.adoc - -:_content-type: PROCEDURE -[id="installing-odo-on-macos_{context}"] - -= Installing {odo-title} on macOS - -The `{odo-title}` CLI for macOS is available to download as a binary and as a tarball. 
- -[cols="2,1,1",options="header"] -|=== -|Operating System|Binary|Tarball -|macOS|link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-darwin-amd64[odo-darwin-amd64]|link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-darwin-amd64.tar.gz[odo-darwin-amd64.tar.gz] -|=== - -.Procedure - -. Navigate to the link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/[content gateway] and download the appropriate file: -** If you download the binary, rename it to `odo`: -+ -[source,terminal] ----- -$ curl -L https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-darwin-amd64 -o odo ----- -** If you download the tarball, extract the binary: -+ -[source,terminal] ----- -$ curl -L https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-darwin-amd64.tar.gz -o odo.tar.gz -$ tar xvzf odo.tar.gz ----- -. Change the permissions on the binary: -+ -[source,terminal] ----- -# chmod +x odo ----- -. Place the `{odo-title}` binary in a directory that is on your `PATH`. -+ -To check your `PATH`, execute the following command: -+ -[source,terminal] ----- -$ echo $PATH ----- -. Verify that `{odo-title}` is now available on your system: -+ -[source,terminal] ----- -$ odo version ----- \ No newline at end of file diff --git a/modules/developer-cli-odo-installing-odo-on-vs-code.adoc b/modules/developer-cli-odo-installing-odo-on-vs-code.adoc deleted file mode 100644 index ed7438a4d40a..000000000000 --- a/modules/developer-cli-odo-installing-odo-on-vs-code.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/installing-odo.adoc - -:_content-type: PROCEDURE -[id="installing-odo-on-vs-code_{context}"] - -= Installing odo on VS Code - -The https://marketplace.visualstudio.com/items?itemName=redhat.vscode-openshift-connector[OpenShift VS Code extension] uses both `odo` and the `oc` binary to interact with your {product-title} cluster. To work with these features, install the OpenShift VS Code extension on VS Code. - -.Prerequisites - -* You have installed VS Code. - -.Procedure - -. Open VS Code. - -. Launch VS Code Quick Open with `Ctrl`+`P`. - -. Enter the following command: -+ ----- -$ ext install redhat.vscode-openshift-connector ----- diff --git a/modules/developer-cli-odo-installing-odo-on-windows.adoc b/modules/developer-cli-odo-installing-odo-on-windows.adoc deleted file mode 100644 index 934fdb7c42ee..000000000000 --- a/modules/developer-cli-odo-installing-odo-on-windows.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/installing-odo.adoc - -:_content-type: PROCEDURE -[id="installing-odo-on-windows_{context}"] - -= Installing {odo-title} on Windows - -The `{odo-title}` CLI for Windows is available to download as a binary and as an archive. - -[cols="2,1,1",options="header"] -|=== -|Operating System|Binary|Tarball -|Windows|link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-windows-amd64.exe[odo-windows-amd64.exe]|link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-windows-amd64.exe.zip[odo-windows-amd64.exe.zip] -|=== - -.Procedure - -. 
Navigate to the link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/[content gateway] and download the appropriate file: -** If you download the binary, rename it to `odo.exe`. -** If you download the archive, unzip the binary with a ZIP program and then rename it to `odo.exe`. -. Move the `odo.exe` binary to a directory that is on your `PATH`. -+ -To check your `PATH`, open the command prompt and execute the following command: -+ -[source,terminal] ----- -C:\> path ----- -. Verify that `{odo-title}` is now available on your system: -+ -[source,terminal] ----- -C:\> odo version ----- \ No newline at end of file diff --git a/modules/developer-cli-odo-linking-both-components.adoc b/modules/developer-cli-odo-linking-both-components.adoc deleted file mode 100644 index 009d6027756d..000000000000 --- a/modules/developer-cli-odo-linking-both-components.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-multicomponent-application-with-odo.adoc -:_content-type: PROCEDURE -[id="linking-both-components_{context}"] - -= Linking both components - -Components running on the cluster need to be connected to interact. {product-title} provides linking mechanisms to publish communication bindings from a program to its clients. - -.Procedure - -. List all the components that are running on the cluster: -+ -[source,terminal] ----- -$ odo list ----- -+ -.Example output -[source,terminal] ----- -OpenShift Components: -APP NAME PROJECT TYPE SOURCETYPE STATE -app backend testpro openjdk18 binary Pushed -app frontend testpro nodejs local Pushed ----- - - -. Link the current front-end component to the back end: -+ -[source,terminal] ----- -$ odo link backend --port 8080 ----- -+ -.Example output -[source,terminal] ----- - ✓ Component backend has been successfully linked from the component frontend - -Following environment variables were added to frontend component: -- COMPONENT_BACKEND_HOST -- COMPONENT_BACKEND_PORT ----- -+ -The configuration information of the back-end component is added to the front-end component and the front-end component restarts. diff --git a/modules/developer-cli-odo-listing-available-devfile-components.adoc b/modules/developer-cli-odo-listing-available-devfile-components.adoc deleted file mode 100644 index d04a6f69d92f..000000000000 --- a/modules/developer-cli-odo-listing-available-devfile-components.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// cli_reference/developer_cli_odo/creating-a-java-application-using-devfile - -:_content-type: PROCEDURE -[id="listing-available-devfile-components_{context}"] -= Listing available devfile components - -With `{odo-title}`, you can display all the components that are available for you on the cluster. Components that are available depend on the configuration of your cluster. - -.Procedure - -. 
To list available devfile components on your cluster, run: -+ -[source,terminal] ----- -$ odo catalog list components ----- -+ -The output lists the available `{odo-title}` components: -+ -[source,terminal] ----- -Odo Devfile Components: -NAME DESCRIPTION REGISTRY -java-maven Upstream Maven and OpenJDK 11 DefaultDevfileRegistry -java-openliberty Open Liberty microservice in Java DefaultDevfileRegistry -java-quarkus Upstream Quarkus with Java+GraalVM DefaultDevfileRegistry -java-springboot Spring Boot® using Java DefaultDevfileRegistry -nodejs Stack with NodeJS 12 DefaultDevfileRegistry - -Odo OpenShift Components: -NAME PROJECT TAGS SUPPORTED -java openshift 11,8,latest YES -dotnet openshift 2.1,3.1,latest NO -golang openshift 1.13.4-ubi7,1.13.4-ubi8,latest NO -httpd openshift 2.4-el7,2.4-el8,latest NO -nginx openshift 1.14-el7,1.14-el8,1.16-el7,1.16-el8,latest NO -nodejs openshift 10-ubi7,10-ubi8,12-ubi7,12-ubi8,latest NO -perl openshift 5.26-el7,5.26-ubi8,5.30-el7,latest NO -php openshift 7.2-ubi7,7.2-ubi8,7.3-ubi7,7.3-ubi8,latest NO -python openshift 2.7-ubi7,2.7-ubi8,3.6-ubi7,3.6-ubi8,3.8-ubi7,3.8-ubi8,latest NO -ruby openshift 2.5-ubi7,2.5-ubi8,2.6-ubi7,2.6-ubi8,2.7-ubi7,latest NO -wildfly openshift 10.0,10.1,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0,8.1,9.0,latest NO ----- diff --git a/modules/developer-cli-odo-listing-available-services-from-the-operators-installed-on-the-cluster.adoc b/modules/developer-cli-odo-listing-available-services-from-the-operators-installed-on-the-cluster.adoc deleted file mode 100644 index 07b5c2f44682..000000000000 --- a/modules/developer-cli-odo-listing-available-services-from-the-operators-installed-on-the-cluster.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-the-operators.adoc - -[id="listing-available-services-from-the-operators-installed-on-the-cluster_{context}"] - -= Listing available services from the Operators installed on the cluster - -With `{odo-title}`, you can display the list of the Operators installed on your cluster, and the services they provide. - -* To list the Operators installed in current project, run: -+ -[source,terminal] ----- -$ odo catalog list services ----- -+ -The command lists Operators and the CRDs. -The output of the command shows the Operators installed on your cluster. For example: -+ -[source,terminal] ----- -Operators available in the cluster -NAME CRDs -etcdoperator.v0.9.4 EtcdCluster, EtcdBackup, EtcdRestore -mongodb-enterprise.v1.4.5 MongoDB, MongoDBUser, MongoDBOpsManager ----- -+ -`etcdoperator.v0.9.4` is the Operator, `EtcdCluster`, `EtcdBackup` and `EtcdRestore` are the CRDs provided by the Operator. 
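As a sketch of how this listing is typically used, a listed Operator and one of its CRDs can be passed to the `odo service create` command shown earlier in this document (reusing the `etcdoperator.v0.9.4` example above):

[source,terminal]
----
$ odo service create etcdoperator.v0.9.4 EtcdCluster
----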
diff --git a/modules/developer-cli-odo-mirroring-a-supported-builder-image.adoc b/modules/developer-cli-odo-mirroring-a-supported-builder-image.adoc deleted file mode 100644 index 2c041622ab7d..000000000000 --- a/modules/developer-cli-odo-mirroring-a-supported-builder-image.adoc +++ /dev/null @@ -1,93 +0,0 @@ -// Module included in the following assemblies: -// -// cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster - -:_content-type: PROCEDURE -[id="mirroring-a-supported-builder-image_{context}"] -= Mirroring a supported builder image - -To use npm packages for Node.js dependencies and Maven packages for Java dependencies and configure a runtime environment for your application, you must mirror a respective builder image from the mirror registry. - - -.Procedure - -. Verify that the required images tag is not imported: -+ -[source,terminal] ----- -$ oc describe is nodejs -n openshift ----- -+ -.Example output -[source,terminal] ----- -Name: nodejs -Namespace: openshift -[...] - -10 - tagged from <mirror-registry>:<port>/rhoar-nodejs/nodejs-10 - prefer registry pullthrough when referencing this tag - - Build and run Node.js 10 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/nodeshift/centos7-s2i-nodejs. - Tags: builder, nodejs, hidden - Example Repo: https://github.com/sclorg/nodejs-ex.git - - ! error: Import failed (NotFound): dockerimage.image.openshift.io "<mirror-registry>:<port>/rhoar-nodejs/nodejs-10:latest" not found - About an hour ago - -10-SCL (latest) - tagged from <mirror-registry>:<port>/rhscl/nodejs-10-rhel7 - prefer registry pullthrough when referencing this tag - - Build and run Node.js 10 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/nodeshift/centos7-s2i-nodejs. - Tags: builder, nodejs - Example Repo: https://github.com/sclorg/nodejs-ex.git - - ! error: Import failed (NotFound): dockerimage.image.openshift.io "<mirror-registry>:<port>/rhscl/nodejs-10-rhel7:latest" not found - About an hour ago - -[...] ----- - -. Mirror the supported image tag to the private registry: -+ -[source,terminal] ----- -$ oc image mirror registry.access.redhat.com/rhscl/nodejs-10-rhel7:<tag> <private_registry>/rhscl/nodejs-10-rhel7:<tag> ----- - -. Import the image: -+ -[source,terminal] ----- -$ oc tag <mirror-registry>:<port>/rhscl/nodejs-10-rhel7:<tag> nodejs-10-rhel7:latest --scheduled ----- -+ -You must periodically re-import the image. The `--scheduled` flag enables automatic re-import of the image. - -. Verify that the images with the given tag have been imported: -+ -[source,terminal] ----- -$ oc describe is nodejs -n openshift ----- -+ -.Example output -[source,terminal] ----- -Name: nodejs -[...] -10-SCL (latest) - tagged from <mirror-registry>:<port>/rhscl/nodejs-10-rhel7 - prefer registry pullthrough when referencing this tag - - Build and run Node.js 10 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/nodeshift/centos7-s2i-nodejs. - Tags: builder, nodejs - Example Repo: https://github.com/sclorg/nodejs-ex.git - - * <mirror-registry>:<port>/rhscl/nodejs-10-rhel7@sha256:d669ecbc11ac88293de50219dae8619832c6a0f5b04883b480e073590fab7c54 - 3 minutes ago - -[...] 
----- diff --git a/modules/developer-cli-odo-modifying-the-running-application.adoc b/modules/developer-cli-odo-modifying-the-running-application.adoc deleted file mode 100644 index fd4b4295a83d..000000000000 --- a/modules/developer-cli-odo-modifying-the-running-application.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-multicomponent--application-with-odo.adoc - -:_content-type: PROCEDURE -[id="modifying-the-running-application_{context}"] - -= Modifying the running application - -.Procedure - -. Change the local directory to the front-end directory: -+ -[source,terminal] ----- -$ cd frontend ----- - -. Monitor the changes on the file system using: -+ -[source,terminal] ----- -$ odo watch ----- - -. Edit the `index.html` file to change the displayed name for the game. -+ -[NOTE] -==== -A slight delay is possible before {odo-title} recognizes the change. -==== -+ -{odo-title} pushes the changes to the front-end component and prints its status to the terminal: -+ -[source,terminal] ----- -File /root/frontend/index.html changed -File changed -Pushing files... - ✓ Waiting for component to start - ✓ Copying files to component - ✓ Building component ----- - -. Refresh the application page in the web browser. The new name is now displayed. diff --git a/modules/developer-cli-odo-modifying-your-application-code.adoc b/modules/developer-cli-odo-modifying-your-application-code.adoc deleted file mode 100644 index ddef73601a08..000000000000 --- a/modules/developer-cli-odo-modifying-your-application-code.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-single-component-application-with-odo.adoc - -[id="modifying-your-application-code_{context}"] -= Modifying your application code - -You can modify your application code and have the changes applied to your application on {product-title}. - -. Edit one of the layout files within the Node.js directory with your preferred text editor. - -. Update your component: -+ -[source,terminal] ----- -$ odo push ----- -. Refresh your application in the browser to see the changes. diff --git a/modules/developer-cli-odo-openshift-cluster-objects.adoc b/modules/developer-cli-odo-openshift-cluster-objects.adoc deleted file mode 100644 index bd6e81441545..000000000000 --- a/modules/developer-cli-odo-openshift-cluster-objects.adoc +++ /dev/null @@ -1,91 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/odo-architecture.adoc - -[id="openshift-cluster-objects_{context}"] -= OpenShift cluster objects - -== Init Containers -Init containers are specialized containers that run before the application container starts and configure the necessary environment for the application containers to run. Init containers can have files that application images do not have, for example setup scripts. Init containers always run to completion and the application container does not start if any of the init containers fails. - -The pod created by {odo-title} executes two Init Containers: - -* The `copy-supervisord` Init container. -* The `copy-files-to-volume` Init container. - -=== `copy-supervisord` - -The `copy-supervisord` Init container copies necessary files onto an `emptyDir` volume. The main application container utilizes these files from the `emptyDir` volume. 
- -.Files that are copied onto the `emptyDir` volume: -* Binaries: -** `go-init` is a minimal init system. It runs as the first process (PID 1) inside the application container. go-init starts the `SupervisorD` daemon which runs the developer code. go-init is required to handle orphaned processes. -** `SupervisorD` is a process control system. It watches over configured processes and ensures that they are running. It also restarts services when necessary. For {odo-title}, `SupervisorD` executes and monitors the developer code. - -* Configuration files: -** `supervisor.conf` is the configuration file necessary for the SupervisorD daemon to start. -* Scripts: -** `assemble-and-restart` is an OpenShift S2I concept to build and deploy user-source code. The assemble-and-restart script first assembles the user source code inside the application container and then restarts SupervisorD for user changes to take effect. -** `Run` is an OpenShift S2I concept of executing the assembled source code. The `run` script executes the assembled code created by the `assemble-and-restart` script. -** `s2i-setup` is a script that creates files and directories which are necessary for the `assemble-and-restart` and run scripts to execute successfully. The script is executed whenever the application container starts. - -* Directories: -** `language-scripts`: OpenShift S2I allows custom `assemble` and `run` scripts. A few language specific custom scripts are present in the `language-scripts` directory. The custom scripts provide additional configuration to make {odo-title} debug work. - -The `emptyDir` volume is mounted at the `/opt/odo` mount point for both the Init container and the application container. - -=== `copy-files-to-volume` -The `copy-files-to-volume` Init container copies files that are in `/opt/app-root` in the S2I builder image onto the persistent volume. The volume is then mounted at the same location (`/opt/app-root`) in an application container. - -Without the persistent volume on `/opt/app-root` the data in this directory is lost when the persistent volume claim is mounted at the same location. - -The PVC is mounted at the `/mnt` mount point inside the Init container. - -== Application container -Application container is the main container inside of which the user-source code executes. - -Application container is mounted with two volumes: - -* `emptyDir` volume mounted at `/opt/odo` -* The persistent volume mounted at `/opt/app-root` - -`go-init` is executed as the first process inside the application container. The `go-init` process then starts the `SupervisorD` daemon. - -`SupervisorD` executes and monitors the user assembled source code. If the user process crashes, `SupervisorD` restarts it. - -== Persistent volumes and persistent volume claims -A persistent volume claim (PVC) is a volume type in Kubernetes which provisions a persistent volume. The life of a persistent volume is independent of a pod lifecycle. The data on the persistent volume persists across pod restarts. - -The `copy-files-to-volume` Init container copies necessary files onto the persistent volume. The main application container utilizes these files at runtime for execution. - -The naming convention of the persistent volume is <component_name>-s2idata. 
-
-[options="header"]
-|===
-| Container | PVC mounted at
-| `copy-files-to-volume`
-| `/mnt`
-
-| Application container
-| `/opt/app-root`
-|===
-
-== emptyDir volume
-An `emptyDir` volume is created when a pod is assigned to a node, and exists as long as that pod is running on the node. If the container is restarted or moved, the content of the `emptyDir` volume is removed, and the Init container restores the data back to it. The `emptyDir` volume is initially empty.
-
-The `copy-supervisord` Init container copies necessary files onto the `emptyDir` volume. These files are then utilized by the main application container at runtime for execution.
-
-[options="header"]
-|===
-| Container | `emptyDir` volume mounted at
-| `copy-supervisord`
-| `/opt/odo`
-
-| Application container
-| `/opt/odo`
-|===
-
-== Service
-A service is a Kubernetes concept that abstracts the way of communicating with a set of pods.
-
-{odo-title} creates a service for every application pod to make it accessible for communication.
diff --git a/modules/developer-cli-odo-openshift-source-to-image.adoc b/modules/developer-cli-odo-openshift-source-to-image.adoc
deleted file mode 100644
index 7341c89caf1d..000000000000
--- a/modules/developer-cli-odo-openshift-source-to-image.adoc
+++ /dev/null
@@ -1,10 +0,0 @@
-// Module included in the following assemblies:
-//
-// * cli_reference/developer_cli_odo/odo-architecture.adoc
-
-[id="openshift-source-to-image_{context}"]
-
-= OpenShift source-to-image
-
-OpenShift Source-to-Image (S2I) is an open-source project that builds artifacts from source code and injects them into container images. S2I produces ready-to-run images by building source code without the need for a Dockerfile.
-{odo-title} uses the S2I builder image to execute developer source code inside a container.
diff --git a/modules/developer-cli-odo-overwriting-a-mirror-registry.adoc b/modules/developer-cli-odo-overwriting-a-mirror-registry.adoc
deleted file mode 100644
index f970a33f62e9..000000000000
--- a/modules/developer-cli-odo-overwriting-a-mirror-registry.adoc
+++ /dev/null
@@ -1,25 +0,0 @@
-// Module included in the following assemblies:
-//
-// cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster
-
-:_content-type: PROCEDURE
-[id="overwriting-the-mirror-registry_{context}"]
-= Overwriting the mirror registry
-
-To download npm packages for Node.js dependencies and Maven packages for Java dependencies from a private mirror registry, you must create and configure a mirror npm or Maven registry on the cluster. You can then overwrite the mirror registry on an existing component or when you create a new component.
- -.Procedure - -* To overwrite the mirror registry on an existing component: -+ -[source,terminal] ----- -$ odo config set --env NPM_MIRROR=<npm_mirror_registry> ----- - -* To overwrite the mirror registry when creating a component: -+ -[source,terminal] ----- -$ odo component create nodejs --env NPM_MIRROR=<npm_mirror_registry> ----- diff --git a/modules/developer-cli-odo-preference-table.adoc b/modules/developer-cli-odo-preference-table.adoc deleted file mode 100644 index 5fd8e5d2a27f..000000000000 --- a/modules/developer-cli-odo-preference-table.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc - -:_content-type: REFERENCE -[id="developer-cli-odo-preference-table_{context}"] -= Preference key table - -The following table shows the available options for setting preference keys for the `odo` CLI: - -[cols="1,3,1"] -|=== -|Preference key |Description |Default value - -|`UpdateNotification` -|Control whether a notification to update `odo` is shown. -|True - -|`NamePrefix` -|Set a default name prefix for an `odo` resource. For example, `component` or `storage`. -|Current directory name - -|`Timeout` -|Timeout for the Kubernetes server connection check. -|1 second - -|`BuildTimeout` -|Timeout for waiting for a build of the git component to complete. -|300 seconds - -|`PushTimeout` -|Timeout for waiting for a component to start. -|240 seconds - -|`Ephemeral` -|Controls whether `odo` should create an `emptyDir` volume to store source code. -|True - -|`ConsentTelemetry` -|Controls whether `odo` can collect telemetry for the user's `odo` usage. -|False - -|=== diff --git a/modules/developer-cli-odo-push-workflow.adoc b/modules/developer-cli-odo-push-workflow.adoc deleted file mode 100644 index 555e90be5113..000000000000 --- a/modules/developer-cli-odo-push-workflow.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/odo-architecture.adoc - -[id="odo-push-workflow_{context}"] - -= {odo-title} push workflow -This section describes `{odo-title} push` workflow. {odo-title} push deploys user code on an {product-title} cluster with all the necessary {product-title} resources. - -. Creating resources -+ -If not already created, `{odo-title}` push creates the following {product-title} resources: -+ -* `DeploymentConfig` object: -** Two init containers are executed: `copy-supervisord` and `copy-files-to-volume`. The init containers copy files onto the `emptyDir` and the `PersistentVolume` type of volumes respectively. -** The application container starts. The first process in the application container is the `go-init` process with PID=1. -** `go-init` process starts the SupervisorD daemon. -+ -[NOTE] -==== -The user application code has not been copied into the application container yet, so the `SupervisorD` daemon does not execute the `run` script. -==== -+ -* `Service` object -* `Secret` objects -* `PersistentVolumeClaim` object - - -. Indexing files -+ -* A file indexer indexes the files in the source code directory. The indexer traverses through the source code directories recursively and finds files which have been created, deleted, or renamed. -* A file indexer maintains the indexed information in an {odo-title} index file inside the `.odo` directory. -* If the {odo-title} index file is not present, it means that the file indexer is being executed for the first time, and creates a new {odo-title} index JSON file. 
-The {odo-title} index JSON file contains a file map - the relative file paths of the traversed files and the absolute paths of the changed and deleted files. - -. Pushing code -+ -Local code is copied into the application container, usually under `/tmp/src`. - -. Executing `assemble-and-restart` -+ -On a successful copy of the source code, the `assemble-and-restart` script is executed inside the running application container. diff --git a/modules/developer-cli-odo-pushing-the-odo-init-image-to-a-mirror-registry.adoc b/modules/developer-cli-odo-pushing-the-odo-init-image-to-a-mirror-registry.adoc deleted file mode 100644 index 4deb48351980..000000000000 --- a/modules/developer-cli-odo-pushing-the-odo-init-image-to-a-mirror-registry.adoc +++ /dev/null @@ -1,134 +0,0 @@ -// Module included in the following assemblies: -// -// cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc - -:_content-type: PROCEDURE -[id="pushing-the-odo-init-image-to-a-mirror-registry_{context}"] -= Pushing the `odo` init image to a mirror registry - -Depending on your operating system, you can push the `odo` init image to a cluster with a mirror registry as follows: - -[id="pushing-the-init-image-to-a-mirror-registry-on-linux_{context}"] - -== Pushing the init image to a mirror registry on Linux - -.Procedure - -. Use `base64` to encode the root certification authority (CA) content of your mirror registry: -+ -[source,terminal] ----- -$ echo <content_of_additional_ca> | base64 --decode > disconnect-ca.crt ----- - -. Copy the encoded root CA certificate to the appropriate location: -+ -[source,terminal] ----- -$ sudo cp ./disconnect-ca.crt /etc/pki/ca-trust/source/anchors/<mirror-registry>.crt ----- - -. Trust a CA in your client platform and log in to the {product-title} mirror registry: -+ -[source,terminal] ----- -$ sudo update-ca-trust enable && sudo systemctl daemon-reload && sudo systemctl restart / docker && docker login <mirror-registry>:5000 -u <username> -p <password> ----- - -. Mirror the `odo` init image: -+ -[source,terminal] ----- -$ oc image mirror registry.access.redhat.com/openshiftdo/odo-init-image-rhel7:<tag> <mirror-registry>:5000/openshiftdo/odo-init-image-rhel7:<tag> ----- - -. Override the default `odo` init image path by setting the `ODO_BOOTSTRAPPER_IMAGE` environment variable: -+ -[source,terminal] ----- -$ export ODO_BOOTSTRAPPER_IMAGE=<mirror-registry>:5000/openshiftdo/odo-init-image-rhel7:<tag> ----- - - -[id="pushing-the-init-image-to-a-mirror-registry-on-macos_{context}"] - -== Pushing the init image to a mirror registry on MacOS - -.Procedure - -. Use `base64` to encode the root certification authority (CA) content of your mirror registry: -+ -[source,terminal] ----- -$ echo <content_of_additional_ca> | base64 --decode > disconnect-ca.crt ----- - -. Copy the encoded root CA certificate to the appropriate location: -+ -.. Restart Docker using the Docker UI. -+ -.. Run the following command: -+ -[source,terminal] ----- -$ docker login <mirror-registry>:5000 -u <username> -p <password> ----- - -. Mirror the `odo` init image: -+ -[source,terminal] ----- -$ oc image mirror registry.access.redhat.com/openshiftdo/odo-init-image-rhel7:<tag> <mirror-registry>:5000/openshiftdo/odo-init-image-rhel7:<tag> ----- - -. 
Override the default `odo` init image path by setting the `ODO_BOOTSTRAPPER_IMAGE` environment variable: -+ -[source,terminal] ----- -$ export ODO_BOOTSTRAPPER_IMAGE=<mirror-registry>:5000/openshiftdo/odo-init-image-rhel7:<tag> ----- - -[id="pushing-the-init-image-to-a-mirror-registry-on-windows_{context}"] - -== Pushing the init image to a mirror registry on Windows - -.Procedure - -. Use `base64` to encode the root certification authority (CA) content of your mirror registry: -+ -[source,terminal] ----- -PS C:\> echo <content_of_additional_ca> | base64 --decode > disconnect-ca.crt ----- - -. As an administrator, copy the encoded root CA certificate to the appropriate location by executing the following command: -+ -[source,terminal] ----- -PS C:\WINDOWS\system32> certutil -addstore -f "ROOT" disconnect-ca.crt ----- - -. Trust a CA in your client platform and log in to the {product-title} mirror registry: -+ -.. Restart Docker using the Docker UI. -+ -.. Run the following command: -+ -[source,terminal] ----- -PS C:\WINDOWS\system32> docker login <mirror-registry>:5000 -u <username> -p <password> ----- - -. Mirror the `odo` init image: -+ -[source,terminal] ----- -PS C:\> oc image mirror registry.access.redhat.com/openshiftdo/odo-init-image-rhel7:<tag> <mirror-registry>:5000/openshiftdo/odo-init-image-rhel7:<tag> ----- - -. Override the default `odo` init image path by setting the `ODO_BOOTSTRAPPER_IMAGE` environment variable: -+ -[source,terminal] ----- -PS C:\> $env:ODO_BOOTSTRAPPER_IMAGE="<mirror-registry>:5000/openshiftdo/odo-init-image-rhel7:<tag>" ----- diff --git a/modules/developer-cli-odo-pushing-the-odo-init-image-to-an-internal-registry-directly.adoc b/modules/developer-cli-odo-pushing-the-odo-init-image-to-an-internal-registry-directly.adoc deleted file mode 100644 index 19da7763d919..000000000000 --- a/modules/developer-cli-odo-pushing-the-odo-init-image-to-an-internal-registry-directly.adoc +++ /dev/null @@ -1,239 +0,0 @@ -// Module included in the following assemblies: -// -// cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc - -:_content-type: PROCEDURE -[id="pushing-the-odo-init-image-to-an-internal-registry-directly_{context}"] -= Pushing the `odo` init image to an {product-registry} directly - -If your cluster allows images to be pushed to the {product-registry} directly, push the `odo` init image to the registry as follows: - -[id="pushing-the-init-image-directly-on-linux_{context}"] - -== Pushing the init image directly on Linux - -.Procedure - -. Enable the default route: -+ -[source,terminal] ----- -$ oc patch configs.imageregistry.operator.openshift.io cluster -p '{"spec":{"defaultRoute":true}}' --type='merge' -n openshift-image-registry ----- - -. Get a wildcard route CA: -+ -[source,terminal] ----- -$ oc get secret router-certs-default -n openshift-ingress -o yaml ----- -+ -.Example output -[source,terminal] ----- -apiVersion: v1 -data: - tls.crt: ************************** - tls.key: ################## -kind: Secret -metadata: - [...] -type: kubernetes.io/tls ----- - -. Use `base64` to encode the root certification authority (CA) content of your mirror registry: -+ -[source,terminal] ----- -$ echo <tls.crt> | base64 --decode > ca.crt ----- - -. 
Trust a CA in your client platform: -+ -[source,terminal] ----- -$ sudo cp ca.crt /etc/pki/ca-trust/source/anchors/externalroute.crt && sudo update-ca-trust enable && sudo systemctl daemon-reload && sudo systemctl restart docker ----- - -. Log in to the {product-registry}: -+ -[source,terminal] ----- -$ oc get route -n openshift-image-registry -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -default-route <registry_path> image-registry <all> reencrypt None - -$ docker login <registry_path> -u kubeadmin -p $(oc whoami -t) ----- - -. Push the `odo` init image: -+ -[source,terminal] ----- -$ docker pull registry.access.redhat.com/openshiftdo/odo-init-image-rhel7:<tag> - -$ docker tag registry.access.redhat.com/openshiftdo/odo-init-image-rhel7:<tag> <registry_path>/openshiftdo/odo-init-image-rhel7:<tag> - -$ docker push <registry_path>/openshiftdo/odo-init-image-rhel7:<tag> ----- - -. Override the default `odo` init image path by setting the `ODO_BOOTSTRAPPER_IMAGE` environment variable: -+ -[source,terminal] ----- -$ export ODO_BOOTSTRAPPER_IMAGE=<registry_path>/openshiftdo/odo-init-image-rhel7:1.0.1 ----- - - -[id="pushing-the-init-image-directly-on-macos_{context}"] - -== Pushing the init image directly on MacOS - -.Procedure - -. Enable the default route: -+ -[source,terminal] ----- -$ oc patch configs.imageregistry.operator.openshift.io cluster -p '{"spec":{"defaultRoute":true}}' --type='merge' -n openshift-image-registry ----- - -. Get a wildcard route CA: -+ -[source,terminal] ----- -$ oc get secret router-certs-default -n openshift-ingress -o yaml ----- -+ -.Example output -[source,terminal] ----- -apiVersion: v1 -data: - tls.crt: ************************** - tls.key: ################## -kind: Secret -metadata: - [...] -type: kubernetes.io/tls ----- - -. Use `base64` to encode the root certification authority (CA) content of your mirror registry: -+ -[source,terminal] ----- -$ echo <tls.crt> | base64 --decode > ca.crt ----- - -. Trust a CA in your client platform: -+ -[source,terminal] ----- -$ sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain ca.crt ----- - -. Log in to the {product-registry}: -+ -[source,terminal] ----- -$ oc get route -n openshift-image-registry -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -default-route <registry_path> image-registry <all> reencrypt None - -$ docker login <registry_path> -u kubeadmin -p $(oc whoami -t) ----- - -. Push the `odo` init image: -+ -[source,terminal] ----- -$ docker pull registry.access.redhat.com/openshiftdo/odo-init-image-rhel7:<tag> - -$ docker tag registry.access.redhat.com/openshiftdo/odo-init-image-rhel7:<tag> <registry_path>/openshiftdo/odo-init-image-rhel7:<tag> - -$ docker push <registry_path>/openshiftdo/odo-init-image-rhel7:<tag> ----- - -. Override the default `odo` init image path by setting the `ODO_BOOTSTRAPPER_IMAGE` environment variable: -+ -[source,terminal] ----- -$ export ODO_BOOTSTRAPPER_IMAGE=<registry_path>/openshiftdo/odo-init-image-rhel7:1.0.1 ----- - - -[id="pushing-the-init-image-directly-on-windows_{context}"] - -== Pushing the init image directly on Windows - -.Procedure - -. Enable the default route: -+ -[source,terminal] ----- -PS C:\> oc patch configs.imageregistry.operator.openshift.io cluster -p '{"spec":{"defaultRoute":true}}' --type='merge' -n openshift-image-registry ----- - -. 
Get a wildcard route CA: -+ -[source,terminal] ----- -PS C:\> oc get secret router-certs-default -n openshift-ingress -o yaml ----- -+ -.Example output -[source,terminal] ----- -apiVersion: v1 -data: - tls.crt: ************************** - tls.key: ################## -kind: Secret -metadata: - [...] -type: kubernetes.io/tls ----- - -. Use `base64` to encode the root certification authority (CA) content of your mirror registry: -+ -[source,terminal] ----- -PS C:\> echo <tls.crt> | base64 --decode > ca.crt ----- - -. As an administrator, trust a CA in your client platform by executing the following command: -+ -[source,terminal] ----- -PS C:\WINDOWS\system32> certutil -addstore -f "ROOT" ca.crt ----- - -. Log in to the {product-registry}: -+ -[source,terminal] ----- -PS C:\> oc get route -n openshift-image-registry -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -default-route <registry_path> image-registry <all> reencrypt None - -PS C:\> docker login <registry_path> -u kubeadmin -p $(oc whoami -t) ----- - -. Push the `odo` init image: -+ -[source,terminal] ----- -PS C:\> docker pull registry.access.redhat.com/openshiftdo/odo-init-image-rhel7:<tag> - -PS C:\> docker tag registry.access.redhat.com/openshiftdo/odo-init-image-rhel7:<tag> <registry_path>/openshiftdo/odo-init-image-rhel7:<tag> - -PS C:\> docker push <registry_path>/openshiftdo/odo-init-image-rhel7:<tag> ----- - -. Override the default `odo` init image path by setting the `ODO_BOOTSTRAPPER_IMAGE` environment variable: -+ -[source,terminal] ----- -PS C:\> $env:ODO_BOOTSTRAPPER_IMAGE="<registry_path>/openshiftdo/odo-init-image-rhel7:<tag>" ----- diff --git a/modules/developer-cli-odo-ref-build-images.adoc b/modules/developer-cli-odo-ref-build-images.adoc deleted file mode 100644 index 583725081047..000000000000 --- a/modules/developer-cli-odo-ref-build-images.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: REFERENCE -[id="odo-build-images_{context}"] -= odo build-images - -`odo` can build container images based on Dockerfiles, and push these images to their registries. - -When running the `odo build-images` command, `odo` searches for all components in the `devfile.yaml` with the `image` type, for example: - -[source,yaml] ----- -components: -- image: - imageName: quay.io/myusername/myimage - dockerfile: - uri: ./Dockerfile <1> - buildContext: ${PROJECTS_ROOT} <2> - name: component-built-from-dockerfile ----- -<1> The `uri` field indicates the relative path of the Dockerfile to use, relative to the directory containing the `devfile.yaml`. The devfile specification indicates that `uri` could also be an HTTP URL, but this case is not supported by odo yet. -<2> The `buildContext` indicates the directory used as build context. The default value is `+${PROJECTS_ROOT}+`. - -For each image component, odo executes either `podman` or `docker` (the first one found, in this order), to build the image with the specified Dockerfile, build context, and arguments. - -If the `--push` flag is passed to the command, the images are pushed to their registries after they are built. diff --git a/modules/developer-cli-odo-ref-catalog.adoc b/modules/developer-cli-odo-ref-catalog.adoc deleted file mode 100644 index cb3d28106157..000000000000 --- a/modules/developer-cli-odo-ref-catalog.adoc +++ /dev/null @@ -1,208 +0,0 @@ -:_content-type: REFERENCE -[id="odo-catalog_{context}"] -= odo catalog - -`odo` uses different _catalogs_ to deploy _components_ and _services_. 
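-
-As a quick overview, both catalogs can be inspected from the command line; the individual commands and their output are described in the sections that follow:
-
-[source,terminal]
-----
-$ odo catalog list components    # devfiles available from the configured registries
-$ odo catalog list services      # Operator-backed services available in the cluster
-----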
- -== Components - -`odo` uses the portable _devfile_ format to describe the components. It can connect to various devfile registries to download devfiles for different languages and frameworks. -See `odo registry` for more information. - -=== Listing components - -To list all the _devfiles_ available on the different registries, run the command: - -[source,terminal] ----- -$ odo catalog list components ----- - -.Example output -[source,terminal] ----- - NAME DESCRIPTION REGISTRY - go Stack with the latest Go version DefaultDevfileRegistry - java-maven Upstream Maven and OpenJDK 11 DefaultDevfileRegistry - nodejs Stack with Node.js 14 DefaultDevfileRegistry - php-laravel Stack with Laravel 8 DefaultDevfileRegistry - python Python Stack with Python 3.7 DefaultDevfileRegistry - [...] ----- - -=== Getting information about a component - -To get more information about a specific component, run the command: - -[source,terminal] ----- -$ odo catalog describe component ----- - -For example, run the command: - -[source,terminal] ----- -$ odo catalog describe component nodejs ----- - -.Example output -[source,terminal] ----- -* Registry: DefaultDevfileRegistry <1> - -Starter Projects: <2> ---- -name: nodejs-starter -attributes: {} -description: "" -subdir: "" -projectsource: - sourcetype: "" - git: - gitlikeprojectsource: - commonprojectsource: {} - checkoutfrom: null - remotes: - origin: https://github.com/odo-devfiles/nodejs-ex.git - zip: null - custom: null ----- -<1> _Registry_ is the registry from which the devfile is retrieved. -<2> _Starter projects_ are sample projects in the same language and framework of the devfile, that can help you start a new project. - - -See `odo create` for more information on creating a project from a starter project. - - -== Services - -`odo` can deploy _services_ with the help of _Operators_. - -Only Operators deployed with the help of the https://olm.operatorframework.io/[_Operator Lifecycle Manager_] are supported by odo. - -//// -See link:/docs/getting-started/cluster-setup/kubernetes#installing-the-operator-lifecycle-manager-olm[Installing the Operator Lifecycle Manager (OLM)] for more information. -//// - -=== Listing services - -To list the available Operators and their associated services, run the command: - -[source,terminal] ----- -$ odo catalog list services ----- - -.Example output -[source,terminal] ----- - Services available through Operators - NAME CRDs - postgresql-operator.v0.1.1 Backup, Database - redis-operator.v0.8.0 RedisCluster, Redis ----- - -In this example, two Operators are installed in the cluster. The `postgresql-operator.v0.1.1` Operator deploys services related to PostgreSQL: `Backup` and `Database`. -The `redis-operator.v0.8.0` Operator deploys services related to Redis: `RedisCluster` and `Redis`. - -[NOTE] -==== -To get a list of all the available Operators, `odo` fetches the ClusterServiceVersion (CSV) resources of the current namespace that are in a _Succeeded_ phase. -For Operators that support cluster-wide access, when a new namespace is created, these resources are automatically added to it. However, it may take some time before they are in the _Succeeded_ phase, and `odo` may return an empty list until the resources are ready. 
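-
-If the list appears empty, one way to check is to query the CSV resources directly with the standard `oc` client and wait for the `PHASE` column to report `Succeeded`. This is a generic sketch; replace `<namespace>` with your current namespace:
-
-[source,terminal]
-----
-$ oc get csv -n <namespace>
-----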
-==== - -=== Searching services - -To search for a specific service by a keyword, run the command: - -[source,terminal] ----- -$ odo catalog search service ----- - -For example, to retrieve the PostgreSQL services, run the command: - -[source,terminal] ----- -$ odo catalog search service postgres ----- - -.Example output -[source,terminal] ----- - Services available through Operators - NAME CRDs - postgresql-operator.v0.1.1 Backup, Database ----- - -You will see a list of Operators that contain the searched keyword in their name. - -=== Getting information about a service - -To get more information about a specific service, run the command: - -[source,terminal] ----- -$ odo catalog describe service ----- - -For example: - -[source,terminal] ----- -$ odo catalog describe service postgresql-operator.v0.1.1/Database ----- - -.Example output -[source,terminal] ----- -KIND: Database -VERSION: v1alpha1 - -DESCRIPTION: - Database is the Schema for the the Database Database API - -FIELDS: - awsAccessKeyId (string) - AWS S3 accessKey/token ID - - Key ID of AWS S3 storage. Default Value: nil Required to create the Secret - with the data to allow send the backup files to AWS S3 storage. -[...] ----- - -A service is represented in the cluster by a CustomResourceDefinition (CRD) resource. The previous command displays the details about the CRD such as `kind`, `version`, and the list of fields available to define an instance of this custom resource. - -The list of fields is extracted from the _OpenAPI schema_ included in the CRD. This information is optional in a CRD, and if it is not present, it is extracted from the ClusterServiceVersion (CSV) resource representing the service instead. - -It is also possible to request the description of an Operator-backed service, without providing CRD type information. To describe the Redis Operator on a cluster, without CRD, run the following command: - - -[source,terminal] ----- -$ odo catalog describe service redis-operator.v0.8.0 ----- - -.Example output -[source,terminal] ----- -NAME: redis-operator.v0.8.0 -DESCRIPTION: - - A Golang based redis operator that will make/oversee Redis - standalone/cluster mode setup on top of the Kubernetes. It can create a - redis cluster setup with best practices on Cloud as well as the Bare metal - environment. Also, it provides an in-built monitoring capability using - -... (cut short for beverity) - - Logging Operator is licensed under [Apache License, Version - 2.0](https://github.com/OT-CONTAINER-KIT/redis-operator/blob/master/LICENSE) - - -CRDs: - NAME DESCRIPTION - RedisCluster Redis Cluster - Redis Redis ----- diff --git a/modules/developer-cli-odo-ref-create.adoc b/modules/developer-cli-odo-ref-create.adoc deleted file mode 100644 index 028c99c804b2..000000000000 --- a/modules/developer-cli-odo-ref-create.adoc +++ /dev/null @@ -1,107 +0,0 @@ -:_content-type: REFERENCE -[id="odo-create_{context}"] -= odo create - - -`odo` uses a link:https://devfile.io[_devfile_] to store the configuration of a component and to describe the component's resources such as storage and services. The _odo create_ command generates this file. - -== Creating a component - -To create a _devfile_ for an existing project, run the `odo create` command with the name and type of your component (for example, `nodejs` or `go`): - -[source,terminal] ----- -odo create nodejs mynodejs ----- - -In the example, `nodejs` is the type of the component and `mynodejs` is the name of the component that `odo` creates for you. 
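-
-The generated `devfile.yaml` is downloaded from a devfile registry, so its exact content depends on the registry and the stack version. As a rough, abbreviated illustration only, a Node.js devfile typically has a shape similar to the following; the image, port, and commands shown here are assumptions, not the literal file that `odo` produces:
-
-[source,yaml]
-----
-schemaVersion: 2.0.0
-metadata:
-  name: mynodejs
-components:
-- name: runtime
-  container:
-    image: registry.access.redhat.com/ubi8/nodejs-14:latest   # illustrative image
-    endpoints:
-    - name: http-3000
-      targetPort: 3000
-commands:
-- id: install
-  exec:
-    component: runtime
-    commandLine: npm install
-    workingDir: ${PROJECT_SOURCE}
-    group:
-      kind: build
-      isDefault: true
-- id: run
-  exec:
-    component: runtime
-    commandLine: npm start
-    workingDir: ${PROJECT_SOURCE}
-    group:
-      kind: run
-      isDefault: true
-----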
- -[NOTE] -==== -For a list of all the supported component types, run the command `odo catalog list components`. -==== - -If your source code exists outside the current directory, the `--context` flag can be used to specify the path. -For example, if the source for the nodejs component is in a folder called `node-backend` relative to the current working directory, run the command: - -[source,terminal] ----- -odo create nodejs mynodejs --context ./node-backend ----- - -The `--context` flag supports relative and absolute paths. - -To specify the project or app where your component will be deployed, use the `--project` and `--app` flags. -For example, to create a component that is part of the `myapp` app inside the `backend` project, run the command: - -[source,terminal] ----- -odo create nodejs --app myapp --project backend ----- - -[NOTE] -==== -If these flags are not specified, they will default to the active app and project. -==== - -== Starter projects - -Use the starter projects if you do not have existing source code but want to get up and running quickly to experiment with devfiles and components. -To use a starter project, add the `--starter` flag to the `odo create` command. - -To get a list of available starter projects for a component type, run the `odo catalog describe component` command. -For example, to get all available starter projects for the nodejs component type, run the command: - -[source,terminal] ----- -odo catalog describe component nodejs ----- - -Then specify the desired project using the `--starter` flag on the `odo create` command: - -[source,terminal] ----- -odo create nodejs --starter nodejs-starter ----- - -This will download the example template corresponding to the chosen component type, in this instance, `nodejs`. -The template is downloaded to your current directory, or to the location specified by the `--context` flag. -If a starter project has its own devfile, then this devfile will be preserved. - -== Using an existing devfile - -If you want to create a new component from an existing devfile, you can do so by specifying the path to the devfile using the `--devfile` flag. -For example, to create a component called `mynodejs`, based on a devfile from GitHub, use the following command: - -[source,terminal] ----- -odo create mynodejs --devfile https://raw.githubusercontent.com/odo-devfiles/registry/master/devfiles/nodejs/devfile.yaml ----- - -== Interactive creation - -You can also run the `odo create` command interactively, to guide you through the steps needed to create a component: - -[source,terminal,subs="verbatim,quotes"] ----- -$ odo create - -? Which devfile component type do you wish to create *go* -? What do you wish to name the new devfile component *go-api* -? What project do you want the devfile component to be created in *default* -Devfile Object Validation - ✓ Checking devfile existence [164258ns] - ✓ Creating a devfile component from registry: DefaultDevfileRegistry [246051ns] -Validation - ✓ Validating if devfile name is correct [92255ns] -? Do you want to download a starter project *Yes* - -Starter Project - ✓ Downloading starter project go-starter from https://github.com/devfile-samples/devfile-stack-go.git [429ms] - -Please use `odo push` command to create the component with source deployed ----- - -You are prompted to choose the component type, name, and the project for the component. You can also choose whether or not to download a starter project. Once finished, a new `devfile.yaml` file is created in the working directory. 
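-
-At this point, the working directory contains the new `devfile.yaml` next to the downloaded starter project. The exact files depend on the chosen starter project; the listing below is only an illustration for the Go starter used in this example:
-
-[source,terminal]
-----
-$ ls
-devfile.yaml  go.mod  main.go
-----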
- -To deploy these resources to your cluster, run the command `odo push`. diff --git a/modules/developer-cli-odo-ref-delete.adoc b/modules/developer-cli-odo-ref-delete.adoc deleted file mode 100644 index 1271628c1c61..000000000000 --- a/modules/developer-cli-odo-ref-delete.adoc +++ /dev/null @@ -1,52 +0,0 @@ -:_content-type: REFERENCE -[id="odo-delete_{context}"] -= odo delete - - -The `odo delete` command is useful for deleting resources that are managed by `odo`. - -== Deleting a component - -To delete a _devfile_ component, run the `odo delete` command: - -[source,terminal] ----- -$ odo delete ----- - -If the component has been pushed to the cluster, the component is deleted from the cluster, along with its dependent storage, URL, secrets, and other resources. -If the component has not been pushed, the command exits with an error stating that it could not find the resources on the cluster. - -Use the `-f` or `--force` flag to avoid the confirmation questions. - -== Undeploying devfile Kubernetes components - -To undeploy the devfile Kubernetes components, that have been deployed with `odo deploy`, execute the `odo delete` command with the `--deploy` flag: - -[source,terminal] ----- -$ odo delete --deploy ----- - -Use the `-f` or `--force` flag to avoid the confirmation questions. - -== Delete all - -To delete all artifacts including the following items, run the `odo delete` command with the `--all` flag : - -* _devfile_ component -* Devfile Kubernetes component that was deployed using the `odo deploy` command -* Devfile -* Local configuration - -[source,terminal] ----- -$ odo delete --all ----- - -== Available flags - -`-f`, `--force`:: Use this flag to avoid the confirmation questions. -`-w`, `--wait`:: Use this flag to wait for component deletion and any dependencies. This flag does not work when undeploying. - -The documentation on _Common Flags_ provides more information on the flags available for commands. diff --git a/modules/developer-cli-odo-ref-deploy.adoc b/modules/developer-cli-odo-ref-deploy.adoc deleted file mode 100644 index 441b52f4a4c3..000000000000 --- a/modules/developer-cli-odo-ref-deploy.adoc +++ /dev/null @@ -1,68 +0,0 @@ -:_content-type: REFERENCE -[id="odo-deploy_{context}"] -= odo deploy - - -`odo` can be used to deploy components in a manner similar to how they would be deployed using a CI/CD system. -First, `odo` builds the container images, and then it deploys the Kubernetes resources required to deploy the components. - -When running the command `odo deploy`, `odo` searches for the default command of kind `deploy` in the devfile, and executes this command. -The kind `deploy` is supported by the devfile format starting from version 2.2.0. - -The `deploy` command is typically a _composite_ command, composed of several _apply_ commands: - -* A command referencing an `image` component that, when applied, will build the image of the container to deploy, and then push it to its registry. -* A command referencing a link:https://devfile.io/docs/devfile/2.2.0/user-guide/adding-kubernetes-component-to-a-devfile.html[Kubernetes component] that, when applied, will create a Kubernetes resource in the cluster. - -With the following example `devfile.yaml` file, a container image is built using the `Dockerfile` present in the directory. -The image is pushed to its registry and then a Kubernetes Deployment resource is created in the cluster, using this freshly built image. - -[source,terminal] ----- -schemaVersion: 2.2.0 -[...] 
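-# The "variables" section defines values that are substituted wherever
-# {{CONTAINER_IMAGE}} appears in the components below.
-# The composite "deploy" command chains "build-image" (build and push the
-# container image) and "deployk8s" (apply the inlined Kubernetes Deployment).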
-variables: - CONTAINER_IMAGE: quay.io/phmartin/myimage -commands: - - id: build-image - apply: - component: outerloop-build - - id: deployk8s - apply: - component: outerloop-deploy - - id: deploy - composite: - commands: - - build-image - - deployk8s - group: - kind: deploy - isDefault: true -components: - - name: outerloop-build - image: - imageName: "{{CONTAINER_IMAGE}}" - dockerfile: - uri: ./Dockerfile - buildContext: ${PROJECTS_ROOT} - - name: outerloop-deploy - kubernetes: - inlined: | - kind: Deployment - apiVersion: apps/v1 - metadata: - name: my-component - spec: - replicas: 1 - selector: - matchLabels: - app: node-app - template: - metadata: - labels: - app: node-app - spec: - containers: - - name: main - image: {{CONTAINER_IMAGE}} ----- diff --git a/modules/developer-cli-odo-ref-flags.adoc b/modules/developer-cli-odo-ref-flags.adoc deleted file mode 100644 index 3e20ae4bf374..000000000000 --- a/modules/developer-cli-odo-ref-flags.adoc +++ /dev/null @@ -1,45 +0,0 @@ -:_content-type: REFERENCE -[id="odo-flags_{context}"] -= Common flags - -The following flags are available with most `odo` commands: - -.odo flags - -[width="100%",cols="30%,78%",options="header",] -|=== -|Command |Description - -| `--context` -| Set the context directory where the component is defined. - -| `--project` -| Set the project for the component. Defaults to the project defined in the local configuration. If none is available, then current project on the cluster. - - -| `--app` -| Set the application of the component. Defaults to the application defined in the local configuration. If none is available, then _app_. - - -| `--kubeconfig` -| Set the path to the `kubeconfig` value if not using the default configuration. - - -| `--show-log` -| Use this flag to see the logs. - -| `-f`, `--force` -| Use this flag to tell the command not to prompt the user for confirmation. - -| `-v`, `--v` -| Set the verbosity level. See link:https://github.com/redhat-developer/odo/wiki/Logging-in-odo[Logging in odo] for more information. - -| `-h`, `--help` -| Output the help for a command. - -|=== - -[NOTE] -==== -Some flags might not be available for some commands. Run the command with the `--help` flag to get a list of all the available flags. -==== diff --git a/modules/developer-cli-odo-ref-json-output.adoc b/modules/developer-cli-odo-ref-json-output.adoc deleted file mode 100644 index d9d850b4318d..000000000000 --- a/modules/developer-cli-odo-ref-json-output.adoc +++ /dev/null @@ -1,144 +0,0 @@ -:_content-type: REFERENCE -[id="odo-json-output_{context}"] -= JSON output - -The `odo` commands that output content generally accept a `-o json` flag to output this content in JSON format, suitable for other programs to parse this output more easily. - -The output structure is similar to Kubernetes resources, with the `kind`, `apiVersion`, `metadata`, `spec`, and `status` fields. - -_List_ commands return a `List` resource, containing an `items` (or similar) field listing the items of the list, with each item also being similar to Kubernetes resources. - -_Delete_ commands return a `Status` resource; see the link:https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/status/[Status Kubernetes resource]. - -Other commands return a resource associated with the command, for example, `Application`, `Storage`, `URL`, and so on. - -The full list of commands currently accepting the `-o json` flag is: - -|=== -| Commands | Kind (version) | Kind (version) of list items | Complete content? 
- -| odo application describe -| Application (odo.dev/v1alpha1) -| _n/a_ -| no - -| odo application list -| List (odo.dev/v1alpha1) -| Application (odo.dev/v1alpha1) -| ? - -| odo catalog list components -| List (odo.dev/v1alpha1) -| _missing_ -| yes - -| odo catalog list services -| List (odo.dev/v1alpha1) -| ClusterServiceVersion (operators.coreos.com/v1alpha1) -| ? - -| odo catalog describe component -| _missing_ -| _n/a_ -| yes - -| odo catalog describe service -| CRDDescription (odo.dev/v1alpha1) -| _n/a_ -| yes - -| odo component create -| Component (odo.dev/v1alpha1) -| _n/a_ -| yes - -| odo component describe -| Component (odo.dev/v1alpha1) -| _n/a_ -| yes - -| odo component list -| List (odo.dev/v1alpha1) -| Component (odo.dev/v1alpha1) -| yes - -| odo config view -| DevfileConfiguration (odo.dev/v1alpha1) -| _n/a_ -| yes - -| odo debug info -| OdoDebugInfo (odo.dev/v1alpha1) -| _n/a_ -| yes - -| odo env view -| EnvInfo (odo.dev/v1alpha1) -| _n/a_ -| yes - -| odo preference view -| PreferenceList (odo.dev/v1alpha1) -| _n/a_ -| yes - -| odo project create -| Project (odo.dev/v1alpha1) -| _n/a_ -| yes - -| odo project delete -| Status (v1) -| _n/a_ -| yes - -| odo project get -| Project (odo.dev/v1alpha1) -| _n/a_ -| yes - -| odo project list -| List (odo.dev/v1alpha1) -| Project (odo.dev/v1alpha1) -| yes - -| odo registry list -| List (odo.dev/v1alpha1) -| _missing_ -| yes - -| odo service create -| Service -| _n/a_ -| yes - -| odo service describe -| Service -| _n/a_ -| yes - -| odo service list -| List (odo.dev/v1alpha1) -| Service -| yes - -| odo storage create -| Storage (odo.dev/v1alpha1) -| _n/a_ -| yes - -| odo storage delete -| Status (v1) -| _n/a_ -| yes - -| odo storage list -| List (odo.dev/v1alpha1) -| Storage (odo.dev/v1alpha1) -| yes - -| odo url list -| List (odo.dev/v1alpha1) -| URL (odo.dev/v1alpha1) -| yes -|=== diff --git a/modules/developer-cli-odo-ref-link.adoc b/modules/developer-cli-odo-ref-link.adoc deleted file mode 100644 index faf1b4d1be2f..000000000000 --- a/modules/developer-cli-odo-ref-link.adoc +++ /dev/null @@ -1,491 +0,0 @@ -:_content-type: REFERENCE -[id="odo-link_{context}"] -= odo link - - -The `odo link` command helps link an `odo` component to an Operator-backed service or another `odo` component. It does this by using the link:https://github.com/redhat-developer/service-binding-operator[Service Binding Operator]. Currently, `odo` makes use of the Service Binding library and not the Operator itself to achieve the desired functionality. - -//// -In this document we will cover various options to create link between a component & a service, and a component & another component. The steps in this document are going to be based on the https://github.com/dharmit/odo-quickstart/[odo quickstart project] that we covered in link:/docs/getting-started/quickstart[Quickstart guide]. The outputs mentioned in this document are based on commands executed on link:/docs/getting-started/cluster-setup/kubernetes[minikube cluster]. - -This document assumes that you know how to link:/docs/command-reference/create[create components] and link:/docs/command-reference/service[services]. It also assumes that you have cloned the https://github.com/dharmit/odo-quickstart/[odo quickstart project]. Terminology used in this document: - -* _quickstart project_: git clone of the odo quickstart project having below directory structure: -+ -[,shell] ----- - $ tree -L 1 - . 
- ├── backend - ├── frontend - ├── postgrescluster.yaml - ├── quickstart.code-workspace - └── README.md - - 2 directories, 3 files ----- - -* _backend component_: `backend` directory in above tree structure -* _frontend component_: `frontend` directory in above tree structure -* _Postgres service_: Operator backed service created from _backend component_ using the `odo service create --from-file ../postgrescluster.yaml` command. -//// - -== Various linking options - -`odo` provides various options for linking a component with an Operator-backed service or another `odo` component. All these options (or flags) can be used whether you are linking a component to a service or to another component. - -=== Default behavior - -By default, the `odo link` command creates a directory named `kubernetes/` in your component directory and stores the information (YAML manifests) about services and links there. When you use `odo push`, `odo` compares these manifests with the state of the resources on the Kubernetes cluster and decides whether it needs to create, modify or destroy resources to match what is specified by the user. - -=== The `--inlined` flag - -If you specify the `--inlined` flag to the `odo link` command, `odo` stores the link information inline in the `devfile.yaml` in the component directory, instead of creating a file under the `kubernetes/` directory. The behavior of the `--inlined` flag is similar in both the `odo link` and `odo service create` commands. This flag is helpful if you want everything stored in a single `devfile.yaml`. You have to remember to use `--inlined` flag with each `odo link` and `odo service create` command that you execute for the component. - -=== The `--map` flag - -Sometimes, you might want to add more binding information to the component, in addition to what is available by default. For example, if you are linking the component with a service and would like to bind some information from the service's spec (short for specification), you could use the `--map` flag. Note that `odo` does not do any validation against the spec of the service or component being linked. Using this flag is only recommended if you are comfortable using the Kubernetes YAML manifests. - -=== The `--bind-as-files` flag - -For all the linking options discussed so far, `odo` injects the binding information into the component as environment variables. If you would like to mount this information as files instead, you can use the `--bind-as-files` flag. This will make `odo` inject the binding information as files into the `/bindings` location within your component's Pod. Compared to the environment variables scenario, when you use `--bind-as-files`, the files are named after the keys and the value of these keys is stored as the contents of these files. - -== Examples - -=== Default `odo link` - -In the following example, the backend component is linked with the PostgreSQL service using the default `odo link` command. 
For the backend component, make sure that your component and service are pushed to the cluster: - -[source,terminal] ----- -$ odo list ----- - -.Sample output -[source,terminal] ----- -APP NAME PROJECT TYPE STATE MANAGED BY ODO -app backend myproject spring Pushed Yes ----- - -[source,terminal] ----- -$ odo service list ----- - -.Sample output -[source,terminal] ----- -NAME MANAGED BY ODO STATE AGE -PostgresCluster/hippo Yes (backend) Pushed 59m41s ----- - -Now, run `odo link` to link the backend component with the PostgreSQL service: - -[source,terminal] ----- -$ odo link PostgresCluster/hippo ----- - - -.Example output -[source,terminal] ----- - ✓ Successfully created link between component "backend" and service "PostgresCluster/hippo" - -To apply the link, please use `odo push` ----- - -And then run `odo push` to actually create the link on the Kubernetes cluster. - -After a successful `odo push`, you will see a few outcomes: - -. When you open the URL for the application deployed by backend component, it shows a list of `todo` items in the database. For example, in the output for the `odo url list` command, the path where `todos` are listed is included: -+ -[source,terminal] ----- -$ odo url list ----- -+ -.Sample output -[source,terminal] ----- -Found the following URLs for component backend -NAME STATE URL PORT SECURE KIND -8080-tcp Pushed http://8080-tcp.192.168.39.112.nip.io 8080 false ingress ----- -+ -The correct path for the URL would be \http://8080-tcp.192.168.39.112.nip.io/api/v1/todos. The exact URL depends on your setup. Also note that there are no `todos` in the database unless you add some, so the URL might just show an empty JSON object. - -. You can see binding information related to the Postgres service injected into the backend component. This binding information is injected, by default, as environment variables. 
You can check it using the `odo describe` command from the backend component's directory: -+ -[source,terminal] ----- -$ odo describe ----- -+ -.Example output: -[source,terminal] ----- -Component Name: backend -Type: spring -Environment Variables: - · PROJECTS_ROOT=/projects - · PROJECT_SOURCE=/projects - · DEBUG_PORT=5858 -Storage: - · m2 of size 3Gi mounted to /home/user/.m2 -URLs: - · http://8080-tcp.192.168.39.112.nip.io exposed via 8080 -Linked Services: - · PostgresCluster/hippo - Environment Variables: - · POSTGRESCLUSTER_PGBOUNCER-EMPTY - · POSTGRESCLUSTER_PGBOUNCER.INI - · POSTGRESCLUSTER_ROOT.CRT - · POSTGRESCLUSTER_VERIFIER - · POSTGRESCLUSTER_ID_ECDSA - · POSTGRESCLUSTER_PGBOUNCER-VERIFIER - · POSTGRESCLUSTER_TLS.CRT - · POSTGRESCLUSTER_PGBOUNCER-URI - · POSTGRESCLUSTER_PATRONI.CRT-COMBINED - · POSTGRESCLUSTER_USER - · pgImage - · pgVersion - · POSTGRESCLUSTER_CLUSTERIP - · POSTGRESCLUSTER_HOST - · POSTGRESCLUSTER_PGBACKREST_REPO.CONF - · POSTGRESCLUSTER_PGBOUNCER-USERS.TXT - · POSTGRESCLUSTER_SSH_CONFIG - · POSTGRESCLUSTER_TLS.KEY - · POSTGRESCLUSTER_CONFIG-HASH - · POSTGRESCLUSTER_PASSWORD - · POSTGRESCLUSTER_PATRONI.CA-ROOTS - · POSTGRESCLUSTER_DBNAME - · POSTGRESCLUSTER_PGBOUNCER-PASSWORD - · POSTGRESCLUSTER_SSHD_CONFIG - · POSTGRESCLUSTER_PGBOUNCER-FRONTEND.KEY - · POSTGRESCLUSTER_PGBACKREST_INSTANCE.CONF - · POSTGRESCLUSTER_PGBOUNCER-FRONTEND.CA-ROOTS - · POSTGRESCLUSTER_PGBOUNCER-HOST - · POSTGRESCLUSTER_PORT - · POSTGRESCLUSTER_ROOT.KEY - · POSTGRESCLUSTER_SSH_KNOWN_HOSTS - · POSTGRESCLUSTER_URI - · POSTGRESCLUSTER_PATRONI.YAML - · POSTGRESCLUSTER_DNS.CRT - · POSTGRESCLUSTER_DNS.KEY - · POSTGRESCLUSTER_ID_ECDSA.PUB - · POSTGRESCLUSTER_PGBOUNCER-FRONTEND.CRT - · POSTGRESCLUSTER_PGBOUNCER-PORT - · POSTGRESCLUSTER_CA.CRT ----- -+ -Some of these variables are used in the backend component's `src/main/resources/application.properties` file so that the Java Spring Boot application can connect to the PostgreSQL database service. - -. Lastly, `odo` has created a directory called `kubernetes/` in your backend component's directory that contains the following files: -+ -[source,terminal] ----- -$ ls kubernetes -odo-service-backend-postgrescluster-hippo.yaml odo-service-hippo.yaml ----- -+ -These files contain the information (YAML manifests) for two resources: - -.. `odo-service-hippo.yaml` - the Postgres _service_ created using `odo service create --from-file ../postgrescluster.yaml` command. -.. `odo-service-backend-postgrescluster-hippo.yaml` - the _link_ created using `odo link` command. - -=== Using odo link with the --inlined flag - -Using the `--inlined` flag with the `odo link` command has the same effect as an `odo link` command without the flag, in that it injects binding information. However, the subtle difference is that in the above case, there are two manifest files under `kubernetes/` directory, one for the Postgres service and another for the link between the backend component and this service. However, when you pass the `--inlined` flag, `odo` does not create a file under the `kubernetes/` directory to store the YAML manifest, but rather stores it inline in the `devfile.yaml` file. - -To see this, unlink the component from the PostgreSQL service first: - -[source,terminal] ----- -$ odo unlink PostgresCluster/hippo ----- - -.Example output: -[source,terminal] ----- - ✓ Successfully unlinked component "backend" from service "PostgresCluster/hippo" - -To apply the changes, please use `odo push` ----- - -To unlink them on the cluster, run `odo push`. 
Now if you inspect the `kubernetes/` directory, you see only one file: - -[source,terminal] ----- -$ ls kubernetes -odo-service-hippo.yaml ----- - -Next, use the `--inlined` flag to create a link: - -[source,terminal] ----- -$ odo link PostgresCluster/hippo --inlined ----- - -.Example output: -[source,terminal] ----- - ✓ Successfully created link between component "backend" and service "PostgresCluster/hippo" - -To apply the link, please use `odo push` ----- - -You need to run `odo push` for the link to get created on the cluster, like the procedure that omits the `--inlined` flag. `odo` stores the configuration in `devfile.yaml`. In this file, you can see an entry like the following: - -[source,yaml] ----- - kubernetes: - inlined: | - apiVersion: binding.operators.coreos.com/v1alpha1 - kind: ServiceBinding - metadata: - creationTimestamp: null - name: backend-postgrescluster-hippo - spec: - application: - group: apps - name: backend-app - resource: deployments - version: v1 - bindAsFiles: false - detectBindingResources: true - services: - - group: postgres-operator.crunchydata.com - id: hippo - kind: PostgresCluster - name: hippo - version: v1beta1 - status: - secret: "" - name: backend-postgrescluster-hippo ----- - -Now if you were to run `odo unlink PostgresCluster/hippo`, `odo` would first remove the link information from the `devfile.yaml`, and then a subsequent `odo push` would delete the link from the cluster. - -=== Custom bindings - -`odo link` accepts the flag `--map` which can inject custom binding information into the component. Such binding information will be fetched from the manifest of the resource that you are linking to your component. For example, in the context of the backend component and PostgreSQL service, you can inject information from the PostgreSQL service's manifest `postgrescluster.yaml` file into the backend component. - -If the name of your `PostgresCluster` service is `hippo` (or the output of `odo service list`, if your PostgresCluster service is named differently), when you want to inject the value of `postgresVersion` from that YAML definition into your backend component, run the command: - -[source,terminal] ----- -$ odo link PostgresCluster/hippo --map pgVersion='{{ .hippo.spec.postgresVersion }}' ----- - -Note that, if the name of your Postgres service is different from `hippo`, you will have to specify that in the above command in the place of `.hippo` in the value for `pgVersion`. - -After a link operation, run `odo push` as usual. Upon successful completion of the push operation, you can run the following command from your backend component directory, to validate if the custom mapping got injected properly: - -[source,terminal] ----- -$ odo exec -- env | grep pgVersion ----- - -.Example output: -[source,terminal] ----- -pgVersion=13 ----- - -Since you might want to inject more than just one piece of custom binding information, `odo link` accepts multiple key-value pairs of mappings. The only constraint is that these should be specified as `--map <key>=<value>`. For example, if you want to also inject PostgreSQL image information along with the version, you could run: - -[source,terminal] ----- -$ odo link PostgresCluster/hippo --map pgVersion='{{ .hippo.spec.postgresVersion }}' --map pgImage='{{ .hippo.spec.image }}' ----- - -and then run `odo push`. 
To validate if both the mappings got injected correctly, run the following command: - -[source,terminal] ----- -$ odo exec -- env | grep -e "pgVersion\|pgImage" ----- - -.Example output: -[source,terminal] ----- -pgVersion=13 -pgImage=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-ha:centos8-13.4-0 ----- - -==== To inline or not? - -You can accept the default behavior where `odo link` generate a manifests file for the link under `kubernetes/` directory. Alternatively, you can use the `--inlined` flag if you prefer to store everything in a single `devfile.yaml` file. - -== Binding as files - -Another helpful flag that `odo link` provides is `--bind-as-files`. When this flag is passed, the binding information is not injected into the component's Pod as environment variables but is mounted as a filesystem. - -Ensure that there are no existing links between the backend component and the PostgreSQL service. You could do this by running `odo describe` in the backend component's directory and check if you see output similar to the following: - -[source,terminal] ----- -Linked Services: - · PostgresCluster/hippo ----- - -Unlink the service from the component using: - -[source,terminal] ----- -$ odo unlink PostgresCluster/hippo -$ odo push ----- - -== --bind-as-files examples - -=== Using the default odo link - -By default, `odo` creates the manifest file under the `kubernetes/` directory, for storing the link information. Link the backend component and PostgreSQL service using: - -[source,terminal] ----- -$ odo link PostgresCluster/hippo --bind-as-files -$ odo push ----- - -.Example `odo describe` output: -[source,terminal] ----- -$ odo describe - -Component Name: backend -Type: spring -Environment Variables: - · PROJECTS_ROOT=/projects - · PROJECT_SOURCE=/projects - · DEBUG_PORT=5858 - · SERVICE_BINDING_ROOT=/bindings - · SERVICE_BINDING_ROOT=/bindings -Storage: - · m2 of size 3Gi mounted to /home/user/.m2 -URLs: - · http://8080-tcp.192.168.39.112.nip.io exposed via 8080 -Linked Services: - · PostgresCluster/hippo - Files: - · /bindings/backend-postgrescluster-hippo/pgbackrest_instance.conf - · /bindings/backend-postgrescluster-hippo/user - · /bindings/backend-postgrescluster-hippo/ssh_known_hosts - · /bindings/backend-postgrescluster-hippo/clusterIP - · /bindings/backend-postgrescluster-hippo/password - · /bindings/backend-postgrescluster-hippo/patroni.yaml - · /bindings/backend-postgrescluster-hippo/pgbouncer-frontend.crt - · /bindings/backend-postgrescluster-hippo/pgbouncer-host - · /bindings/backend-postgrescluster-hippo/root.key - · /bindings/backend-postgrescluster-hippo/pgbouncer-frontend.key - · /bindings/backend-postgrescluster-hippo/pgbouncer.ini - · /bindings/backend-postgrescluster-hippo/uri - · /bindings/backend-postgrescluster-hippo/config-hash - · /bindings/backend-postgrescluster-hippo/pgbouncer-empty - · /bindings/backend-postgrescluster-hippo/port - · /bindings/backend-postgrescluster-hippo/dns.crt - · /bindings/backend-postgrescluster-hippo/pgbouncer-uri - · /bindings/backend-postgrescluster-hippo/root.crt - · /bindings/backend-postgrescluster-hippo/ssh_config - · /bindings/backend-postgrescluster-hippo/dns.key - · /bindings/backend-postgrescluster-hippo/host - · /bindings/backend-postgrescluster-hippo/patroni.crt-combined - · /bindings/backend-postgrescluster-hippo/pgbouncer-frontend.ca-roots - · /bindings/backend-postgrescluster-hippo/tls.key - · /bindings/backend-postgrescluster-hippo/verifier - · /bindings/backend-postgrescluster-hippo/ca.crt - · 
/bindings/backend-postgrescluster-hippo/dbname - · /bindings/backend-postgrescluster-hippo/patroni.ca-roots - · /bindings/backend-postgrescluster-hippo/pgbackrest_repo.conf - · /bindings/backend-postgrescluster-hippo/pgbouncer-port - · /bindings/backend-postgrescluster-hippo/pgbouncer-verifier - · /bindings/backend-postgrescluster-hippo/id_ecdsa - · /bindings/backend-postgrescluster-hippo/id_ecdsa.pub - · /bindings/backend-postgrescluster-hippo/pgbouncer-password - · /bindings/backend-postgrescluster-hippo/pgbouncer-users.txt - · /bindings/backend-postgrescluster-hippo/sshd_config - · /bindings/backend-postgrescluster-hippo/tls.crt ----- - -Everything that was an environment variable in the `key=value` format in the earlier `odo describe` output is now mounted as a file. Use the `cat` command to view the contents of some of these files: - -.Example command: -[source,terminal] ----- -$ odo exec -- cat /bindings/backend-postgrescluster-hippo/password ----- - -.Example output: -[source,terminal] ----- -q({JC:jn^mm/Bw}eu+j.GX{k ----- - -.Example command: -[source,terminal] ----- -$ odo exec -- cat /bindings/backend-postgrescluster-hippo/user ----- - -.Example output: -[source,terminal] ----- -hippo ----- - -.Example command: -[source,terminal] ----- -$ odo exec -- cat /bindings/backend-postgrescluster-hippo/clusterIP ----- - -.Example output: -[source,terminal] ----- -10.101.78.56 ----- - -=== Using `--inlined` - -The result of using `--bind-as-files` and `--inlined` together is similar to using `odo link --inlined`. The manifest of the link gets stored in the `devfile.yaml`, instead of being stored in a separate file under `kubernetes/` directory. Other than that, the `odo describe` output would be the same as earlier. - -=== Custom bindings - -When you pass custom bindings while linking the backend component with the PostgreSQL service, these custom bindings are injected not as environment variables but are mounted as files. For example: - -[source,terminal] ----- -$ odo link PostgresCluster/hippo --map pgVersion='{{ .hippo.spec.postgresVersion }}' --map pgImage='{{ .hippo.spec.image }}' --bind-as-files -$ odo push ----- - -These custom bindings get mounted as files instead of being injected as environment variables. To validate that this worked, run the following command: - -.Example command: -[source,terminal] ----- -$ odo exec -- cat /bindings/backend-postgrescluster-hippo/pgVersion ----- - -.Example output: -[source,terminal] ----- -13 ----- - -.Example command: -[source,terminal] ----- -$ odo exec -- cat /bindings/backend-postgrescluster-hippo/pgImage ----- - -.Example output: -[source,terminal] ----- -registry.developers.crunchydata.com/crunchydata/crunchy-postgres-ha:centos8-13.4-0 ----- diff --git a/modules/developer-cli-odo-ref-registry.adoc b/modules/developer-cli-odo-ref-registry.adoc deleted file mode 100644 index abc4734bc63d..000000000000 --- a/modules/developer-cli-odo-ref-registry.adoc +++ /dev/null @@ -1,92 +0,0 @@ -:_content-type: REFERENCE -[id="odo-registry_{context}"] -= odo registry - - -`odo` uses the portable _devfile_ format to describe the components. `odo` can connect to various devfile registries, to download devfiles for different languages and frameworks. - -You can connect to publicly available devfile registries, or you can install your own _Secure Registry_. - -You can use the `odo registry` command to manage the registries that are used by `odo` to retrieve devfile information. 
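-
-The following sketch summarizes the subcommands described in this section; `<name>`, `<url>`, and `<access_token>` are placeholders:
-
-[source,terminal]
-----
-$ odo registry list                                          # show the configured registries
-$ odo registry add <name> <url> [--token <access_token>]
-$ odo registry update <name> <url> [--token <access_token>]
-$ odo registry delete <name>
-----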
- -== Listing the registries - -To list the registries currently contacted by `odo`, run the command: - -[source,terminal] ----- -$ odo registry list ----- - -.Example output: -[source,terminal] ----- -NAME URL SECURE -DefaultDevfileRegistry https://registry.devfile.io No ----- - - -`DefaultDevfileRegistry` is the default registry used by odo; it is provided by the https://devfile.io[devfile.io] project. - -== Adding a registry - -To add a registry, run the command: - -[source,terminal] ----- -$ odo registry add ----- - -.Example output: -[source,terminal] ----- -$ odo registry add StageRegistry https://registry.stage.devfile.io -New registry successfully added ----- - - -If you are deploying your own Secure Registry, you can specify the personal access token to authenticate to the secure registry with the `--token` flag: - -[source,terminal] ----- -$ odo registry add MyRegistry https://myregistry.example.com --token <access_token> -New registry successfully added ----- - -== Deleting a registry - -To delete a registry, run the command: - -[source,terminal] ----- -$ odo registry delete ----- - -.Example output: -[source,terminal] ----- -$ odo registry delete StageRegistry -? Are you sure you want to delete registry "StageRegistry" Yes -Successfully deleted registry ----- - -Use the `--force` (or `-f`) flag to force the deletion of the registry without confirmation. - -== Updating a registry - -To update the URL or the personal access token of a registry already registered, run the command: - -[source,terminal] ----- -$ odo registry update ----- - -.Example output: -[source,terminal] ----- - $ odo registry update MyRegistry https://otherregistry.example.com --token <other_access_token> - ? Are you sure you want to update registry "MyRegistry" Yes - Successfully updated registry ----- - -Use the `--force` (or `-f`) flag to force the update of the registry without confirmation. diff --git a/modules/developer-cli-odo-ref-service.adoc b/modules/developer-cli-odo-ref-service.adoc deleted file mode 100644 index e75982c528f8..000000000000 --- a/modules/developer-cli-odo-ref-service.adoc +++ /dev/null @@ -1,298 +0,0 @@ -:_content-type: REFERENCE -[id="odo-service_{context}"] -= odo service - -`odo` can deploy _services_ with the help of _Operators_. - -The list of available Operators and services available for installation can be found using the `odo catalog` command. - -Services are created in the context of a _component_, so run the `odo create` command before you deploy services. - -A service is deployed using two steps: - -. Define the service and store its definition in the devfile. -. Deploy the defined service to the cluster, using the `odo push` command. - -== Creating a new service - -To create a new service, run the command: - -[source,terminal] ----- -$ odo service create ----- - -For example, to create an instance of a Redis service named `my-redis-service`, you can run the following command: - -.Example output -[source,terminal] ----- -$ odo catalog list services -Services available through Operators -NAME CRDs -redis-operator.v0.8.0 RedisCluster, Redis - -$ odo service create redis-operator.v0.8.0/Redis my-redis-service -Successfully added service to the configuration; do 'odo push' to create service on the cluster ----- - -This command creates a Kubernetes manifest in the `kubernetes/` directory, containing the definition of the service, and this file is referenced from the `devfile.yaml` file. 
- -[source,terminal] ----- -$ cat kubernetes/odo-service-my-redis-service.yaml ----- - -.Example output -[source,yaml] ----- - apiVersion: redis.redis.opstreelabs.in/v1beta1 - kind: Redis - metadata: - name: my-redis-service - spec: - kubernetesConfig: - image: quay.io/opstree/redis:v6.2.5 - imagePullPolicy: IfNotPresent - resources: - limits: - cpu: 101m - memory: 128Mi - requests: - cpu: 101m - memory: 128Mi - serviceType: ClusterIP - redisExporter: - enabled: false - image: quay.io/opstree/redis-exporter:1.0 - storage: - volumeClaimTemplate: - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi ----- - -.Example command -[source,terminal] ----- -$ cat devfile.yaml ----- - -.Example output -[source,yaml] ----- -[...] -components: -- kubernetes: - uri: kubernetes/odo-service-my-redis-service.yaml - name: my-redis-service -[...] ----- - - -Note that the name of the created instance is optional. If you do not provide a name, it will be the lowercase name of the service. For example, the following command creates an instance of a Redis service named `redis`: - -[source,terminal] ----- -$ odo service create redis-operator.v0.8.0/Redis ----- - -=== Inlining the manifest - -By default, a new manifest is created in the `kubernetes/` directory, referenced from the `devfile.yaml` file. It is possible to inline the manifest inside the `devfile.yaml` file using the `--inlined` flag: - -[source,terminal] ----- -$ odo service create redis-operator.v0.8.0/Redis my-redis-service --inlined -Successfully added service to the configuration; do 'odo push' to create service on the cluster ----- - - -.Example command -[source,terminal] ----- -$ cat devfile.yaml ----- - -.Example output -[source,yaml] ----- -[...] -components: -- kubernetes: - inlined: | - apiVersion: redis.redis.opstreelabs.in/v1beta1 - kind: Redis - metadata: - name: my-redis-service - spec: - kubernetesConfig: - image: quay.io/opstree/redis:v6.2.5 - imagePullPolicy: IfNotPresent - resources: - limits: - cpu: 101m - memory: 128Mi - requests: - cpu: 101m - memory: 128Mi - serviceType: ClusterIP - redisExporter: - enabled: false - image: quay.io/opstree/redis-exporter:1.0 - storage: - volumeClaimTemplate: - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - name: my-redis-service -[...] ----- - -=== Configuring the service - -Without specific customization, the service will be created with a default configuration. You can use either command-line arguments or a file to specify your own configuration. - -==== Using command-line arguments - -Use the `--parameters` (or `-p`) flag to specify your own configuration. 
- -The following example configures the Redis service with three parameters: - -[source,terminal] ----- -$ odo service create redis-operator.v0.8.0/Redis my-redis-service \ - -p kubernetesConfig.image=quay.io/opstree/redis:v6.2.5 \ - -p kubernetesConfig.serviceType=ClusterIP \ - -p redisExporter.image=quay.io/opstree/redis-exporter:1.0 -Successfully added service to the configuration; do 'odo push' to create service on the cluster ----- - -.Example command -[source,terminal] ----- -$ cat kubernetes/odo-service-my-redis-service.yaml ----- - -.Example output -[source,yaml] ----- -apiVersion: redis.redis.opstreelabs.in/v1beta1 -kind: Redis -metadata: - name: my-redis-service -spec: - kubernetesConfig: - image: quay.io/opstree/redis:v6.2.5 - serviceType: ClusterIP - redisExporter: - image: quay.io/opstree/redis-exporter:1.0 ----- - -You can obtain the possible parameters for a specific service using the `odo catalog describe service` command. - -==== Using a file - -Use a YAML manifest to configure your own specification. In the following example, the Redis service is configured with three parameters. - -. Create a manifest: -+ -[source,terminal] ----- -$ cat > my-redis.yaml <<EOF -apiVersion: redis.redis.opstreelabs.in/v1beta1 -kind: Redis -metadata: - name: my-redis-service -spec: - kubernetesConfig: - image: quay.io/opstree/redis:v6.2.5 - serviceType: ClusterIP - redisExporter: - image: quay.io/opstree/redis-exporter:1.0 -EOF ----- - -. Create the service from the manifest: -+ -[source,terminal] ----- -$ odo service create --from-file my-redis.yaml -Successfully added service to the configuration; do 'odo push' to create service on the cluster ----- - -== Deleting a service - -To delete a service, run the command: - -[source,terminal] ----- -$ odo service delete ----- - -.Example output -[source,terminal] ----- -$ odo service list -NAME MANAGED BY ODO STATE AGE -Redis/my-redis-service Yes (api) Deleted locally 5m39s ----- - -[source,terminal] ----- -$ odo service delete Redis/my-redis-service -? Are you sure you want to delete Redis/my-redis-service Yes -Service "Redis/my-redis-service" has been successfully deleted; do 'odo push' to delete service from the cluster ----- - -Use the `--force` (or `-f`) flag to force the deletion of the service without confirmation. - -== Listing services - -To list the services created for your component, run the command: - -[source,terminal] ----- -$ odo service list ----- - -.Example output -[source,terminal] ----- -$ odo service list -NAME MANAGED BY ODO STATE AGE -Redis/my-redis-service-1 Yes (api) Not pushed -Redis/my-redis-service-2 Yes (api) Pushed 52s -Redis/my-redis-service-3 Yes (api) Deleted locally 1m22s ----- - -For each service, `STATE` indicates if the service has been pushed to the cluster using the `odo push` command, or if the service is still running on the cluster but removed from the devfile locally using the `odo service delete` command. 
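For example, pushing a pending service and listing the services again shows its state change from `Not pushed` to `Pushed`. The following output is illustrative:

[source,terminal]
----
$ odo push
$ odo service list
NAME                       MANAGED BY ODO     STATE      AGE
Redis/my-redis-service-1   Yes (api)          Pushed     10s
----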
- -== Getting information about a service - -To get details of a service such as its kind, version, name, and list of configured parameters, run the command: - -[source,terminal] ----- -$ odo service describe ----- - -.Example output -[source,terminal] ----- -$ odo service describe Redis/my-redis-service -Version: redis.redis.opstreelabs.in/v1beta1 -Kind: Redis -Name: my-redis-service -Parameters: -NAME VALUE -kubernetesConfig.image quay.io/opstree/redis:v6.2.5 -kubernetesConfig.serviceType ClusterIP -redisExporter.image quay.io/opstree/redis-exporter:1.0 ----- \ No newline at end of file diff --git a/modules/developer-cli-odo-ref-storage.adoc b/modules/developer-cli-odo-ref-storage.adoc deleted file mode 100644 index 467337948839..000000000000 --- a/modules/developer-cli-odo-ref-storage.adoc +++ /dev/null @@ -1,123 +0,0 @@ -:_content-type: REFERENCE -[id="odo-storage_{context}"] -= odo storage - - -`odo` lets users manage storage volumes that are attached to the components. A storage volume can be either an ephemeral volume using an `emptyDir` Kubernetes volume, or a link:https://kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim[Persistent Volume Claim] (PVC). A PVC allows users to claim a persistent volume (such as a GCE PersistentDisk or an iSCSI volume) without understanding the details of the particular cloud environment. The persistent storage volume can be used to persist data across restarts and rebuilds of the component. - -== Adding a storage volume - -To add a storage volume to the cluster, run the command: - -[source,terminal] ----- -$ odo storage create ----- - -.Example output: -[source,terminal] ----- -$ odo storage create store --path /data --size 1Gi -✓ Added storage store to nodejs-project-ufyy - -$ odo storage create tempdir --path /tmp --size 2Gi --ephemeral -✓ Added storage tempdir to nodejs-project-ufyy - -Please use `odo push` command to make the storage accessible to the component ----- - - -In the above example, the first storage volume has been mounted to the `/data` path and has a size of `1Gi`, and the second volume has been mounted to `/tmp` and is ephemeral. - -== Listing the storage volumes - -To check the storage volumes currently used by the component, run the command: - -[source,terminal] ----- -$ odo storage list ----- - -.Example output: -[source,terminal] ----- -$ odo storage list -The component 'nodejs-project-ufyy' has the following storage attached: -NAME SIZE PATH STATE -store 1Gi /data Not Pushed -tempdir 2Gi /tmp Not Pushed ----- - -== Deleting a storage volume - -To delete a storage volume, run the command: - -[source,terminal] ----- -$ odo storage delete ----- - -.Example output: -[source,terminal] ----- -$ odo storage delete store -f -Deleted storage store from nodejs-project-ufyy - -Please use `odo push` command to delete the storage from the cluster ----- - -In the above example, using the `-f` flag force deletes the storage without asking user permission. - -== Adding storage to specific container - -If your devfile has multiple containers, you can specify which container you want the storage to attach to, using the `--container` flag in the `odo storage create` command. 
- -The following example is an excerpt from a devfile with multiple containers : - -[source,yaml] ----- -components: - - name: nodejs1 - container: - image: registry.access.redhat.com/ubi8/nodejs-12:1-36 - memoryLimit: 1024Mi - endpoints: - - name: "3000-tcp" - targetPort: 3000 - mountSources: true - - name: nodejs2 - container: - image: registry.access.redhat.com/ubi8/nodejs-12:1-36 - memoryLimit: 1024Mi ----- - -In the example, there are two containers,`nodejs1` and `nodejs2`. To attach storage to the `nodejs2` container, use the following command: - -[source,terminal] ----- -$ odo storage create --container ----- - -.Example output: -[source,terminal] ----- -$ odo storage create store --path /data --size 1Gi --container nodejs2 -✓ Added storage store to nodejs-testing-xnfg - -Please use `odo push` command to make the storage accessible to the component ----- - -You can list the storage resources, using the `odo storage list` command: - -[source,terminal] ----- -$ odo storage list ----- - -.Example output: -[source,terminal] ----- -The component 'nodejs-testing-xnfg' has the following storage attached: -NAME SIZE PATH CONTAINER STATE -store 1Gi /data nodejs2 Not Pushed ----- \ No newline at end of file diff --git a/modules/developer-cli-odo-sample-applications-binary.adoc b/modules/developer-cli-odo-sample-applications-binary.adoc deleted file mode 100644 index b921553ac80f..000000000000 --- a/modules/developer-cli-odo-sample-applications-binary.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/using-sample-applications.adoc - -[id="odo-sample-applications-binary_{context}"] -= Binary example applications - -Use the following commands to build and run sample applications from a binary file for a particular runtime. - -[id="odo-sample-applications-binary-java_{context}"] -== java - -Java can be used to deploy a binary artifact as follows: - -[source,terminal] ----- -$ git clone https://github.com/spring-projects/spring-petclinic.git -$ cd spring-petclinic -$ mvn package -$ odo create java test3 --binary target/*.jar -$ odo push ----- - - -//Commenting out as it doesn't work for now. https://github.com/openshift/odo/issues/4623 -//// -[id="odo-sample-applications-binary-wildfly_{context}"] -== wildfly - -WildFly can be used to deploy a binary application as follows: - -[source,terminal] ----- -$ git clone https://github.com/openshiftdemos/os-sample-java-web.git -$ cd os-sample-java-web -$ mvn package -$ cd .. -$ mkdir example && cd example -$ mv ../os-sample-java-web/target/ROOT.war example.war -$ odo create wildfly --binary example.war ----- -//// diff --git a/modules/developer-cli-odo-sample-applications-git.adoc b/modules/developer-cli-odo-sample-applications-git.adoc deleted file mode 100644 index f2bc7361ed8e..000000000000 --- a/modules/developer-cli-odo-sample-applications-git.adoc +++ /dev/null @@ -1,91 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/using-sample-applications.adoc - -[id="odo-sample-applications-github_{context}"] -= Git repository example applications - -Use the following commands to build and run sample applications from a Git repository for a particular runtime. - -[id="odo-sample-applications-github-httpd_{context}"] -== httpd - -This example helps build and serve static content using httpd on CentOS 7. 
For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/httpd-container/blob/master/2.4/root/usr/share/container-scripts/httpd/README.md[Apache HTTP Server container image repository]. - -[source,terminal] ----- -$ odo create httpd --git https://github.com/openshift/httpd-ex.git ----- - -[id="odo-sample-applications-github-java_{context}"] -== java - -This example helps build and run fat JAR Java applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/fabric8io-images/s2i/blob/master/README.md[Java S2I Builder image]. - -[source,terminal] ----- -$ odo create java --git https://github.com/spring-projects/spring-petclinic.git ----- - -[id="odo-sample-applications-github-nodejs_{context}"] -== nodejs - -Build and run Node.js applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-nodejs-container/blob/master/8/README.md[Node.js 8 container image]. - -[source,terminal] ----- -$ odo create nodejs --git https://github.com/openshift/nodejs-ex.git ----- - -[id="odo-sample-applications-github-perl_{context}"] -== perl - -This example helps build and run Perl applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-perl-container/blob/master/5.26/README.md[Perl 5.26 container image]. - -[source,terminal] ----- -$ odo create perl --git https://github.com/openshift/dancer-ex.git ----- - -[id="odo-sample-applications-github-php_{context}"] -== php - -This example helps build and run PHP applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-php-container/blob/master/7.1/README.md[PHP 7.1 Docker image]. - -[source,terminal] ----- -$ odo create php --git https://github.com/openshift/cakephp-ex.git ----- - -[id="odo-sample-applications-github-python_{context}"] -== python - -This example helps build and run Python applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md[Python 3.6 container image]. - -[source,terminal] ----- -$ odo create python --git https://github.com/openshift/django-ex.git ----- - -[id="odo-sample-applications-github-ruby_{context}"] -== ruby - -This example helps build and run Ruby applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see link:https://github.com/sclorg/s2i-ruby-container/blob/master/2.5/README.md[Ruby 2.5 container image]. - -[source,terminal] ----- -$ odo create ruby --git https://github.com/openshift/ruby-ex.git ----- - -//Commenting out as it doesn't work for now. https://github.com/openshift/odo/issues/4623 -//// -[id="odo-sample-applications-github-wildfly_{context}"] -== wildfly - -This example helps build and run WildFly applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/wildfly/wildfly-s2i/blob/master/README.md[Wildfly - CentOS Docker images for OpenShift]. 
- -[source,terminal] ----- -$ odo create wildfly --git https://github.com/openshift/openshift-jee-sample.git ----- -//// diff --git a/modules/developer-cli-odo-sample-applications-github.adoc b/modules/developer-cli-odo-sample-applications-github.adoc deleted file mode 100644 index 5b809f0af981..000000000000 --- a/modules/developer-cli-odo-sample-applications-github.adoc +++ /dev/null @@ -1,91 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/using-sample-applications.adoc - -[id="odo-sample-applications-github_{context}"] -= Git repository sample application examples - -Use the following commands to build and run sample applications from a Git repository for a particular runtime. - -[id="odo-sample-applications-github-httpd_{context}"] -== httpd - -This example helps build and serve static content using httpd on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/httpd-container/blob/master/2.4/root/usr/share/container-scripts/httpd/README.md[Apache HTTP Server container image repository]. - -[source,terminal] ----- -$ odo create httpd --git https://github.com/openshift/httpd-ex.git ----- - -[id="odo-sample-applications-github-java_{context}"] -== java - -This example helps build and run fat JAR Java applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/fabric8io-images/s2i/blob/master/README.md[Java S2I Builder image]. - -[source,terminal] ----- -$ odo create java --git https://github.com/spring-projects/spring-petclinic.git ----- - -[id="odo-sample-applications-github-nodejs_{context}"] -== nodejs - -Build and run Node.js applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-nodejs-container/blob/master/8/README.md[Node.js 8 container image]. - -[source,terminal] ----- -$ odo create nodejs --git https://github.com/openshift/nodejs-ex.git ----- - -[id="odo-sample-applications-github-perl_{context}"] -== perl - -This example helps build and run Perl applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-perl-container/blob/master/5.26/README.md[Perl 5.26 container image]. - -[source,terminal] ----- -$ odo create perl --git https://github.com/openshift/dancer-ex.git ----- - -[id="odo-sample-applications-github-php_{context}"] -== php - -This example helps build and run PHP applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-php-container/blob/master/7.1/README.md[PHP 7.1 Docker image]. - -[source,terminal] ----- -$ odo create php --git https://github.com/openshift/cakephp-ex.git ----- - -[id="odo-sample-applications-github-python_{context}"] -== python - -This example helps build and run Python applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md[Python 3.6 container image]. 
- -[source,terminal] ----- -$ odo create python --git https://github.com/openshift/django-ex.git ----- - -[id="odo-sample-applications-github-ruby_{context}"] -== ruby - -This example helps build and run Ruby applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see link:https://github.com/sclorg/s2i-ruby-container/blob/master/2.5/README.md[Ruby 2.5 container image]. - -[source,terminal] ----- -$ odo create ruby --git https://github.com/openshift/ruby-ex.git ----- - -//Commenting out as it doesn't work for now. https://github.com/openshift/odo/issues/4623 -//// -[id="odo-sample-applications-github-wildfly_{context}"] -== wildfly - -This example helps build and run WildFly applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/wildfly/wildfly-s2i/blob/master/README.md[Wildfly - CentOS Docker images for OpenShift]. - -[source,terminal] ----- -$ odo create wildfly --git https://github.com/openshift/openshift-jee-sample.git ----- -//// diff --git a/modules/developer-cli-odo-set-config.adoc b/modules/developer-cli-odo-set-config.adoc deleted file mode 100644 index b946c88b6d88..000000000000 --- a/modules/developer-cli-odo-set-config.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc - -:_content-type: REFERENCE -[id="developer-cli-odo-set-config_{context}"] -= Setting a value - -You can set a value for a preference key by using the following command: - -[source,terminal] ----- -$ odo preference set <key> <value> ----- - -[NOTE] -==== -Preference keys are case-insensitive. -==== - -.Example command -[source,terminal] ----- -$ odo preference set updatenotification false ----- - -.Example output -[source,terminal] ----- -Global preference was successfully updated ----- diff --git a/modules/developer-cli-odo-setting-and-unsetting-environment-variables.adoc b/modules/developer-cli-odo-setting-and-unsetting-environment-variables.adoc deleted file mode 100644 index f386929b35d5..000000000000 --- a/modules/developer-cli-odo-setting-and-unsetting-environment-variables.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/managing-environment-variables-in-odo.adoc - -:_content-type: PROCEDURE -[id="setting-and-unsetting-environment-variables._{context}"] - -= Setting and unsetting environment variables - -.Procedure - -* To set an environment variable in a component: -+ -[source,terminal] ----- -$ odo config set --env <variable>=<value> ----- - -* To unset an environment variable in a component: -+ -[source,terminal] ----- -$ odo config unset --env <variable> ----- - -* To list all environment variables in a component: -+ -[source,terminal] ----- -$ odo config view ----- diff --git a/modules/developer-cli-odo-switching-between-ephemeral-and-persistent-storage.adoc b/modules/developer-cli-odo-switching-between-ephemeral-and-persistent-storage.adoc deleted file mode 100644 index a412f1509437..000000000000 --- a/modules/developer-cli-odo-switching-between-ephemeral-and-persistent-storage.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// *cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-storage.adoc - -:_content-type: PROCEDURE 
-[id="switching-between-ephemeral-and-persistent-storage_{context}"] -= Switching between ephemeral and persistent storage - -You can switch between ephemeral and persistent storage in your project by using the `odo preference` command. `odo preference` modifies the global preference in your cluster. - -When persistent storage is enabled, the cluster stores the information between the restarts. - -When ephemeral storage is enabled, the cluster does not store the information between the restarts. - -Ephemeral storage is enabled by default. - -.Procedure - -. See the preference currently set in your project: -+ -[source,terminal] ----- -$ odo preference view ----- -+ -.Example output -+ -[source,terminal] ----- -PARAMETER CURRENT_VALUE -UpdateNotification -NamePrefix -Timeout -BuildTimeout -PushTimeout -Experimental -PushTarget -Ephemeral true ----- - -. To unset the ephemeral storage and set the persistent storage: -+ -[source,terminal] ----- -$ odo preference set Ephemeral false ----- - -. To set the ephemeral storage again: -+ -[source,terminal] ----- -$ odo preference set Ephemeral true ----- -+ -The `odo preference` command changes the global settings of all your currently deployed components as well as ones you will deploy in future. - -. Run `odo push` to make `odo` create a specified storage for your component: -+ -[source,terminal] ----- -$ odo push ----- diff --git a/modules/developer-cli-odo-unset-config.adoc b/modules/developer-cli-odo-unset-config.adoc deleted file mode 100644 index a18da10de1ce..000000000000 --- a/modules/developer-cli-odo-unset-config.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc - -:_content-type: REFERENCE -[id="developer-cli-odo-unset-config_{context}"] -= Unsetting a value - -You can unset a value for a preference key by using the following command: - -[source,terminal] ----- -$ odo preference unset <key> ----- - -[NOTE] -==== -You can use the `-f` flag to skip the confirmation. -==== - -.Example command -[source,terminal] ----- -$ odo preference unset updatenotification -? Do you want to unset updatenotification in the preference (y/N) y ----- - -.Example output -[source,terminal] ----- -Global preference was successfully updated ----- diff --git a/modules/developer-cli-odo-using-command-completion.adoc b/modules/developer-cli-odo-using-command-completion.adoc deleted file mode 100644 index b17ad00196d3..000000000000 --- a/modules/developer-cli-odo-using-command-completion.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc - -:_content-type: PROCEDURE -[id="using-command-completion_{context}"] -= Using command completion - -[NOTE] -==== -Currently command completion is only supported for bash, zsh, and fish shells. -==== - -{odo-title} provides a smart completion of command parameters based on user input. For this to work, {odo-title} needs to integrate with the executing shell. - -.Procedure - -* To install command completion automatically: -. Run: -+ -[source,terminal] ----- -$ odo --complete ----- -+ -. Press `y` when prompted to install the completion hook. - -* To install the completion hook manually, add `complete -o nospace -C <full_path_to_your_odo_binary> odo` to your shell configuration file. After any modification to your shell configuration file, restart your shell. - -* To disable completion: -. 
Run: -+ -[source,terminal] ----- -$ odo --uncomplete ----- -+ -. Press `y` when prompted to uninstall the completion hook. - -[NOTE] -==== -Re-enable command completion if you either rename the {odo-title} executable or move it to a different directory. -==== diff --git a/modules/developer-cli-odo-view-config.adoc b/modules/developer-cli-odo-view-config.adoc deleted file mode 100644 index d8cd5ef914ec..000000000000 --- a/modules/developer-cli-odo-view-config.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc - -:_content-type: REFERENCE -[id="developer-cli-odo-view-config_{context}"] -= Viewing the current configuration - -You can view the current `odo` CLI configuration by using the following command: - -[source,terminal] ----- -$ odo preference view ----- - -.Example output -[source,terminal] ----- -PARAMETER CURRENT_VALUE -UpdateNotification -NamePrefix -Timeout -BuildTimeout -PushTimeout -Ephemeral -ConsentTelemetry true ----- diff --git a/modules/differences-between-machinesets-and-machineconfigpool.adoc b/modules/differences-between-machinesets-and-machineconfigpool.adoc deleted file mode 100644 index be0e2f35297e..000000000000 --- a/modules/differences-between-machinesets-and-machineconfigpool.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/node-tasks.adoc -// * post_installation_configuration/cluster-tasks.adoc - - -:_content-type: CONCEPT -[id="differences-between-machinesets-and-machineconfigpool_{context}"] -= Understanding the difference between compute machine sets and the machine config pool - -`MachineSet` objects describe {product-title} nodes with respect to the cloud or machine provider. - -The `MachineConfigPool` object allows `MachineConfigController` components to define and provide the status of machines in the context of upgrades. - -The `MachineConfigPool` object allows users to configure how upgrades are rolled out to the {product-title} nodes in the machine config pool. - -The `NodeSelector` object can be replaced with a reference to the `MachineSet` object. diff --git a/modules/digging-into-machine-config.adoc b/modules/digging-into-machine-config.adoc deleted file mode 100644 index 7daead328690..000000000000 --- a/modules/digging-into-machine-config.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/architecture_rhcos.adoc - -[id="digging-into-machine-config_{context}"] -= Changing Ignition configs after installation - -Machine config pools manage a cluster of nodes and their corresponding machine -configs. Machine configs contain configuration information for a cluster. 
-To list all machine config pools that are known: - -[source,terminal] ----- -$ oc get machineconfigpools ----- - -.Example output -[source,terminal] ----- -NAME   CONFIG                                  UPDATED UPDATING DEGRADED -master master-1638c1aea398413bb918e76632f20799 False   False    False -worker worker-2feef4f8288936489a5a832ca8efe953 False   False    False ----- - -To list all machine configs: - -[source,terminal] ----- -$ oc get machineconfig ----- - -.Example output -[source,terminal] ----- -NAME                                      GENERATEDBYCONTROLLER   IGNITIONVERSION   CREATED   OSIMAGEURL - -00-master                                 4.0.0-0.150.0.0-dirty   3.2.0             16m -00-master-ssh                             4.0.0-0.150.0.0-dirty                     16m -00-worker                                 4.0.0-0.150.0.0-dirty   3.2.0             16m -00-worker-ssh                             4.0.0-0.150.0.0-dirty                     16m -01-master-kubelet                         4.0.0-0.150.0.0-dirty   3.2.0             16m -01-worker-kubelet                         4.0.0-0.150.0.0-dirty   3.2.0             16m -master-1638c1aea398413bb918e76632f20799   4.0.0-0.150.0.0-dirty   3.2.0             16m -worker-2feef4f8288936489a5a832ca8efe953   4.0.0-0.150.0.0-dirty   3.2.0             16m ----- - -The Machine Config Operator acts somewhat differently than Ignition when it -comes to applying these machine configs. The machine configs are read in order -(from 00* to 99*). Labels inside the machine configs identify the type of node -each is for (master or worker). If the same file appears in multiple -machine config files, the last one wins. So, for example, any file that appears -in a 99* file would replace the same file that appeared in a 00* file. -The input `MachineConfig` objects are unioned into a "rendered" `MachineConfig` -object, which will be used as a target by the operator and is the value you -can see in the machine config pool. - -To see what files are being managed from a machine config, look for "Path:" -inside a particular `MachineConfig` object. For example: - -[source,terminal] ----- -$ oc describe machineconfigs 01-worker-container-runtime | grep Path: ----- - -.Example output -[source,terminal] ----- -            Path:            /etc/containers/registries.conf -            Path:            /etc/containers/storage.conf -            Path:            /etc/crio/crio.conf ----- - -Be sure to give the machine config file a later name -(such as 10-worker-container-runtime). Keep in mind that the content of each -file is in URL-style data. Then apply the new machine config to the cluster. diff --git a/modules/disable-quickstarts-admin-console.adoc b/modules/disable-quickstarts-admin-console.adoc deleted file mode 100644 index 8131dea0c0c0..000000000000 --- a/modules/disable-quickstarts-admin-console.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/configuring-web-console.adoc - -[id="disable-quickstarts-admin-console_{context}"] -= Disabling quick starts in the web console - -You can use the *Administrator* perspective of the web console to disable one or more quick starts. - -.Prerequisites - -* You have cluster administrator permissions and are logged in to the web console. - -include::snippets/access-cluster-configuration-console.adoc[] - -. 
On the *General* tab, in the *Quick starts* section, you can select items in either the *Enabled* or *Disabled* list, and move them from one list to the other by using the arrow buttons. - -** To enable or disable a single quick start, click the quick start, then use the single arrow buttons to move the quick start to the appropriate list. -** To enable or disable multiple quick starts at once, press Ctrl and click the quick starts you want to move. Then, use the single arrow buttons to move the quick starts to the appropriate list. -** To enable or disable all quick starts at once, click the double arrow buttons to move all of the quick starts to the appropriate list. diff --git a/modules/disabling-catalogsource-objects.adoc b/modules/disabling-catalogsource-objects.adoc deleted file mode 100644 index 10239881fd5e..000000000000 --- a/modules/disabling-catalogsource-objects.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * admin/olm-cs-podsched.adoc - -:_content-type: PROCEDURE -[id="disabling-catalogsource-objects_{context}"] -= Disabling default CatalogSource objects at a local level - -You can apply persistent changes to a `CatalogSource` object, such as catalog source pods, at a local level, by disabling a default `CatalogSource` object. Consider the default configuration in situations where the default `CatalogSource` object's configuration does not meet your organization's needs. By default, if you modify fields in the `spec.grpcPodConfig` section of the `CatalogSource` object, the Marketplace Operator automatically reverts these changes. - -The Marketplace Operator, `openshift-marketplace`, manages the default custom resources (CRs) of the `OperatorHub`. The `OperatorHub` manages `CatalogSource` objects. - -To apply persistent changes to `CatalogSource` object, you must first disable a default `CatalogSource` object. - -.Procedure - -* To disable all the default `CatalogSource` objects at a local level, enter the following command: -+ -[source,terminal] ----- -$ oc patch operatorhub cluster -p '{"spec": {"disableAllDefaultSources": true}}' --type=merge ----- -+ -[NOTE] -==== -You can also configure the default `OperatorHub` CR to either disable all `CatalogSource` objects or disable a specific object. -==== diff --git a/modules/disabling-etcd-encryption.adoc b/modules/disabling-etcd-encryption.adoc deleted file mode 100644 index 6f5d1ec95546..000000000000 --- a/modules/disabling-etcd-encryption.adoc +++ /dev/null @@ -1,90 +0,0 @@ -// Module included in the following assemblies: -// -// * security/encrypting-etcd.adoc -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: PROCEDURE -[id="disabling-etcd-encryption_{context}"] -= Disabling etcd encryption - -You can disable encryption of etcd data in your cluster. - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Modify the `APIServer` object: -+ -[source,terminal] ----- -$ oc edit apiserver ----- - -. Set the `encryption` field type to `identity`: -+ -[source,yaml] ----- -spec: - encryption: - type: identity <1> ----- -<1> The `identity` type is the default value and means that no encryption is performed. - -. Save the file to apply the changes. -+ -The decryption process starts. It can take 20 minutes or longer for this process to complete, depending on the size of your cluster. - -. Verify that etcd decryption was successful. - -.. 
Review the `Encrypted` status condition for the OpenShift API server to verify that its resources were successfully decrypted: -+ -[source,terminal] ----- -$ oc get openshiftapiserver -o=jsonpath='{range .items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -The output shows `DecryptionCompleted` upon successful decryption: -+ -[source,terminal] ----- -DecryptionCompleted -Encryption mode set to identity and everything is decrypted ----- -+ -If the output shows `DecryptionInProgress`, decryption is still in progress. Wait a few minutes and try again. - -.. Review the `Encrypted` status condition for the Kubernetes API server to verify that its resources were successfully decrypted: -+ -[source,terminal] ----- -$ oc get kubeapiserver -o=jsonpath='{range .items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -The output shows `DecryptionCompleted` upon successful decryption: -+ -[source,terminal] ----- -DecryptionCompleted -Encryption mode set to identity and everything is decrypted ----- -+ -If the output shows `DecryptionInProgress`, decryption is still in progress. Wait a few minutes and try again. - -.. Review the `Encrypted` status condition for the OpenShift OAuth API server to verify that its resources were successfully decrypted: -+ -[source,terminal] ----- -$ oc get authentication.operator.openshift.io -o=jsonpath='{range .items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -The output shows `DecryptionCompleted` upon successful decryption: -+ -[source,terminal] ----- -DecryptionCompleted -Encryption mode set to identity and everything is decrypted ----- -+ -If the output shows `DecryptionInProgress`, decryption is still in progress. Wait a few minutes and try again. diff --git a/modules/disabling-insights-advisor-recommendations.adoc b/modules/disabling-insights-advisor-recommendations.adoc deleted file mode 100644 index 2b0d256497fb..000000000000 --- a/modules/disabling-insights-advisor-recommendations.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc -// * sd_support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc - -:_content-type: PROCEDURE -[id="disabling-insights-advisor-recommendations_{context}"] -= Disabling Insights Advisor recommendations - -You can disable specific recommendations that affect your clusters, so that they no longer appear in your reports. It is possible to disable a recommendation for a single cluster or all of your clusters. - -[NOTE] -==== -Disabling a recommendation for all of your clusters also applies to any future clusters. -==== - -.Prerequisites - -* Remote health reporting is enabled, which is the default. -* Your cluster is registered on {cluster-manager-url}. -* You are logged in to {cluster-manager-url}. - -.Procedure - -. Navigate to *Advisor* -> *Recommendations* on {cluster-manager-url}. -. Click the name of the recommendation to disable. You are directed to the single recommendation page. -. To disable the recommendation for a single cluster: -.. Click the *Options* menu {kebab} for that cluster, and then click *Disable recommendation for cluster*. -.. Enter a justification note and click *Save*. -. To disable the recommendation for all of your clusters: -.. Click *Actions* -> *Disable recommendation*. -.. Enter a justification note and click *Save*. 
diff --git a/modules/disabling-insights-operator-alerts.adoc b/modules/disabling-insights-operator-alerts.adoc deleted file mode 100644 index 564ecd965a48..000000000000 --- a/modules/disabling-insights-operator-alerts.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-operator.adoc - - -:_content-type: CONCEPT -[id="disabling-insights-operator-alerts_{context}"] -= Disabling Insights Operator alerts - -You can stop Insights Operator from firing alerts to the cluster Prometheus instance. - -. Navigate to *Workloads* -> *Secrets*. -. On the *Secrets* page, select *All Projects* from the *Project* list, and then set *Show default projects* to on. -. Select the *openshift-config* project from the *Projects* list. -. Search for the *support* secret using the *Search by name* field. If the secret does not exist, click *Create* -> *Key/value secret* to create it. -. Click the *Options* menu {kebab}, and then click *Edit Secret*. -. Click *Add Key/Value*. -. Enter `disableInsightsAlerts` as the key with the value `True`, and click *Save*. - -After you save the changes, Insights Operator will no longer send alerts to the cluster Prometheus instance. diff --git a/modules/disabling-insights-operator-gather.adoc b/modules/disabling-insights-operator-gather.adoc deleted file mode 100644 index abfc419f6e0a..000000000000 --- a/modules/disabling-insights-operator-gather.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-operator.adoc - - -:_content-type: PROCEDURE -[id="disabling-insights-operator-gather_{context}"] -= Disabling the Insights Operator gather operations - -You can disable the Insights Operator gather operations. Disabling the gather operations gives you the ability to increase privacy for your organization as Insights Operator will no longer gather and send Insights cluster reports to Red Hat. This will disable Insights analysis and recommendations for your cluster without affecting other core functions that require communication with Red Hat such as cluster transfers. You can view a list of attempted gather operations for your cluster from the `/insights-operator/gathers.json` file in your Insights Operator archive. Be aware that some gather operations only occur when certain conditions are met and might not appear in your most recent archive. - -:FeatureName: The `InsightsDataGather` custom resource -include::snippets/technology-preview.adoc[] - -.Prerequisites - -* You are logged in to the {product-title} web console as a user with `cluster-admin` role. - -.Procedure - -. Navigate to *Administration* -> *CustomResourceDefinitions*. -. On the *CustomResourceDefinitions* page, use the *Search by name* field to find the *InsightsDataGather* resource definition and click it. -. On the *CustomResourceDefinition details* page, click the *Instances* tab. -. Click *cluster*, and then click the *YAML* tab. -. To disable all the gather operations, edit the `InsightsDataGather` configuration file: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1alpha1 -kind: InsightsDataGather -metadata: -.... - -spec: <1> - gatherConfig: - disabledGatherers: - - all <2> ----- -+ --- -<1> The `spec` parameter specifies gather configurations. -<2> The `all` value disables all gather operations. 
--- -To disable individual gather operations, enter their values under the `disabledGatherers` key: -+ -[source,yaml] ----- -spec: - gatherConfig: - disabledGatherers: - - clusterconfig/container_images <1> - - clusterconfig/host_subnets - - workloads/workload_info ----- -+ --- -<1> Example individual gather operation --- -+ -. Click *Save*. -+ -After you save the changes, the Insights Operator gather configurations are updated and the operations will no longer occur. - -[NOTE] -==== -Disabling gather operations degrades Insights Advisor's ability to offer effective recommendations for your cluster. -==== diff --git a/modules/disabling-plug-in-browser.adoc b/modules/disabling-plug-in-browser.adoc deleted file mode 100644 index 465c1b1a8626..000000000000 --- a/modules/disabling-plug-in-browser.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/dynamic-plugin/deploy-plugin-cluster.adoc - -:_content-type: PROCEDURE -[id="disabling-your-plugin-browser_{context}"] -= Disabling your plugin in the browser - -Console users can use the `disable-plugins` query parameter to disable specific or all dynamic plugins that would normally get loaded at run-time. - -.Procedure - -* To disable a specific plugin(s), remove the plugin you want to disable from the comma-separated list of plugin names. - -* To disable all plugins, leave an empty string in the `disable-plugins` query parameter. - -[NOTE] -==== -Cluster administrators can disable plugins in the *Cluster Settings* page of the web console -==== diff --git a/modules/disabling-project-self-provisioning.adoc b/modules/disabling-project-self-provisioning.adoc deleted file mode 100644 index 662545ba8532..000000000000 --- a/modules/disabling-project-self-provisioning.adoc +++ /dev/null @@ -1,100 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/projects/configuring-project-creation.adoc - -:_content-type: PROCEDURE -[id="disabling-project-self-provisioning_{context}"] -= Disabling project self-provisioning - -You can prevent an authenticated user group from self-provisioning new projects. - -.Procedure - -. Log in as a user with `cluster-admin` privileges. - -. View the `self-provisioners` cluster role binding usage by running the following command: -+ -[source,terminal] ----- -$ oc describe clusterrolebinding.rbac self-provisioners ----- -+ -.Example output -[source,terminal] ----- -Name: self-provisioners -Labels: <none> -Annotations: rbac.authorization.kubernetes.io/autoupdate=true -Role: - Kind: ClusterRole - Name: self-provisioner -Subjects: - Kind Name Namespace - ---- ---- --------- - Group system:authenticated:oauth ----- -+ -Review the subjects in the `self-provisioners` section. - -. Remove the `self-provisioner` cluster role from the group `system:authenticated:oauth`. - -** If the `self-provisioners` cluster role binding binds only the `self-provisioner` role to the `system:authenticated:oauth` group, run the following command: -+ -[source,terminal] ----- -$ oc patch clusterrolebinding.rbac self-provisioners -p '{"subjects": null}' ----- - -** If the `self-provisioners` cluster role binding binds the `self-provisioner` role to more users, groups, or service accounts than the `system:authenticated:oauth` group, run the following command: -+ -[source,terminal] ----- -$ oc adm policy \ - remove-cluster-role-from-group self-provisioner \ - system:authenticated:oauth ----- - -. 
Edit the `self-provisioners` cluster role binding to prevent automatic updates to the role. Automatic updates reset the cluster roles to the default state. - -** To update the role binding using the CLI: - -... Run the following command: -+ -[source,terminal] ----- -$ oc edit clusterrolebinding.rbac self-provisioners ----- - -... In the displayed role binding, set the `rbac.authorization.kubernetes.io/autoupdate` parameter value to `false`, as shown in the following example: -+ -[source,yaml] ----- -apiVersion: authorization.openshift.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - rbac.authorization.kubernetes.io/autoupdate: "false" - ... ----- - -** To update the role binding by using a single command: -+ -[source,terminal] ----- -$ oc patch clusterrolebinding.rbac self-provisioners -p '{ "metadata": { "annotations": { "rbac.authorization.kubernetes.io/autoupdate": "false" } } }' ----- - -. Log in as an authenticated user and verify that it can no longer self-provision a project: -+ -[source,terminal] ----- -$ oc new-project test ----- -+ -.Example output -[source,terminal] ----- -Error from server (Forbidden): You may not request a new project via this API. ----- -+ -Consider customizing this project request message to provide more helpful instructions specific to your organization. diff --git a/modules/disabling-transparent-huge-pages.adoc b/modules/disabling-transparent-huge-pages.adoc deleted file mode 100644 index 528252bfcba2..000000000000 --- a/modules/disabling-transparent-huge-pages.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.adoc - -:_content-type: PROCEDURE -[id="disable-thp_{context}"] -= Disabling Transparent Huge Pages - -Transparent Huge Pages (THP) attempt to automate most aspects of creating, managing, and using huge pages. Since THP automatically manages the huge pages, this is not always handled optimally for all types of workloads. THP can lead to performance regressions, since many applications handle huge pages on their own. Therefore, consider disabling THP. The following steps describe how to disable THP using the Node Tuning Operator (NTO). - -.Procedure - -. Create a file with the following content and name it `thp-disable-tuned.yaml`: -+ -[source,yaml] ----- -apiVersion: tuned.openshift.io/v1 -kind: Tuned -metadata: - name: thp-workers-profile - namespace: openshift-cluster-node-tuning-operator -spec: - profile: - - data: | - [main] - summary=Custom tuned profile for OpenShift to turn off THP on worker nodes - include=openshift-node - - [vm] - transparent_hugepages=never - name: openshift-thp-never-worker - - recommend: - - match: - - label: node-role.kubernetes.io/worker - priority: 25 - profile: openshift-thp-never-worker ----- - -. Create the Tuned object: -+ -[source,terminal] ----- -$ oc create -f thp-disable-tuned.yaml ----- - -. 
Check the list of active profiles: -+ -[source,terminal] ----- -$ oc get profile -n openshift-cluster-node-tuning-operator ----- - -.Verification - -* Log in to one of the nodes and do a regular THP check to verify if the nodes applied the profile successfully: -+ -[source,terminal] ----- -$ cat /sys/kernel/mm/transparent_hugepage/enabled ----- -+ -.Example output -[source,terminal] ----- -always madvise [never] ----- diff --git a/modules/disconnected-osus-overview.adoc b/modules/disconnected-osus-overview.adoc deleted file mode 100644 index 12f151c411b1..000000000000 --- a/modules/disconnected-osus-overview.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/updating_a_cluster/updating_disconnected_cluster/disconnected-update-osus.adoc - -:_content-type: CONCEPT -[id="update-service-overview_{context}"] - -= Using the OpenShift Update Service in a disconnected environment - -The OpenShift Update Service (OSUS) provides update recommendations to {product-title} clusters. Red Hat publicly hosts the OpenShift Update Service, and clusters in a connected environment can connect to the service through public APIs to retrieve update recommendations. - -However, clusters in a disconnected environment cannot access these public APIs to retrieve update information. To have a similar update experience in a disconnected environment, you can install and configure the OpenShift Update Service locally so that it is available within the disconnected environment. - -The following sections describe how to install a local OSUS instance and configure it to provide update recommendations to a cluster. \ No newline at end of file diff --git a/modules/displaying-all-insights-advisor-recommendations.adoc b/modules/displaying-all-insights-advisor-recommendations.adoc deleted file mode 100644 index 770fef46d25c..000000000000 --- a/modules/displaying-all-insights-advisor-recommendations.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc -// * sd_support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc - -:_content-type: PROCEDURE -[id="displaying-all-insights-advisor-recommendations_{context}"] -= Displaying all Insights Advisor recommendations - -The Recommendations view, by default, only displays the recommendations that are detected on your clusters. However, you can view all of the recommendations in the advisor archive. - -.Prerequisites - -* Remote health reporting is enabled, which is the default. -* Your cluster is link:https://console.redhat.com/openshift/register[registered] on Red Hat Hybrid Cloud Console. -* You are logged in to {cluster-manager-url}. - -.Procedure - -. Navigate to *Advisor* -> *Recommendations* on {cluster-manager-url}. -. Click the *X* icons next to the *Clusters Impacted* and *Status* filters. -+ -You can now browse through all of the potential recommendations for your cluster. diff --git a/modules/displaying-ovs-logs.adoc b/modules/displaying-ovs-logs.adoc deleted file mode 100644 index ca1e90ec49e5..000000000000 --- a/modules/displaying-ovs-logs.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: PROCEDURE -[id="displaying-ovs-logs_{context}"] -= Displaying Open vSwitch logs - -Use the following procedure to display Open vSwitch (OVS) logs. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. 
- -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -* Run one of the following commands: - -** Display the logs by using the `oc` command from outside the cluster: -+ -[source,terminal] ----- -$ oc adm node-logs <node_name> -u ovs-vswitchd ----- - -** Display the logs after logging on to a node in the cluster: -+ -[source,terminal] ----- -# journalctl -b -f -u ovs-vswitchd.service ----- -+ -One way to log on to a node is by using the `oc debug node/<node_name>` command. diff --git a/modules/displaying-potential-issues-with-your-cluster.adoc b/modules/displaying-potential-issues-with-your-cluster.adoc deleted file mode 100644 index 2d441f83da21..000000000000 --- a/modules/displaying-potential-issues-with-your-cluster.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc -// * sd_support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc - -:_content-type: PROCEDURE -[id="displaying-potential-issues-with-your-cluster_{context}"] -= Displaying potential issues with your cluster - -This section describes how to display the Insights report in *Insights Advisor* on {cluster-manager-url}. - -Note that Insights repeatedly analyzes your cluster and shows the latest results. These results can change, for example, if you fix an issue or a new issue has been detected. - -.Prerequisites - -* Your cluster is registered on {cluster-manager-url}. -* Remote health reporting is enabled, which is the default. -* You are logged in to {cluster-manager-url}. - -.Procedure - -. Navigate to *Advisor* -> *Recommendations* on {cluster-manager-url}. -+ -Depending on the result, Insights Advisor displays one of the following: -+ -* *No matching recommendations found*, if Insights did not identify any issues. -+ -* A list of issues Insights has detected, grouped by risk (low, moderate, important, and critical). -+ -* *No clusters yet*, if Insights has not yet analyzed the cluster. The analysis starts shortly after the cluster has been installed, registered, and connected to the internet. - -. If any issues are displayed, click the *>* icon in front of the entry for more details. -+ -Depending on the issue, the details can also contain a link to more information from Red Hat about the issue. diff --git a/modules/displaying-the-insights-status-in-the-web-console.adoc b/modules/displaying-the-insights-status-in-the-web-console.adoc deleted file mode 100644 index dfbd2add1ecd..000000000000 --- a/modules/displaying-the-insights-status-in-the-web-console.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc -// * sd_support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc - -:_content-type: PROCEDURE -[id="displaying-the-insights-status-in-the-web-console_{context}"] -= Displaying the Insights status in the web console - -Insights repeatedly analyzes your cluster and you can display the status of identified potential issues of your cluster in the {product-title} web console. This status shows the number of issues in the different categories and, for further details, links to the reports in {cluster-manager-url}. - -.Prerequisites - -* Your cluster is registered in {cluster-manager-url}. -* Remote health reporting is enabled, which is the default. 
-* You are logged in to the {product-title} web console. - -.Procedure - -. Navigate to *Home* -> *Overview* in the {product-title} web console. - -. Click *Insights* on the *Status* card. -+ -The pop-up window lists potential issues grouped by risk. Click the individual categories or *View all recommendations in Insights Advisor* to display more details. diff --git a/modules/distr-tracing-accessing-jaeger-console.adoc b/modules/distr-tracing-accessing-jaeger-console.adoc deleted file mode 100644 index 69ada6de56b7..000000000000 --- a/modules/distr-tracing-accessing-jaeger-console.adoc +++ /dev/null @@ -1,61 +0,0 @@ -//// -Module included in the following assemblies: -* distr_tracing/distr_tracing_install/distr-tracing-deploying-jaeger.adoc -* distr_tracing/distr_tracing_install/distr-tracing-deploying-otel.adoc -//// -:_content-type: PROCEDURE -[id="distr-tracing-accessing-jaeger-console_{context}"] -= Accessing the Jaeger console - -To access the Jaeger console you must have either {SMProductName} or {DTProductName} installed, and {JaegerName} installed, configured, and deployed. - -The installation process creates a route to access the Jaeger console. - -If you know the URL for the Jaeger console, you can access it directly. If you do not know the URL, use the following directions. - -.Procedure from OpenShift console -. Log in to the {product-title} web console as a user with cluster-admin rights. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. - -. Navigate to *Networking* -> *Routes*. - -. On the *Routes* page, select the control plane project, for example `tracing-system`, from the *Namespace* menu. -+ -The *Location* column displays the linked address for each route. -+ -. If necessary, use the filter to find the `jaeger` route. Click the route *Location* to launch the console. - -. Click *Log In With OpenShift*. - -//// -.Procedure from Kiali console - -. Launch the Kiali console. - -. Click *Distributed Tracing* in the left navigation pane. - -. Click *Log In With OpenShift*. -//// - -.Procedure from the CLI - -. Log in to the {product-title} CLI as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. -+ -[source,terminal] ----- -$ oc login --username=<NAMEOFUSER> https://<HOSTNAME>:6443 ----- -+ -. To query for details of the route using the command line, enter the following command. In this example, `tracing-system` is the control plane namespace. -+ -[source,terminal] ----- -$ export JAEGER_URL=$(oc get route -n tracing-system jaeger -o jsonpath='{.spec.host}') ----- -+ -. Launch a browser and navigate to ``\https://<JAEGER_URL>``, where `<JAEGER_URL>` is the route that you discovered in the previous step. - -. Log in using the same user name and password that you use to access the {Product-title} console. - -. If you have added services to the service mesh and have generated traces, you can use the filters and *Find Traces* button to search your trace data. -+ -If you are validating the console installation, there is no trace data to display. 
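As a quick sanity check before opening the console, the following terminal sketch combines the route lookup from the previous steps with a simple echo; it assumes the control plane namespace is `tracing-system` and the route is named `jaeger`, as in the procedure above.

[source,terminal]
----
$ export JAEGER_URL=$(oc get route -n tracing-system jaeger -o jsonpath='{.spec.host}')
$ echo "https://${JAEGER_URL}"
----

If the command prints an empty host, the route has not been created yet; recheck the deployment status before opening the URL in a browser.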
diff --git a/modules/distr-tracing-architecture.adoc b/modules/distr-tracing-architecture.adoc deleted file mode 100644 index fe03b691610b..000000000000 --- a/modules/distr-tracing-architecture.adoc +++ /dev/null @@ -1,30 +0,0 @@ -//// -This module included in the following assemblies: --service_mesh/v2x/ossm-architecture.adoc --dist_tracing_arch/distr-tracing-architecture.adoc -//// -:_content-type: CONCEPT -[id="distr-tracing-architecture_{context}"] -= {DTProductName} architecture - -{DTProductName} is made up of several components that work together to collect, store, and display tracing data. - -* *{JaegerName}* - This component is based on the open source link:https://www.jaegertracing.io/[Jaeger project]. - -** *Client* (Jaeger client, Tracer, Reporter, instrumented application, client libraries)- The {JaegerShortName} clients are language-specific implementations of the OpenTracing API. They can be used to instrument applications for distributed tracing either manually or with a variety of existing open source frameworks, such as Camel (Fuse), Spring Boot (RHOAR), MicroProfile (RHOAR/Thorntail), Wildfly (EAP), and many more, that are already integrated with OpenTracing. - -** *Agent* (Jaeger agent, Server Queue, Processor Workers) - The {JaegerShortName} agent is a network daemon that listens for spans sent over User Datagram Protocol (UDP), which it batches and sends to the Collector. The agent is meant to be placed on the same host as the instrumented application. This is typically accomplished by having a sidecar in container environments such as Kubernetes. - -** *Jaeger Collector* (Collector, Queue, Workers) - Similar to the Jaeger agent, the Jaeger Collector receives spans and places them in an internal queue for processing. This allows the Jaeger Collector to return immediately to the client/agent instead of waiting for the span to make its way to the storage. - -** *Storage* (Data Store) - Collectors require a persistent storage backend. {JaegerName} has a pluggable mechanism for span storage. Note that for this release, the only supported storage is Elasticsearch. - -** *Query* (Query Service) - Query is a service that retrieves traces from storage. - -** *Ingester* (Ingester Service) - {DTProductName} can use Apache Kafka as a buffer between the Collector and the actual Elasticsearch backing storage. Ingester is a service that reads data from Kafka and writes to the Elasticsearch storage backend. - -** *Jaeger Console* – With the {JaegerName} user interface, you can visualize your distributed tracing data. On the Search page, you can find traces and explore details of the spans that make up an individual trace. - -* *{OTELName}* - This component is based on the open source link:https://opentelemetry.io/[OpenTelemetry project]. - -** *OpenTelemetry Collector* - The OpenTelemetry Collector is a vendor-agnostic way to receive, process, and export telemetry data. The OpenTelemetry Collector supports open-source observability data formats, for example, Jaeger and Prometheus, sending to one or more open-source or commercial back-ends. The Collector is the default location instrumentation libraries export their telemetry data. 
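To illustrate the sidecar pattern mentioned for the agent, the following is a minimal sketch of a Kubernetes Deployment that requests agent injection through the Jaeger Operator's injection annotation; the deployment name `myapp` and its image are hypothetical placeholders, not values from this documentation.

[source,yaml]
----
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp  # hypothetical application deployment
  annotations:
    "sidecar.jaegertracing.io/inject": "true"  # ask the Operator to inject a jaeger-agent sidecar
spec:
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
      - name: myapp
        image: quay.io/example/myapp:latest  # placeholder image
----

With the agent running as a sidecar, the instrumented application can send spans to `localhost` over UDP, and the agent batches and forwards them to the Collector.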
diff --git a/modules/distr-tracing-change-operator-20.adoc b/modules/distr-tracing-change-operator-20.adoc deleted file mode 100644 index 9ec4c14fad64..000000000000 --- a/modules/distr-tracing-change-operator-20.adoc +++ /dev/null @@ -1,22 +0,0 @@ -//// -This module included in the following assemblies: -- dist_tracing/dist_tracing_install/dist-tracing-updating.adoc -//// - -[id="distr-tracing-changing-operator-channel_{context}"] -= Changing the Operator channel for 2.0 - -{DTProductName} 2.0.0 made the following changes: - -* Renamed the Red Hat OpenShift Jaeger Operator to the {JaegerName} Operator. - -* Stopped support for individual release channels. Going forward, the {JaegerName} Operator will only support the *stable* Operator channel. Maintenance channels, for example *1.24-stable*, will no longer be supported by future Operators. - -As part of the update to version 2.0, you must update your OpenShift Elasticsearch and {JaegerName} Operator subscriptions. - -.Prerequisites - -* The {product-title} version is 4.6 or later. -* You have updated the OpenShift Elasticsearch Operator. -* You have backed up the Jaeger custom resource file. -* An account with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. diff --git a/modules/distr-tracing-config-default.adoc b/modules/distr-tracing-config-default.adoc deleted file mode 100644 index 3c4488dd88a2..000000000000 --- a/modules/distr-tracing-config-default.adoc +++ /dev/null @@ -1,123 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// -:_content-type: REFERENCE -[id="distr-tracing-config-default_{context}"] -= Distributed tracing default configuration options - -The Jaeger custom resource (CR) defines the architecture and settings to be used when creating the {JaegerShortName} resources. You can modify these parameters to customize your {JaegerShortName} implementation to your business needs. - -.Jaeger generic YAML example -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: name -spec: - strategy: <deployment_strategy> - allInOne: - options: {} - resources: {} - agent: - options: {} - resources: {} - collector: - options: {} - resources: {} - sampling: - options: {} - storage: - type: - options: {} - query: - options: {} - resources: {} - ingester: - options: {} - resources: {} - options: {} ----- - -.Jaeger parameters -[options="header"] -|=== -|Parameter |Description |Values |Default value - -|`apiVersion:` -||API version to use when creating the object. -|`jaegertracing.io/v1` -|`jaegertracing.io/v1` - -|`kind:` -|Defines the kind of Kubernetes object to create. -|`jaeger` -| - -|`metadata:` -|Data that helps uniquely identify the object, including a `name` string, `UID`, and optional `namespace`. -| -|{product-title} automatically generates the `UID` and completes the `namespace` with the name of the project where the object is created. - -|`name:` -|Name for the object. -|The name of your {JaegerShortName} instance. -|`jaeger-all-in-one-inmemory` - -|`spec:` -|Specification for the object to be created. -|Contains all of the configuration parameters for your {JaegerShortName} instance. When a common definition for all Jaeger components is required, it is defined under the `spec` node. When the definition relates to an individual component, it is placed under the `spec/<component>` node. 
-|N/A - -|`strategy:` -|Jaeger deployment strategy -|`allInOne`, `production`, or `streaming` -|`allInOne` - -|`allInOne:` -|Because the `allInOne` image deploys the Agent, Collector, Query, Ingester, and Jaeger UI in a single pod, configuration for this deployment must nest component configuration under the `allInOne` parameter. -| -| - -|`agent:` -|Configuration options that define the Agent. -| -| - -|`collector:` -|Configuration options that define the Jaeger Collector. -| -| - -|`sampling:` -|Configuration options that define the sampling strategies for tracing. -| -| - -|`storage:` -|Configuration options that define the storage. All storage-related options must be placed under `storage`, rather than under the `allInOne` or other component options. -| -| - -|`query:` -|Configuration options that define the Query service. -| -| - -|`ingester:` -|Configuration options that define the Ingester service. -| -| -|=== - -The following example YAML is the minimum required to create a {JaegerName} deployment using the default settings. - -.Example minimum required dist-tracing-all-in-one.yaml -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: jaeger-all-in-one-inmemory ----- diff --git a/modules/distr-tracing-config-ingester.adoc b/modules/distr-tracing-config-ingester.adoc deleted file mode 100644 index a6b4930dbbb8..000000000000 --- a/modules/distr-tracing-config-ingester.adoc +++ /dev/null @@ -1,76 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// -:_content-type: REFERENCE -[id="distr-tracing-config-ingester_{context}"] -= Ingester configuration options - -Ingester is a service that reads from a Kafka topic and writes to the Elasticsearch storage backend. If you are using the `allInOne` or `production` deployment strategies, you do not need to configure the Ingester service. - -.Jaeger parameters passed to the Ingester -[options="header"] -[cols="l, a, a"] -|=== -|Parameter |Description |Values -|spec: - ingester: - options: {} -|Configuration options that define the Ingester service. -| - -|options: - deadlockInterval: -|Specifies the interval, in seconds or minutes, that the Ingester must wait for a message before terminating. -The deadlock interval is disabled by default (set to `0`), to avoid terminating the Ingester when no messages arrive during system initialization. -|Minutes and seconds, for example, `1m0s`. Default value is `0`. - -|options: - kafka: - consumer: - topic: -|The `topic` parameter identifies the Kafka configuration used by the collector to produce the messages, and the Ingester to consume the messages. -|Label for the consumer. For example, `jaeger-spans`. - -|options: - kafka: - consumer: - brokers: -|Identifies the Kafka configuration used by the Ingester to consume the messages. -|Label for the broker, for example, `my-cluster-kafka-brokers.kafka:9092`. - -|options: - log-level: -|Logging level for the Ingester. -|Possible values: `debug`, `info`, `warn`, `error`, `fatal`, `dpanic`, `panic`. 
-|=== - -.Streaming Collector and Ingester example -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: simple-streaming -spec: - strategy: streaming - collector: - options: - kafka: - producer: - topic: jaeger-spans - brokers: my-cluster-kafka-brokers.kafka:9092 - ingester: - options: - kafka: - consumer: - topic: jaeger-spans - brokers: my-cluster-kafka-brokers.kafka:9092 - ingester: - deadlockInterval: 5 - storage: - type: elasticsearch - options: - es: - server-urls: http://elasticsearch:9200 ----- diff --git a/modules/distr-tracing-config-jaeger-collector.adoc b/modules/distr-tracing-config-jaeger-collector.adoc deleted file mode 100644 index 6a32b00d4b15..000000000000 --- a/modules/distr-tracing-config-jaeger-collector.adoc +++ /dev/null @@ -1,66 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// -:_content-type: REFERENCE -[id="distr-tracing-config-jaeger-collector_{context}"] -= Jaeger Collector configuration options - -The Jaeger Collector is the component responsible for receiving the spans that were captured by the tracer and writing them to persistent Elasticsearch storage when using the `production` strategy, or to AMQ Streams when using the `streaming` strategy. - -The Collectors are stateless and thus many instances of Jaeger Collector can be run in parallel. Collectors require almost no configuration, except for the location of the Elasticsearch cluster. - -.Parameters used by the Operator to define the Jaeger Collector -[options="header"] -[cols="l, a, a"] -|=== -|Parameter |Description |Values -|collector: - replicas: -|Specifies the number of Collector replicas to create. -|Integer, for example, `5` -|=== - - -.Configuration parameters passed to the Collector -[options="header"] -[cols="l, a, a"] -|=== -|Parameter |Description |Values -|spec: - collector: - options: {} -|Configuration options that define the Jaeger Collector. -| - -|options: - collector: - num-workers: -|The number of workers pulling from the queue. -|Integer, for example, `50` - -|options: - collector: - queue-size: -|The size of the Collector queue. -|Integer, for example, `2000` - -|options: - kafka: - producer: - topic: jaeger-spans -|The `topic` parameter identifies the Kafka configuration used by the Collector to produce the messages, and the Ingester to consume the messages. -|Label for the producer. - -|options: - kafka: - producer: - brokers: my-cluster-kafka-brokers.kafka:9092 -|Identifies the Kafka configuration used by the Collector to produce the messages. If brokers are not specified, and you have AMQ Streams 1.4.0+ installed, the {JaegerName} Operator will self-provision Kafka. -| - -|options: - log-level: -|Logging level for the Collector. -|Possible values: `debug`, `info`, `warn`, `error`, `fatal`, `panic`. 
-|=== diff --git a/modules/distr-tracing-config-otel-collector.adoc b/modules/distr-tracing-config-otel-collector.adoc deleted file mode 100644 index 3156c704d9e0..000000000000 --- a/modules/distr-tracing-config-otel-collector.adoc +++ /dev/null @@ -1,128 +0,0 @@ -//// -This module included in the following assemblies: --distr_tracing_install/distributed-tracing-deploying-otel.adoc -//// -:_content-type: REFERENCE -[id="distr-tracing-config-otel-collector_{context}"] -= OpenTelemetry Collector configuration options - -:FeatureName: The {OTELName} Operator -include::snippets/technology-preview.adoc[leveloffset=+1] - -The OpenTelemetry Collector consists of three components that access telemetry data: - -* *Receivers* - A receiver, which can be push or pull based, is how data gets into the Collector. Generally, a receiver accepts data in a specified format, translates it into the internal format and passes it to processors and exporters defined in the applicable pipelines. By default, no receivers are configured. One or more receivers must be configured. Receivers may support one or more data sources. - -* *Processors* - (Optional) Processors are run on data between being received and being exported. By default, no processors are enabled. Processors must be enabled for every data source. Not all processors support all data sources. Depending on the data source, it may be recommended that multiple processors be enabled. In addition, it is important to note that the order of processors matters. - -* *Exporters* - An exporter, which can be push or pull based, is how you send data to one or more backends/destinations. By default, no exporters are configured. One or more exporters must be configured. Exporters may support one or more data sources. Exporters may come with default settings, but many require configuration to specify at least the destination and security settings. - -You can define multiple instances of components in a custom resource YAML file. Once configured, these components must be enabled through pipelines defined in the `spec.config.service` section of the YAML file. As a best practice you should only enable the components that you need. - -.sample OpenTelemetry collector custom resource file -[source,yaml] ----- -apiVersion: opentelemetry.io/v1alpha1 -kind: OpenTelemetryCollector -metadata: - name: cluster-collector - namespace: tracing-system -spec: - mode: deployment - config: | - receivers: - otlp: - protocols: - grpc: - http: - processors: - exporters: - jaeger: - endpoint: jaeger-production-collector-headless.tracing-system.svc:14250 - tls: - ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt" - service: - pipelines: - traces: - receivers: [otlp] - processors: [] - exporters: [jaeger] ----- - -[NOTE] -==== -If a component is configured, but not defined within the `service` section then it is not enabled. -==== - -.Parameters used by the Operator to define the OpenTelemetry Collector -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default -|receivers: -|A receiver is how data gets into the Collector. By default, no receivers are configured. There must be at least one enabled receiver for a configuration to be considered valid. Receivers are enabled by being added to a pipeline. -|`otlp`, `jaeger` -|None - -|receivers: - otlp: -|The `oltp` and `jaeger` receivers come with default settings, specifying the name of the receiver is enough to configure it. 
-| -| - -|processors: -|Processors run on data between being received and being exported. By default, no processors are enabled. -| -|None - -|exporters: -|An exporter sends data to one or more backends/destinations. By default, no exporters are configured. There must be at least one enabled exporter for a configuration to be considered valid. Exporters are enabled by being added to a pipeline. Exporters may come with default settings, but many require configuration to specify at least the destination and security settings. -|`logging`, `jaeger` -|None - -|exporters: - jaeger: - endpoint: - -|The `jaeger` exporter’s endpoint must be of the form `<name>-collector-headless.<namespace>.svc`, with the name and namespace of the Jaeger deployment, for a secure connection to be established. -| -| - -|exporters: - jaeger: - tls: - ca_file: -|Path to the CA certificate. For a client this verifies the server certificate. For a server this verifies client certificates. If empty uses system root CA. -| -| - -|service: - pipelines: -|Components are enabled by adding them to a pipeline under `services.pipeline`. -| -| - -|service: - pipelines: - traces: - receivers: -|You enable receivers for tracing by adding them under `service.pipelines.traces`. -| -|None - -|service: - pipelines: - traces: - processors: -|You enable processors for tracing by adding them under `service.pipelines.traces`. -| -|None - -|service: - pipelines: - traces: - exporters: -|You enable exporters for tracing by adding them under `service.pipelines.traces`. -| -|None -|=== diff --git a/modules/distr-tracing-config-query.adoc b/modules/distr-tracing-config-query.adoc deleted file mode 100644 index 1aa8ba06d64e..000000000000 --- a/modules/distr-tracing-config-query.adoc +++ /dev/null @@ -1,67 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// -:_content-type: REFERENCE -[id="distr-tracing-config-query_{context}"] -= Query configuration options - -Query is a service that retrieves traces from storage and hosts the user interface to display them. - -.Parameters used by the {JaegerName} Operator to define Query -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value - -|spec: - query: - replicas: -|Specifies the number of Query replicas to create. -|Integer, for example, `2` -| -|=== - - -.Configuration parameters passed to Query -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value - -|spec: - query: - options: {} -|Configuration options that define the Query service. -| -| - -|options: - log-level: -|Logging level for Query. -|Possible values: `debug`, `info`, `warn`, `error`, `fatal`, `panic`. -| - -|options: - query: - base-path: -|The base path for all jaeger-query HTTP routes can be set to a non-root value, for example, `/jaeger` would cause all UI URLs to start with `/jaeger`. This can be useful when running jaeger-query behind a reverse proxy. 
-|/<path> -| -|=== - -.Sample Query configuration -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: "Jaeger" -metadata: - name: "my-jaeger" -spec: - strategy: allInOne - allInOne: - options: - log-level: debug - query: - base-path: /jaeger ----- diff --git a/modules/distr-tracing-config-sampling.adoc b/modules/distr-tracing-config-sampling.adoc deleted file mode 100644 index 9e743c4072d9..000000000000 --- a/modules/distr-tracing-config-sampling.adoc +++ /dev/null @@ -1,99 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// -:_content-type: REFERENCE -[id="distr-tracing-config-sampling_{context}"] -= Distributed tracing sampling configuration options - -The {JaegerName} Operator can be used to define sampling strategies that will be supplied to tracers that have been configured to use a remote sampler. - -While all traces are generated, only a few are sampled. Sampling a trace marks the trace for further processing and storage. - -[NOTE] -==== -This is not relevant if a trace was started by the Envoy proxy, as the sampling decision is made there. The Jaeger sampling decision is only relevant when the trace is started by an application using the client. -==== - -When a service receives a request that contains no trace context, the client starts a new trace, assigns it a random trace ID, and makes a sampling decision based on the currently installed sampling strategy. The sampling decision propagates to all subsequent requests in the trace so that other services are not making the sampling decision again. - -{JaegerShortName} libraries support the following samplers: - -* *Probabilistic* - The sampler makes a random sampling decision with the probability of sampling equal to the value of the `sampling.param` property. For example, using `sampling.param=0.1` samples approximately 1 in 10 traces. - -* *Rate Limiting* - The sampler uses a leaky bucket rate limiter to ensure that traces are sampled with a certain constant rate. For example, using `sampling.param=2.0` samples requests with the rate of 2 traces per second. - -.Jaeger sampling options -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value -|spec: - sampling: - options: {} - default_strategy: - service_strategy: -|Configuration options that define the sampling strategies for tracing. -| -|If you do not provide configuration, the Collectors will return the default probabilistic sampling policy with 0.001 (0.1%) probability for all services. - -|default_strategy: - type: -service_strategy: - type: -|Sampling strategy to use. See descriptions above. -|Valid values are `probabilistic`, and `ratelimiting`. -|`probabilistic` - -|default_strategy: - param: -service_strategy: - param: -|Parameters for the selected sampling strategy. -|Decimal and integer values (0, .1, 1, 10) -|1 -|=== - -This example defines a default sampling strategy that is probabilistic, with a 50% chance of the trace instances being sampled. 
- -.Probabilistic sampling example -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: with-sampling -spec: - sampling: - options: - default_strategy: - type: probabilistic - param: 0.5 - service_strategies: - - service: alpha - type: probabilistic - param: 0.8 - operation_strategies: - - operation: op1 - type: probabilistic - param: 0.2 - - operation: op2 - type: probabilistic - param: 0.4 - - service: beta - type: ratelimiting - param: 5 ----- - -If there are no user-supplied configurations, the {JaegerShortName} uses the following settings: - -.Default sampling -[source,yaml] ----- -spec: - sampling: - options: - default_strategy: - type: probabilistic - param: 1 ----- diff --git a/modules/distr-tracing-config-security-ossm-cli.adoc b/modules/distr-tracing-config-security-ossm-cli.adoc deleted file mode 100644 index 8fe1119a7a54..000000000000 --- a/modules/distr-tracing-config-security-ossm-cli.adoc +++ /dev/null @@ -1,93 +0,0 @@ -//// -This module included in the following assemblies: -service_mesh/v2x/ossm-reference-jaeger.adoc -//// -:_content-type: PROCEDURE -[id="distr-tracing-config-security-ossm-cli_{context}"] -= Configuring distributed tracing security for service mesh from the command line - -You can modify the Jaeger resource to configure {JaegerShortName} security for use with {SMproductShortName} from the command line using the `oc` utility. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. -* The {SMProductName} Operator must be installed. -* The `ServiceMeshControlPlane` deployed to the cluster. -* You have access to the OpenShift CLI (oc) that matches your OpenShift Container Platform version. - -.Procedure - -. Log in to the {product-title} CLI as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. -+ -[source,terminal] ----- -$ oc login https://<HOSTNAME>:6443 ----- -+ -. Change to the project where you installed the control plane, for example `istio-system`, by entering the following command: -+ -[source,terminal] ----- -$ oc project istio-system ----- -+ -. Run the following command to edit the Jaeger custom resource file, where `jaeger.yaml` is the name of your Jaeger custom resource. -+ -[source,terminal] ----- -$ oc edit -n tracing-system -f jaeger.yaml ----- -+ -. Edit the `Jaeger` custom resource file to add the `htpasswd` configuration as shown in the following example. - -* `spec.ingress.openshift.htpasswdFile` -* `spec.volumes` -* `spec.volumeMounts` -+ -.Example Jaeger resource showing `htpasswd` configuration -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -spec: - ingress: - enabled: true - openshift: - htpasswdFile: /etc/proxy/htpasswd/auth - sar: '{"namespace": "istio-system", "resource": "pods", "verb": "get"}' - options: {} - resources: {} - security: oauth-proxy - volumes: - - name: secret-htpasswd - secret: - secretName: htpasswd - - configMap: - defaultMode: 420 - items: - - key: ca-bundle.crt - path: tls-ca-bundle.pem - name: trusted-ca-bundle - optional: true - name: trusted-ca-bundle - volumeMounts: - - mountPath: /etc/proxy/htpasswd - name: secret-htpasswd - - mountPath: /etc/pki/ca-trust/extracted/pem/ - name: trusted-ca-bundle - readOnly: true ----- -+ -. Run the following command to apply your changes, where <jaeger.yaml> is the name of your Jaeger custom resource. 
-+ -[source,terminal] ----- -$ oc apply -n tracing-system -f <jaeger.yaml> ----- -+ -. Run the following command to watch the progress of the pod deployment: -+ -[source,terminal] ----- -$ oc get pods -n tracing-system -w ----- diff --git a/modules/distr-tracing-config-security-ossm-web.adoc b/modules/distr-tracing-config-security-ossm-web.adoc deleted file mode 100644 index 9759190ae799..000000000000 --- a/modules/distr-tracing-config-security-ossm-web.adoc +++ /dev/null @@ -1,74 +0,0 @@ -//// -This module included in the following assemblies: -service_mesh/v2x/ossm-reference-jaeger.adoc -//// -:_content-type: PROCEDURE -[id="distr-tracing-config-security-ossm-web_{context}"] -= Configuring distributed tracing security for service mesh from the OpenShift console - -You can modify the Jaeger resource to configure {JaegerShortName} security for use with {SMproductShortName} in the OpenShift console. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. -* The {SMProductName} Operator must be installed. -* The `ServiceMeshControlPlane` deployed to the cluster. -* You have access to the OpenShift Container Platform web console. - -.Procedure - -. Log in to the {product-title} web console as a user with the `cluster-admin` role. - -. Navigate to Operators → Installed Operators. - -. Click the *Project* menu and select the project where your `ServiceMeshControlPlane` resource is deployed from the list, for example `istio-system`. - -. Click the *{JaegerName} Operator*. - -. On the *Operator Details* page, click the *Jaeger* tab. - -. Click the name of your Jaeger instance. - -. On the Jaeger details page, click the `YAML` tab to modify your configuration. - -. Edit the `Jaeger` custom resource file to add the `htpasswd` configuration as shown in the following example. - -* `spec.ingress.openshift.htpasswdFile` -* `spec.volumes` -* `spec.volumeMounts` -+ -.Example Jaeger resource showing `htpasswd` configuration -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -spec: - ingress: - enabled: true - openshift: - htpasswdFile: /etc/proxy/htpasswd/auth - sar: '{"namespace": "istio-system", "resource": "pods", "verb": "get"}' - options: {} - resources: {} - security: oauth-proxy - volumes: - - name: secret-htpasswd - secret: - secretName: htpasswd - - configMap: - defaultMode: 420 - items: - - key: ca-bundle.crt - path: tls-ca-bundle.pem - name: trusted-ca-bundle - optional: true - name: trusted-ca-bundle - volumeMounts: - - mountPath: /etc/proxy/htpasswd - name: secret-htpasswd - - mountPath: /etc/pki/ca-trust/extracted/pem/ - name: trusted-ca-bundle - readOnly: true ----- -+ -. Click *Save*. diff --git a/modules/distr-tracing-config-security-ossm.adoc b/modules/distr-tracing-config-security-ossm.adoc deleted file mode 100644 index 14bca9be19b5..000000000000 --- a/modules/distr-tracing-config-security-ossm.adoc +++ /dev/null @@ -1,11 +0,0 @@ -//// -This module included in the following assemblies: -service_mesh/v2x/ossm-reference-jaeger.adoc -//// -:_content-type: CONCEPT -[id="distr-tracing-config-security-ossm_{context}"] -= Configuring distributed tracing security for service mesh - -The {JaegerShortName} uses OAuth for default authentication. However {SMProductName} uses a secret called `htpasswd` to facilitate communication between dependent services such as Grafana, Kiali, and the {JaegerShortName}. 
When you configure your {JaegerShortName} in the `ServiceMeshControlPlane` the {SMProductShortName} automatically configures security settings to use `htpasswd`. - -If you are specifying your {JaegerShortName} configuration in a Jaeger custom resource, you must manually configure the `htpasswd` settings and ensure the `htpasswd` secret is mounted into your Jaeger instance so that Kiali can communicate with it. diff --git a/modules/distr-tracing-config-storage.adoc b/modules/distr-tracing-config-storage.adoc deleted file mode 100644 index 8947448d3432..000000000000 --- a/modules/distr-tracing-config-storage.adoc +++ /dev/null @@ -1,728 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// -:_content-type: REFERENCE -[id="distr-tracing-config-storage_{context}"] -= Distributed tracing storage configuration options - -You configure storage for the Collector, Ingester, and Query services under `spec.storage`. Multiple instances of each of these components can be provisioned as required for performance and resilience purposes. - -.General storage parameters used by the {JaegerName} Operator to define distributed tracing storage - -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value -|spec: - storage: - type: -|Type of storage to use for the deployment. -|`memory` or `elasticsearch`. -Memory storage is only appropriate for development, testing, demonstrations, and proof of concept environments as the data does not persist if the pod is shut down. For production environments {JaegerShortName} supports Elasticsearch for persistent storage. -|`memory` - -|storage: - secretname: -|Name of the secret, for example `tracing-secret`. -| -|N/A - -|storage: - options: {} -|Configuration options that define the storage. -| -| -|=== - -.Elasticsearch index cleaner parameters -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value -|storage: - esIndexCleaner: - enabled: -|When using Elasticsearch storage, by default a job is created to clean old traces from the index. This parameter enables or disables the index cleaner job. -|`true`/ `false` -|`true` - -|storage: - esIndexCleaner: - numberOfDays: -|Number of days to wait before deleting an index. -|Integer value -|`7` - -|storage: - esIndexCleaner: - schedule: -|Defines the schedule for how often to clean the Elasticsearch index. -|Cron expression -|"55 23 * * *" -|=== - -[id="distributed-tracing-config-auto-provisioning-es_{context}"] -== Auto-provisioning an Elasticsearch instance - -When you deploy a Jaeger custom resource, the {JaegerName} Operator uses the OpenShift Elasticsearch Operator to create an Elasticsearch cluster based on the configuration provided in the `storage` section of the custom resource file. The {JaegerName} Operator will provision Elasticsearch if the following configurations are set: - -* `spec.storage:type` is set to `elasticsearch` -* `spec.storage.elasticsearch.doNotProvision` set to `false` -* `spec.storage.options.es.server-urls` is not defined, that is, there is no connection to an Elasticsearch instance that was not provisioned by the Red Hat Elasticsearch Operator. - -When provisioning Elasticsearch, the {JaegerName} Operator sets the Elasticsearch custom resource `name` to the value of `spec.storage.elasticsearch.name` from the Jaeger custom resource. If you do not specify a value for `spec.storage.elasticsearch.name`, the Operator uses `elasticsearch`. 
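For example, a Jaeger custom resource along the following lines meets the provisioning conditions listed above and results in an Elasticsearch custom resource named `custom-es`; this is a minimal sketch, and the resource names are placeholders.

[source,yaml]
----
apiVersion: jaegertracing.io/v1
kind: Jaeger
metadata:
  name: jaeger-prod
spec:
  strategy: production
  storage:
    type: elasticsearch      # required for self-provisioned Elasticsearch
    elasticsearch:
      name: custom-es        # the provisioned Elasticsearch CR takes this name
      doNotProvision: false  # allow the Operator to provision the instance
      nodeCount: 3
----

Because `spec.storage.options.es.server-urls` is not set, the Operator provisions the cluster instead of connecting to an external instance.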
- -.Restrictions - -* You can have only one {JaegerShortName} with self-provisioned Elasticsearch instance per namespace. The Elasticsearch cluster is meant to be dedicated for a single {JaegerShortName} instance. -* There can be only one Elasticsearch per namespace. - -[NOTE] -==== -If you already have installed Elasticsearch as part of OpenShift Logging, the {JaegerName} Operator can use the installed OpenShift Elasticsearch Operator to provision storage. -==== - -The following configuration parameters are for a _self-provisioned_ Elasticsearch instance, that is an instance created by the {JaegerName} Operator using the OpenShift Elasticsearch Operator. You specify configuration options for self-provisioned Elasticsearch under `spec:storage:elasticsearch` in your configuration file. - -.Elasticsearch resource configuration parameters -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value -|elasticsearch: - properties: - doNotProvision: -|Use to specify whether or not an Elasticsearch instance should be provisioned by the {JaegerName} Operator. -|`true`/`false` -|`true` - -|elasticsearch: - properties: - name: -|Name of the Elasticsearch instance. The {JaegerName} Operator uses the Elasticsearch instance specified in this parameter to connect to Elasticsearch. -|string -|`elasticsearch` - -|elasticsearch: - nodeCount: -|Number of Elasticsearch nodes. For high availability use at least 3 nodes. Do not use 2 nodes as “split brain” problem can happen. -|Integer value. For example, Proof of concept = 1, -Minimum deployment =3 -|3 - -|elasticsearch: - resources: - requests: - cpu: -|Number of central processing units for requests, based on your environment's configuration. -|Specified in cores or millicores, for example, 200m, 0.5, 1. For example, Proof of concept = 500m, -Minimum deployment =1 -|1 - -|elasticsearch: - resources: - requests: - memory: -|Available memory for requests, based on your environment's configuration. -|Specified in bytes, for example, 200Ki, 50Mi, 5Gi. For example, Proof of concept = 1Gi, -Minimum deployment = 16Gi* -|16Gi - -|elasticsearch: - resources: - limits: - cpu: -|Limit on number of central processing units, based on your environment's configuration. -|Specified in cores or millicores, for example, 200m, 0.5, 1. For example, Proof of concept = 500m, -Minimum deployment =1 -| - -|elasticsearch: - resources: - limits: - memory: -|Available memory limit based on your environment's configuration. -|Specified in bytes, for example, 200Ki, 50Mi, 5Gi. For example, Proof of concept = 1Gi, -Minimum deployment = 16Gi* -| - -|elasticsearch: - redundancyPolicy: -|Data replication policy defines how Elasticsearch shards are replicated across data nodes in the cluster. If not specified, the {JaegerName} Operator automatically determines the most appropriate replication based on number of nodes. -|`ZeroRedundancy`(no replica shards), `SingleRedundancy`(one replica shard), `MultipleRedundancy`(each index is spread over half of the Data nodes), `FullRedundancy` (each index is fully replicated on every Data node in the cluster). -| - -|elasticsearch: - useCertManagement: -|Use to specify whether or not {JaegerShortName} should use the certificate management feature of the Red Hat Elasticsearch Operator. This feature was added to {logging-title} 5.2 in {product-title} 4.7 and is the preferred setting for new Jaeger deployments. 
-|`true`/`false` -|`true` - -| -3+|*Each Elasticsearch node can operate with a lower memory setting though this is NOT recommended for production deployments. For production use, you should have no less than 16Gi allocated to each pod by default, but preferably allocate as much as you can, up to 64Gi per pod. -|=== - -.Production storage example -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: simple-prod -spec: - strategy: production - storage: - type: elasticsearch - elasticsearch: - nodeCount: 3 - resources: - requests: - cpu: 1 - memory: 16Gi - limits: - memory: 16Gi ----- - -.Storage example with persistent storage: -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: simple-prod -spec: - strategy: production - storage: - type: elasticsearch - elasticsearch: - nodeCount: 1 - storage: # <1> - storageClassName: gp2 - size: 5Gi - resources: - requests: - cpu: 200m - memory: 4Gi - limits: - memory: 4Gi - redundancyPolicy: ZeroRedundancy ----- - -<1> Persistent storage configuration. In this case AWS `gp2` with `5Gi` size. When no value is specified, {JaegerShortName} uses `emptyDir`. The OpenShift Elasticsearch Operator provisions `PersistentVolumeClaim` and `PersistentVolume` which are not removed with {JaegerShortName} instance. You can mount the same volumes if you create a {JaegerShortName} instance with the same name and namespace. - - -[id="distributed-tracing-config-external-es_{context}"] -== Connecting to an existing Elasticsearch instance - -You can use an existing Elasticsearch cluster for storage with {DTShortName}. An existing Elasticsearch cluster, also known as an _external_ Elasticsearch instance, is an instance that was not installed by the {JaegerName} Operator or by the Red Hat Elasticsearch Operator. - -When you deploy a Jaeger custom resource, the {JaegerName} Operator will not provision Elasticsearch if the following configurations are set: - -* `spec.storage.elasticsearch.doNotProvision` set to `true` -* `spec.storage.options.es.server-urls` has a value -* `spec.storage.elasticsearch.name` has a value, or if the Elasticsearch instance name is `elasticsearch`. - -The {JaegerName} Operator uses the Elasticsearch instance specified in `spec.storage.elasticsearch.name` to connect to Elasticsearch. - -.Restrictions - -* You cannot share or reuse a {product-title} logging Elasticsearch instance with {JaegerShortName}. The Elasticsearch cluster is meant to be dedicated for a single {JaegerShortName} instance. - -[NOTE] -==== -Red Hat does not provide support for your external Elasticsearch instance. You can review the tested integrations matrix on the link:https://access.redhat.com/articles/5381021[Customer Portal]. -==== - -The following configuration parameters are for an already existing Elasticsearch instance, also known as an _external_ Elasticsearch instance. In this case, you specify configuration options for Elasticsearch under `spec:storage:options:es` in your custom resource file. - -.General ES configuration parameters -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value -|es: - server-urls: -|URL of the Elasticsearch instance. -|The fully-qualified domain name of the Elasticsearch server. -|`http://elasticsearch.<namespace>.svc:9200` - -|es: - max-doc-count: -|The maximum document count to return from an Elasticsearch query. This will also apply to aggregations. 
If you set both `es.max-doc-count` and `es.max-num-spans`, Elasticsearch will use the smaller value of the two. -| -|10000 - -|es: - max-num-spans: -|[*Deprecated* - Will be removed in a future release, use `es.max-doc-count` instead.] The maximum number of spans to fetch at a time, per query, in Elasticsearch. If you set both `es.max-num-spans` and `es.max-doc-count`, Elasticsearch will use the smaller value of the two. -| -|10000 - -|es: - max-span-age: -|The maximum lookback for spans in Elasticsearch. -| -|72h0m0s - -|es: - sniffer: -|The sniffer configuration for Elasticsearch. The client uses the sniffing process to find all nodes automatically. Disabled by default. -|`true`/ `false` -|`false` - -|es: - sniffer-tls-enabled: -|Option to enable TLS when sniffing an Elasticsearch Cluster. The client uses the sniffing process to find all nodes automatically. Disabled by default -|`true`/ `false` -|`false` - -|es: - timeout: -|Timeout used for queries. When set to zero there is no timeout. -| -|0s - -|es: - username: -|The username required by Elasticsearch. The basic authentication also loads CA if it is specified. See also `es.password`. -| -| - -|es: - password: -|The password required by Elasticsearch. See also, `es.username`. -| -| - -|es: - version: -|The major Elasticsearch version. If not specified, the value will be auto-detected from Elasticsearch. -| -|0 -|=== - -.ES data replication parameters -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value -|es: - num-replicas: -|The number of replicas per index in Elasticsearch. -| -|1 - -|es: - num-shards: -|The number of shards per index in Elasticsearch. -| -|5 -|=== - -.ES index configuration parameters -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value -|es: - create-index-templates: -|Automatically create index templates at application startup when set to `true`. When templates are installed manually, set to `false`. -|`true`/ `false` -|`true` - -|es: - index-prefix: -|Optional prefix for {JaegerShortName} indices. For example, setting this to "production" creates indices named "production-tracing-*". -| -| -|=== - -.ES bulk processor configuration parameters -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value -|es: - bulk: - actions: -|The number of requests that can be added to the queue before the bulk processor decides to commit updates to disk. -| -|1000 - -//What is the default here? The original text said "Set to zero to disable. By default, this is disabled." -|es: - bulk: - flush-interval: -|A `time.Duration` after which bulk requests are committed, regardless of other thresholds. To disable the bulk processor flush interval, set this to zero. -| -|200ms - -|es: - bulk: - size: -|The number of bytes that the bulk requests can take up before the bulk processor decides to commit updates to disk. -| -|5000000 - -|es: - bulk: - workers: -|The number of workers that are able to receive and commit bulk requests to Elasticsearch. -| -|1 -|=== - -.ES TLS configuration parameters -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value -|es: - tls: - ca: -|Path to a TLS Certification Authority (CA) file used to verify the remote servers. -| -|Will use the system truststore by default. - -|es: - tls: - cert: -|Path to a TLS Certificate file, used to identify this process to the remote servers. 
-| -| - -|es: - tls: - enabled: -|Enable transport layer security (TLS) when talking to the remote servers. Disabled by default. -|`true`/ `false` -|`false` - -|es: - tls: - key: -|Path to a TLS Private Key file, used to identify this process to the remote servers. -| -| - -|es: - tls: - server-name: -|Override the expected TLS server name in the certificate of the remote servers. -| -| -//Clarification of "if specified" for `token-file` and `username`, does that mean if this is set? Or that it only loads the CA if one is specified (that is, if es.tls.ca has a value?) -|es: - token-file: -|Path to a file containing the bearer token. This flag also loads the Certification Authority (CA) file if it is specified. -| -| -|=== - -.ES archive configuration parameters -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value -|es-archive: - bulk: - actions: -|The number of requests that can be added to the queue before the bulk processor decides to commit updates to disk. -| -|0 - -//What is the default here? The original text said "Set to zero to disable. By default, this is disabled." -|es-archive: - bulk: - flush-interval: -|A `time.Duration` after which bulk requests are committed, regardless of other thresholds. To disable the bulk processor flush interval, set this to zero. -| -|0s - -|es-archive: - bulk: - size: -|The number of bytes that the bulk requests can take up before the bulk processor decides to commit updates to disk. -| -|0 - -|es-archive: - bulk: - workers: -|The number of workers that are able to receive and commit bulk requests to Elasticsearch. -| -|0 - -|es-archive: - create-index-templates: -|Automatically create index templates at application startup when set to `true`. When templates are installed manually, set to `false`. -|`true`/ `false` -|`false` - -|es-archive: - enabled: -|Enable extra storage. -|`true`/ `false` -|`false` - -|es-archive: - index-prefix: -|Optional prefix for {JaegerShortName} indices. For example, setting this to "production" creates indices named "production-tracing-*". -| -| - -|es-archive: - max-doc-count: -|The maximum document count to return from an Elasticsearch query. This will also apply to aggregations. -| -|0 - -|es-archive: - max-num-spans: -|[*Deprecated* - Will be removed in a future release, use `es-archive.max-doc-count` instead.] The maximum number of spans to fetch at a time, per query, in Elasticsearch. -| -|0 - -|es-archive: - max-span-age: -|The maximum lookback for spans in Elasticsearch. -| -|0s - -|es-archive: - num-replicas: -|The number of replicas per index in Elasticsearch. -| -|0 - -|es-archive: - num-shards: -|The number of shards per index in Elasticsearch. -| -|0 - -|es-archive: - password: -|The password required by Elasticsearch. See also, `es.username`. -| -| - -|es-archive: - server-urls: -|The comma-separated list of Elasticsearch servers. Must be specified as fully qualified URLs, for example, `\http://localhost:9200`. -| -| - -|es-archive: - sniffer: -|The sniffer configuration for Elasticsearch. The client uses the sniffing process to find all nodes automatically. Disabled by default. -|`true`/ `false` -|`false` - -|es-archive: - sniffer-tls-enabled: -|Option to enable TLS when sniffing an Elasticsearch Cluster. The client uses the sniffing process to find all nodes automatically. Disabled by default. -|`true`/ `false` -|`false` - -|es-archive: - timeout: -|Timeout used for queries. When set to zero there is no timeout. 
-| -|0s - -|es-archive: - tls: - ca: -|Path to a TLS Certification Authority (CA) file used to verify the remote servers. -| -|Will use the system truststore by default. - -|es-archive: - tls: - cert: -|Path to a TLS Certificate file, used to identify this process to the remote servers. -| -| - -|es-archive: - tls: - enabled: -|Enable transport layer security (TLS) when talking to the remote servers. Disabled by default. -|`true`/ `false` -|`false` - -|es-archive: - tls: - key: -|Path to a TLS Private Key file, used to identify this process to the remote servers. -| -| - -|es-archive: - tls: - server-name: -|Override the expected TLS server name in the certificate of the remote servers. -| -| - -//Clarification of "if specified" for next two rows, does that mean if this is set? Or that it only loads the CA if one is specified (that is, if es-archive.tls.ca has a value?) -|es-archive: - token-file: -|Path to a file containing the bearer token. This flag also loads the Certification Authority (CA) file if it is specified. -| -| - -|es-archive: - username: -|The username required by Elasticsearch. The basic authentication also loads CA if it is specified. See also `es-archive.password`. -| -| - -|es-archive: - version: -|The major Elasticsearch version. If not specified, the value will be auto-detected from Elasticsearch. -| -|0 -|=== - - -.Storage example with volume mounts -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: simple-prod -spec: - strategy: production - storage: - type: elasticsearch - options: - es: - server-urls: https://quickstart-es-http.default.svc:9200 - index-prefix: my-prefix - tls: - ca: /es/certificates/ca.crt - secretName: tracing-secret - volumeMounts: - - name: certificates - mountPath: /es/certificates/ - readOnly: true - volumes: - - name: certificates - secret: - secretName: quickstart-es-http-certs-public ----- - -The following example shows a Jaeger CR using an external Elasticsearch cluster with TLS CA certificate mounted from a volume and user/password stored in a secret. - -.External Elasticsearch example: -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: simple-prod -spec: - strategy: production - storage: - type: elasticsearch - options: - es: - server-urls: https://quickstart-es-http.default.svc:9200 # <1> - index-prefix: my-prefix - tls: # <2> - ca: /es/certificates/ca.crt - secretName: tracing-secret # <3> - volumeMounts: # <4> - - name: certificates - mountPath: /es/certificates/ - readOnly: true - volumes: - - name: certificates - secret: - secretName: quickstart-es-http-certs-public ----- -<1> URL to Elasticsearch service running in default namespace. -<2> TLS configuration. In this case only CA certificate, but it can also contain es.tls.key and es.tls.cert when using mutual TLS. -<3> Secret which defines environment variables ES_PASSWORD and ES_USERNAME. Created by kubectl create secret generic tracing-secret --from-literal=ES_PASSWORD=changeme --from-literal=ES_USERNAME=elastic -<4> Volume mounts and volumes which are mounted into all storage components. - -[id="distr-tracing-manage-es-certificates_{context}"] -= Managing certificates with Elasticsearch - -You can create and manage certificates using the Red Hat Elasticsearch Operator. Managing certificates using the Red Hat Elasticsearch Operator also lets you use a single Elasticsearch cluster with multiple Jaeger Collectors. 
- -:FeatureName: Managing certificates with Elasticsearch -include::snippets/technology-preview.adoc[leveloffset=+1] - -Starting with version 2.4, the {JaegerName} Operator delegates certificate creation to the Red Hat Elasticsearch Operator by using the following annotations in the Elasticsearch custom resource: - -* `logging.openshift.io/elasticsearch-cert-management: "true"` -* `logging.openshift.io/elasticsearch-cert.jaeger-<shared-es-node-name>: "user.jaeger"` -* `logging.openshift.io/elasticsearch-cert.curator-<shared-es-node-name>: "system.logging.curator"` - -Where the `<shared-es-node-name>` is the name of the Elasticsearch node. For example, if you create an Elasticsearch node named `custom-es`, your custom resource might look like the following example. - -.Example Elasticsearch CR showing annotations -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: Elasticsearch -metadata: - annotations: - logging.openshift.io/elasticsearch-cert-management: "true" - logging.openshift.io/elasticsearch-cert.jaeger-custom-es: "user.jaeger" - logging.openshift.io/elasticsearch-cert.curator-custom-es: "system.logging.curator" - name: custom-es -spec: - managementState: Managed - nodeSpec: - resources: - limits: - memory: 16Gi - requests: - cpu: 1 - memory: 16Gi - nodes: - - nodeCount: 3 - proxyResources: {} - resources: {} - roles: - - master - - client - - data - storage: {} - redundancyPolicy: ZeroRedundancy ----- - -.Prerequisites - -* {product-title} 4.7 -* {logging-title} 5.2 -* The Elasticsearch node and the Jaeger instances must be deployed in the same namespace. For example, `tracing-system`. - -You enable certificate management by setting `spec.storage.elasticsearch.useCertManagement` to `true` in the Jaeger custom resource. - -.Example showing `useCertManagement` -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: jaeger-prod -spec: - strategy: production - storage: - type: elasticsearch - elasticsearch: - name: custom-es - doNotProvision: true - useCertManagement: true ----- - -The {JaegerName} Operator sets the Elasticsearch custom resource `name` to the value of `spec.storage.elasticsearch.name` from the Jaeger custom resource when provisioning Elasticsearch. - -The certificates are provisioned by the Red Hat Elasticsearch Operator and the {JaegerName} Operator injects the certificates. diff --git a/modules/distr-tracing-deploy-default.adoc b/modules/distr-tracing-deploy-default.adoc deleted file mode 100644 index 627adb8bc6f7..000000000000 --- a/modules/distr-tracing-deploy-default.adoc +++ /dev/null @@ -1,116 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// - -:_content-type: PROCEDURE -[id="distr-tracing-deploy-default_{context}"] -= Deploying the {DTShortName} default strategy from the web console - -The custom resource definition (CRD) defines the configuration used when you deploy an instance of {DTProductName}. The default CR is named `jaeger-all-in-one-inmemory` and it is configured with minimal resources to ensure that you can successfully install it on a default {product-title} installation. You can use this default configuration to create a {JaegerName} instance that uses the `AllInOne` deployment strategy, or you can define your own custom resource file. - -[NOTE] -==== -In-memory storage is not persistent. If the Jaeger pod shuts down, restarts, or is replaced, your trace data will be lost. 
For persistent storage, you must use the `production` or `streaming` strategies, which use Elasticsearch as the default storage. -==== - -.Prerequisites - -* The {JaegerName} Operator has been installed. -* You have reviewed the instructions for how to customize the deployment. -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Log in to the {product-title} web console as a user with the `cluster-admin` role. - -. Create a new project, for example `tracing-system`. -+ -[NOTE] -==== -If you are installing as part of Service Mesh, the {DTShortName} resources must be installed in the same namespace as the `ServiceMeshControlPlane` resource, for example `istio-system`. -==== -+ -.. Navigate to *Home* -> *Projects*. - -.. Click *Create Project*. - -.. Enter `tracing-system` in the *Name* field. - -.. Click *Create*. - -. Navigate to *Operators* -> *Installed Operators*. - -. If necessary, select `tracing-system` from the *Project* menu. You may have to wait a few moments for the Operators to be copied to the new project. - -. Click the {JaegerName} Operator. On the *Details* tab, under *Provided APIs*, the Operator provides a single link. - -. Under *Jaeger*, click *Create Instance*. - -. On the *Create Jaeger* page, to install using the defaults, click *Create* to create the {JaegerShortName} instance. - -. On the *Jaegers* page, click the name of the {JaegerShortName} instance, for example, `jaeger-all-in-one-inmemory`. - -. On the *Jaeger Details* page, click the *Resources* tab. Wait until the pod has a status of "Running" before continuing. - - -[id="distr-tracing-deploy-default-cli_{context}"] -== Deploying the {DTShortName} default strategy from the CLI - -Follow this procedure to create an instance of {JaegerShortName} from the command line. - -.Prerequisites - -* The {JaegerName} Operator has been installed and verified. -* You have reviewed the instructions for how to customize the deployment. -* You have access to the OpenShift CLI (`oc`) that matches your {product-title} version. -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Log in to the {product-title} CLI as a user with the `cluster-admin` role. -+ -[source,terminal] ----- -$ oc login --username=<NAMEOFUSER> https://<HOSTNAME>:8443 ----- - -. Create a new project named `tracing-system`. -+ -[source,terminal] ----- -$ oc new-project tracing-system ----- - -. Create a custom resource file named `jaeger.yaml` that contains the following text: -+ -.Example jaeger-all-in-one.yaml -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: jaeger-all-in-one-inmemory ----- - -. Run the following command to deploy {JaegerShortName}: -+ -[source,terminal] ----- -$ oc create -n tracing-system -f jaeger.yaml ----- - -. 
Run the following command to watch the progress of the pods during the installation process: -+ -[source,terminal] ----- -$ oc get pods -n tracing-system -w ----- -+ -After the installation process has completed, you should see output similar to the following example: -+ -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -jaeger-all-in-one-inmemory-cdff7897b-qhfdx 2/2 Running 0 24s ----- diff --git a/modules/distr-tracing-deploy-production-es.adoc b/modules/distr-tracing-deploy-production-es.adoc deleted file mode 100644 index 7420097adead..000000000000 --- a/modules/distr-tracing-deploy-production-es.adoc +++ /dev/null @@ -1,137 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// - -:_content-type: PROCEDURE -[id="distr-tracing-deploy-production_{context}"] -= Deploying the {DTShortName} production strategy from the web console - -The `production` deployment strategy is intended for production environments that require a more scalable and highly available architecture, and where long-term storage of trace data is important. - -.Prerequisites - -* The OpenShift Elasticsearch Operator has been installed. -* The {JaegerName} Operator has been installed. -* You have reviewed the instructions for how to customize the deployment. -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Log in to the {product-title} web console as a user with the `cluster-admin` role. - -. Create a new project, for example `tracing-system`. -+ -[NOTE] -==== -If you are installing as part of Service Mesh, the {DTShortName} resources must be installed in the same namespace as the `ServiceMeshControlPlane` resource, for example `istio-system`. -==== -+ -.. Navigate to *Home* -> *Projects*. - -.. Click *Create Project*. - -.. Enter `tracing-system` in the *Name* field. - -.. Click *Create*. - -. Navigate to *Operators* -> *Installed Operators*. - -. If necessary, select `tracing-system` from the *Project* menu. You may have to wait a few moments for the Operators to be copied to the new project. - -. Click the {JaegerName} Operator. On the *Overview* tab, under *Provided APIs*, the Operator provides a single link. - -. Under *Jaeger*, click *Create Instance*. - -. On the *Create Jaeger* page, replace the default `all-in-one` YAML text with your production YAML configuration, for example: - -+ -.Example jaeger-production.yaml file with Elasticsearch -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: jaeger-production - namespace: -spec: - strategy: production - ingress: - security: oauth-proxy - storage: - type: elasticsearch - elasticsearch: - nodeCount: 3 - redundancyPolicy: SingleRedundancy - esIndexCleaner: - enabled: true - numberOfDays: 7 - schedule: 55 23 * * * - esRollover: - schedule: '*/30 * * * *' ----- -+ - -. Click *Create* to create the {JaegerShortName} instance. - -. On the *Jaegers* page, click the name of the {JaegerShortName} instance, for example, `jaeger-prod-elasticsearch`. - -. On the *Jaeger Details* page, click the *Resources* tab. Wait until all the pods have a status of "Running" before continuing. - - -[id="distr-tracing-deploy-production-cli_{context}"] -== Deploying the {DTShortName} production strategy from the CLI - -Follow this procedure to create an instance of {JaegerShortName} from the command line. - -.Prerequisites - -* The OpenShift Elasticsearch Operator has been installed. -* The {JaegerName} Operator has been installed. 
-* You have reviewed the instructions for how to customize the deployment. -* You have access to the OpenShift CLI (`oc`) that matches your {product-title} version. -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Log in to the {product-title} CLI as a user with the `cluster-admin` role. -+ -[source,terminal] ----- -$ oc login --username=<NAMEOFUSER> https://<HOSTNAME>:8443 ----- - -. Create a new project named `tracing-system`. -+ -[source,terminal] ----- -$ oc new-project tracing-system ----- - -. Create a custom resource file named `jaeger-production.yaml` that contains the text of the example file in the previous procedure. - -. Run the following command to deploy {JaegerShortName}: -+ -[source,terminal] ----- -$ oc create -n tracing-system -f jaeger-production.yaml ----- -+ -. Run the following command to watch the progress of the pods during the installation process: -+ -[source,terminal] ----- -$ oc get pods -n tracing-system -w ----- -+ -After the installation process has completed, you should see output similar to the following example: -+ -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -elasticsearch-cdm-jaegersystemjaegerproduction-1-6676cf568gwhlw 2/2 Running 0 10m -elasticsearch-cdm-jaegersystemjaegerproduction-2-bcd4c8bf5l6g6w 2/2 Running 0 10m -elasticsearch-cdm-jaegersystemjaegerproduction-3-844d6d9694hhst 2/2 Running 0 10m -jaeger-production-collector-94cd847d-jwjlj 1/1 Running 3 8m32s -jaeger-production-query-5cbfbd499d-tv8zf 3/3 Running 3 8m32s ----- diff --git a/modules/distr-tracing-deploy-streaming.adoc b/modules/distr-tracing-deploy-streaming.adoc deleted file mode 100644 index 473e522f7d12..000000000000 --- a/modules/distr-tracing-deploy-streaming.adoc +++ /dev/null @@ -1,156 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// - -:_content-type: PROCEDURE -[id="distr-tracing-deploy-streaming_{context}"] -= Deploying the {DTShortName} streaming strategy from the web console - -The `streaming` deployment strategy is intended for production environments that require a more scalable and highly available architecture, and where long-term storage of trace data is important. - -The `streaming` strategy provides a streaming capability that sits between the Collector and the Elasticsearch storage. This reduces the pressure on the storage under high load situations, and enables other trace post-processing capabilities to tap into the real-time span data directly from the Kafka streaming platform. - -[NOTE] -==== -The streaming strategy requires an additional Red Hat subscription for AMQ Streams. If you do not have an AMQ Streams subscription, contact your sales representative for more information. -==== - -[NOTE] -==== -The streaming deployment strategy is currently unsupported on {ibmzProductName}. -==== - -.Prerequisites - -* The AMQ Streams Operator has been installed. If using version 1.4.0 or higher you can use self-provisioning. Otherwise you must create the Kafka instance. -* The {JaegerName} Operator has been installed. -* You have reviewed the instructions for how to customize the deployment. -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Log in to the {product-title} web console as a user with the `cluster-admin` role. - -. Create a new project, for example `tracing-system`. 
- -+ -[NOTE] -==== -If you are installing as part of Service Mesh, the {DTShortName} resources must be installed in the same namespace as the `ServiceMeshControlPlane` resource, for example `istio-system`. -==== -+ - -.. Navigate to *Home* -> *Projects*. - -.. Click *Create Project*. - -.. Enter `tracing-system` in the *Name* field. - -.. Click *Create*. - -. Navigate to *Operators* -> *Installed Operators*. - -. If necessary, select `tracing-system` from the *Project* menu. You may have to wait a few moments for the Operators to be copied to the new project. - -. Click the {JaegerName} Operator. On the *Overview* tab, under *Provided APIs*, the Operator provides a single link. - -. Under *Jaeger*, click *Create Instance*. - -. On the *Create Jaeger* page, replace the default `all-in-one` YAML text with your streaming YAML configuration, for example: - -.Example jaeger-streaming.yaml file -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: jaeger-streaming -spec: - strategy: streaming - collector: - options: - kafka: - producer: - topic: jaeger-spans - #Note: If brokers are not defined,AMQStreams 1.4.0+ will self-provision Kafka. - brokers: my-cluster-kafka-brokers.kafka:9092 - storage: - type: elasticsearch - ingester: - options: - kafka: - consumer: - topic: jaeger-spans - brokers: my-cluster-kafka-brokers.kafka:9092 - ----- -//TODO - find out if this storage configuration is correct for OpenShift - -. Click *Create* to create the {JaegerShortName} instance. - -. On the *Jaegers* page, click the name of the {JaegerShortName} instance, for example, `jaeger-streaming`. - -. On the *Jaeger Details* page, click the *Resources* tab. Wait until all the pods have a status of "Running" before continuing. - - -[id="distr-tracing-deploy-streaming-cli_{context}"] -== Deploying the {DTShortName} streaming strategy from the CLI - -Follow this procedure to create an instance of {JaegerShortName} from the command line. - -.Prerequisites - -* The AMQ Streams Operator has been installed. If using version 1.4.0 or higher you can use self-provisioning. Otherwise you must create the Kafka instance. -* The {JaegerName} Operator has been installed. -* You have reviewed the instructions for how to customize the deployment. -* You have access to the OpenShift CLI (`oc`) that matches your {product-title} version. -* You have access to the cluster as a user with the `cluster-admin` role. - -Procedure - -. Log in to the {product-title} CLI as a user with the `cluster-admin` role. -+ -[source,terminal] ----- -$ oc login --username=<NAMEOFUSER> https://<HOSTNAME>:8443 ----- - -. Create a new project named `tracing-system`. -+ -[source,terminal] ----- -$ oc new-project tracing-system ----- - -. Create a custom resource file named `jaeger-streaming.yaml` that contains the text of the example file in the previous procedure. - -. Run the following command to deploy Jaeger: -+ -[source,terminal] ----- -$ oc create -n tracing-system -f jaeger-streaming.yaml ----- -+ -. 
Run the following command to watch the progress of the pods during the installation process: -+ -[source,terminal] ----- -$ oc get pods -n tracing-system -w ----- -+ -After the installation process has completed, you should see output similar to the following example: -+ -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -elasticsearch-cdm-jaegersystemjaegerstreaming-1-697b66d6fcztcnn 2/2 Running 0 5m40s -elasticsearch-cdm-jaegersystemjaegerstreaming-2-5f4b95c78b9gckz 2/2 Running 0 5m37s -elasticsearch-cdm-jaegersystemjaegerstreaming-3-7b6d964576nnz97 2/2 Running 0 5m5s -jaeger-streaming-collector-6f6db7f99f-rtcfm 1/1 Running 0 80s -jaeger-streaming-entity-operator-6b6d67cc99-4lm9q 3/3 Running 2 2m18s -jaeger-streaming-ingester-7d479847f8-5h8kc 1/1 Running 0 80s -jaeger-streaming-kafka-0 2/2 Running 0 3m1s -jaeger-streaming-query-65bf5bb854-ncnc7 3/3 Running 0 80s -jaeger-streaming-zookeeper-0 2/2 Running 0 3m39s ----- diff --git a/modules/distr-tracing-deployment-best-practices.adoc b/modules/distr-tracing-deployment-best-practices.adoc deleted file mode 100644 index 8e6098bc6140..000000000000 --- a/modules/distr-tracing-deployment-best-practices.adoc +++ /dev/null @@ -1,15 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// -:_content-type: CONCEPT -[id="distr-tracing-deployment-best-practices_{context}"] -= Deployment best practices - -* {DTProductName} instance names must be unique. If you want to have multiple {JaegerName} instances and are using sidecar injected agents, then the {JaegerName} instances should have unique names, and the injection annotation should explicitly specify the {JaegerName} instance name the tracing data should be reported to. - -* If you have a multitenant implementation and tenants are separated by namespaces, deploy a {JaegerName} instance to each tenant namespace. - -** Agent as a daemonset is not supported for multitenant installations or {product-dedicated}. Agent as a sidecar is the only supported configuration for these use cases. - -* If you are installing {DTShortName} as part of {SMProductName}, the {DTShortName} resources must be installed in the same namespace as the `ServiceMeshControlPlane` resource. diff --git a/modules/distr-tracing-features.adoc b/modules/distr-tracing-features.adoc deleted file mode 100644 index 127c08d91319..000000000000 --- a/modules/distr-tracing-features.adoc +++ /dev/null @@ -1,18 +0,0 @@ -//// -This module included in the following assemblies: --service_mesh/v2x/ossm-architecture.adoc --dist_tracing_arch/distr-tracing-architecture.adoc -//// - -[id="distr-tracing-features_{context}"] -= {DTProductName} features - -{DTProductName} provides the following capabilities: - -* Integration with Kiali – When properly configured, you can view {DTShortName} data from the Kiali console. - -* High scalability – The {DTShortName} back end is designed to have no single points of failure and to scale with the business needs. - -* Distributed Context Propagation – Enables you to connect data from different components together to create a complete end-to-end trace. - -* Backwards compatibility with Zipkin – {DTProductName} has APIs that enable it to be used as a drop-in replacement for Zipkin, but Red Hat is not supporting Zipkin compatibility in this release. 
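The following minimal sketch illustrates the deployment best practice, described earlier in this section, of pointing the injection annotation at a specific {JaegerName} instance instead of the string `"true"` when more than one instance exists. The Deployment name `myapp`, the image, and the instance name `jaeger-production` are placeholders; substitute an instance name returned by `oc get jaegers` in your namespace.

.Example injection annotation that targets a named {JaegerShortName} instance
[source,yaml]
----
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp
  annotations:
    # Report trace data to the named instance instead of auto-selecting one
    "sidecar.jaegertracing.io/inject": "jaeger-production"
spec:
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
      - name: myapp
        image: acme/myapp:myversion
----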
diff --git a/modules/distr-tracing-install-elasticsearch.adoc b/modules/distr-tracing-install-elasticsearch.adoc deleted file mode 100644 index 822df38406e2..000000000000 --- a/modules/distr-tracing-install-elasticsearch.adoc +++ /dev/null @@ -1,57 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-installing.adoc -//// - -:_content-type: PROCEDURE -[id="distr-tracing-operator-install-elasticsearch_{context}"] -= Installing the OpenShift Elasticsearch Operator - -The default {JaegerName} deployment uses in-memory storage because it is designed to be installed quickly for those evaluating {DTProductName}, giving demonstrations, or using {JaegerName} in a test environment. If you plan to use {JaegerName} in production, you must install and configure a persistent storage option, in this case, Elasticsearch. - -.Prerequisites -* You have access to the {product-title} web console. -* You have access to the cluster as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. - -[WARNING] -==== -Do not install Community versions of the Operators. Community Operators are not supported. -==== - -[NOTE] -==== -If you have already installed the OpenShift Elasticsearch Operator as part of OpenShift Logging, you do not need to install the OpenShift Elasticsearch Operator again. The {JaegerName} Operator creates the Elasticsearch instance using the installed OpenShift Elasticsearch Operator. -==== - -.Procedure - -. Log in to the {product-title} web console as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. - -. Navigate to *Operators* -> *OperatorHub*. - -. Type *Elasticsearch* into the filter box to locate the OpenShift Elasticsearch Operator. - -. Click the *OpenShift Elasticsearch Operator* provided by Red Hat to display information about the Operator. - -. Click *Install*. - -. On the *Install Operator* page, select the *stable* Update Channel. This automatically updates your Operator as new versions are released. - -. Accept the default *All namespaces on the cluster (default)*. This installs the Operator in the default `openshift-operators-redhat` project and makes the Operator available to all projects in the cluster. -+ -[NOTE] -==== -The Elasticsearch installation requires the *openshift-operators-redhat* namespace for the OpenShift Elasticsearch Operator. The other {DTProductName} Operators are installed in the `openshift-operators` namespace. -==== -+ - -* Accept the default *Automatic* approval strategy. By accepting the default, when a new version of this Operator is available, Operator Lifecycle Manager (OLM) automatically upgrades the running instance of your Operator without human intervention. If you select *Manual* updates, when a newer version of an Operator is available, OLM creates an update request. As a cluster administrator, you must then manually approve that update request to have the Operator updated to the new version. -+ -[NOTE] -==== -The *Manual* approval strategy requires a user with appropriate credentials to approve the Operator install and subscription process. -==== - -. Click *Install*. - -. On the *Installed Operators* page, select the `openshift-operators-redhat` project. Wait until you see that the OpenShift Elasticsearch Operator shows a status of "InstallSucceeded" before continuing. 
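If you prefer to perform the same installation from the CLI, the following sketch creates the `openshift-operators-redhat` namespace, an Operator group that targets all namespaces, and a Subscription on the *stable* channel with the *Automatic* approval strategy. The package name `elasticsearch-operator` and the `redhat-operators` catalog source are assumptions based on common defaults; verify them in your cluster with `oc get packagemanifests -n openshift-marketplace` before applying the file with `oc apply -f <filename>.yaml`.

.Example CLI Subscription sketch for the OpenShift Elasticsearch Operator
[source,yaml]
----
apiVersion: v1
kind: Namespace
metadata:
  name: openshift-operators-redhat
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: openshift-operators-redhat
  namespace: openshift-operators-redhat
spec: {} # An empty spec targets all namespaces, matching "All namespaces on the cluster (default)"
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: elasticsearch-operator
  namespace: openshift-operators-redhat
spec:
  channel: stable
  installPlanApproval: Automatic # Matches the default Automatic approval strategy
  name: elasticsearch-operator # Package name; confirm with oc get packagemanifests
  source: redhat-operators
  sourceNamespace: openshift-marketplace
----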
diff --git a/modules/distr-tracing-install-jaeger-operator.adoc b/modules/distr-tracing-install-jaeger-operator.adoc deleted file mode 100644 index 37bcc6ba7cd7..000000000000 --- a/modules/distr-tracing-install-jaeger-operator.adoc +++ /dev/null @@ -1,53 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-installing.adoc -//// - -:_content-type: PROCEDURE -[id="distr-tracing-jaeger-operator-install_{context}"] -= Installing the {JaegerName} Operator - -To install {JaegerName}, you use the link:https://operatorhub.io/[OperatorHub] to install the {JaegerName} Operator. - -By default, the Operator is installed in the `openshift-operators` project. - -.Prerequisites -* You have access to the {product-title} web console. -* You have access to the cluster as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. -* If you require persistent storage, you must also install the OpenShift Elasticsearch Operator before installing the {JaegerName} Operator. - -[WARNING] -==== -Do not install Community versions of the Operators. Community Operators are not supported. -==== - -.Procedure - -. Log in to the {product-title} web console as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. - -. Navigate to *Operators* -> *OperatorHub*. - -. Type *distributed tracing platform* into the filter to locate the {JaegerName} Operator. - -. Click the *{JaegerName} Operator* provided by Red Hat to display information about the Operator. - -. Click *Install*. - -. On the *Install Operator* page, select the *stable* Update Channel. This automatically updates your Operator as new versions are released. -//If you select a maintenance channel, for example, *Stable*, you will receive bug fixes and security patches for the length of the support cycle for that version. - -. Accept the default *All namespaces on the cluster (default)*. This installs the Operator in the default `openshift-operators` project and makes the Operator available to all projects in the cluster. - -* Accept the default *Automatic* approval strategy. By accepting the default, when a new version of this Operator is available, Operator Lifecycle Manager (OLM) automatically upgrades the running instance of your Operator without human intervention. If you select *Manual* updates, when a newer version of an Operator is available, OLM creates an update request. As a cluster administrator, you must then manually approve that update request to have the Operator updated to the new version. -+ -[NOTE] -==== -The *Manual* approval strategy requires a user with appropriate credentials to approve the Operator install and subscription process. -==== -+ - -. Click *Install*. - -. Navigate to *Operators* -> *Installed Operators*. - -. On the *Installed Operators* page, select the `openshift-operators` project. Wait until you see that the {JaegerName} Operator shows a status of "Succeeded" before continuing. 
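As a quick cross-check of the final step above, you can also confirm the installation from the CLI by inspecting the ClusterServiceVersion (CSV) in the `openshift-operators` project and waiting for its phase to report `Succeeded`. The CSV name and display name in the output below are illustrative and depend on the Operator version installed in your cluster.

[source,terminal]
----
$ oc get csv -n openshift-operators
----

.Example output
[source,terminal]
----
NAME                      DISPLAY                                          VERSION   PHASE
jaeger-operator.v1.42.0   Red Hat OpenShift distributed tracing platform   1.42.0    Succeeded
----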
diff --git a/modules/distr-tracing-install-otel-operator.adoc b/modules/distr-tracing-install-otel-operator.adoc deleted file mode 100644 index d85255daa30c..000000000000 --- a/modules/distr-tracing-install-otel-operator.adoc +++ /dev/null @@ -1,54 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-installing.adoc -//// - -:_content-type: PROCEDURE -[id="distr-tracing-install-otel-operator_{context}"] -= Installing the {OTELName} Operator - -:FeatureName: The {OTELName} Operator -include::snippets/technology-preview.adoc[leveloffset=+1] - -To install {OTELName}, you use the link:https://operatorhub.io/[OperatorHub] to install the {OTELName} Operator. - -By default, the Operator is installed in the `openshift-operators` project. - -.Prerequisites -* You have access to the {product-title} web console. -* You have access to the cluster as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. - -[WARNING] -==== -Do not install Community versions of the Operators. Community Operators are not supported. -==== - -.Procedure - -. Log in to the {product-title} web console as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. - -. Navigate to *Operators* -> *OperatorHub*. - -. Type *distributed tracing data collection* into the filter to locate the {OTELName} Operator. - -. Click the *{OTELName} Operator* provided by Red Hat to display information about the Operator. - -. Click *Install*. - -. On the *Install Operator* page, accept the default *stable* Update channel. This automatically updates your Operator as new versions are released. - -. Accept the default *All namespaces on the cluster (default)*. This installs the Operator in the default `openshift-operators` project and makes the Operator available to all projects in the cluster. - -. Accept the default *Automatic* approval strategy. By accepting the default, when a new version of this Operator is available, Operator Lifecycle Manager (OLM) automatically upgrades the running instance of your Operator without human intervention. If you select *Manual* updates, when a newer version of an Operator is available, OLM creates an update request. As a cluster administrator, you must then manually approve that update request to have the Operator updated to the new version. -+ -[NOTE] -==== -The *Manual* approval strategy requires a user with appropriate credentials to approve the Operator install and subscription process. -==== -+ - -. Click *Install*. - -. Navigate to *Operators* -> *Installed Operators*. - -. On the *Installed Operators* page, select the `openshift-operators` project. Wait until you see that the {OTELName} Operator shows a status of "Succeeded" before continuing. diff --git a/modules/distr-tracing-install-overview.adoc b/modules/distr-tracing-install-overview.adoc deleted file mode 100644 index d082c4e98944..000000000000 --- a/modules/distr-tracing-install-overview.adoc +++ /dev/null @@ -1,20 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-installing.adoc -//// - -:_content-type: CONCEPT -[id="distr-tracing-install-overview_{context}"] -= {DTProductName} installation overview - -The steps for installing {DTProductName} are as follows: - -* Review the documentation and determine your deployment strategy. 
- -* If your deployment strategy requires persistent storage, install the OpenShift Elasticsearch Operator via the OperatorHub. - -* Install the {JaegerName} Operator via the OperatorHub. - -* Modify the custom resource YAML file to support your deployment strategy. - -* Deploy one or more instances of {JaegerName} to your {product-title} environment. diff --git a/modules/distr-tracing-product-overview.adoc deleted file mode 100644 index d7519178e294..000000000000 --- a/modules/distr-tracing-product-overview.adoc +++ /dev/null @@ -1,28 +0,0 @@ -//// -This module included in the following assemblies: --service_mesh/v2x/ossm-architecture.adoc -- distributed-tracing-release-notes.adoc --distr_tracing_arch/distr-tracing-architecture.adoc --serverless/serverless-tracing.adoc -//// - -:_content-type: CONCEPT -[id="distr-tracing-product-overview_{context}"] -= Distributed tracing overview - -As a service owner, you can use distributed tracing to instrument your services to gather insights into your service architecture. -You can use {DTShortName} for monitoring, network profiling, and troubleshooting the interaction between components in modern, cloud-native, microservices-based applications. - -With {DTShortName} you can perform the following functions: - -* Monitor distributed transactions - -* Optimize performance and latency - -* Perform root cause analysis - -{DTProductName} consists of two main components: - -* *{JaegerName}* - This component is based on the open source link:https://www.jaegertracing.io/[Jaeger project]. - -* *{OTELName}* - This component is based on the open source link:https://opentelemetry.io/[OpenTelemetry project]. diff --git a/modules/distr-tracing-removing-instance-cli.adoc deleted file mode 100644 index cf9d94810e7f..000000000000 --- a/modules/distr-tracing-removing-instance-cli.adoc +++ /dev/null @@ -1,90 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/dist-tracing-removing.adoc -//// - -[id="distr-tracing-removing-instance-cli_{context}"] -= Removing a {JaegerName} instance from the CLI - -. Log in to the {product-title} CLI. -+ -[source,terminal] ----- -$ oc login --username=<NAMEOFUSER> ----- -+ -. To display the {JaegerShortName} instances, run the following command: -+ -[source,terminal] ----- -$ oc get deployments -n <jaeger-project> ----- -+ -For example, -+ -[source,terminal] ----- -$ oc get deployments -n openshift-operators ----- -+ -The names of Operators have the suffix `-operator`. The following example shows two Operators and four {JaegerShortName} instances: -+ -[source,terminal] ----- -$ oc get deployments -n openshift-operators ----- -+ -You should see output similar to the following: -+ -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -elasticsearch-operator 1/1 1 1 93m -jaeger-operator 1/1 1 1 49m -jaeger-test 1/1 1 1 7m23s -jaeger-test2 1/1 1 1 6m48s -tracing1 1/1 1 1 7m8s -tracing2 1/1 1 1 35m ----- -+ -. To remove an instance of {JaegerShortName}, run the following command: -+ -[source,terminal] ----- -$ oc delete jaeger <deployment-name> -n <jaeger-project> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc delete jaeger tracing2 -n openshift-operators ----- -+ - -. 
To verify the deletion, run the `oc get deployments` command again: -+ -[source,terminal] ----- -$ oc get deployments -n <jaeger-project> ----- - -+ -For example: -+ -[source,terminal] ----- -$ oc get deployments -n openshift-operators ----- -+ -You should see output similar to the following example: -+ -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -elasticsearch-operator 1/1 1 1 94m -jaeger-operator 1/1 1 1 50m -jaeger-test 1/1 1 1 8m14s -jaeger-test2 1/1 1 1 7m39s -tracing1 1/1 1 1 7m59s ----- diff --git a/modules/distr-tracing-removing-instance.adoc deleted file mode 100644 index 809277e3c3bc..000000000000 --- a/modules/distr-tracing-removing-instance.adoc +++ /dev/null @@ -1,29 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/dist-tracing-removing.adoc -//// - -:_content-type: PROCEDURE -[id="distr-tracing-removing-instance_{context}"] -= Removing a {JaegerName} instance using the web console - -[NOTE] -==== -When deleting an instance that uses the in-memory storage, all data is permanently lost. Data stored in persistent storage such as Elasticsearch is not deleted when a {JaegerName} instance is removed. -==== - -.Procedure - -. Log in to the {product-title} web console. - -. Navigate to *Operators* -> *Installed Operators*. - -. Select the name of the project where the Operators are installed from the *Project* menu, for example, `openshift-operators`. - -. Click the {JaegerName} Operator. - -. Click the *Jaeger* tab. - -. Click the Options menu {kebab} next to the instance you want to delete and select *Delete Jaeger*. - -. In the confirmation message, click *Delete*. diff --git a/modules/distr-tracing-rn-fixed-issues.adoc deleted file mode 100644 index 6fffaa723b2f..000000000000 --- a/modules/distr-tracing-rn-fixed-issues.adoc +++ /dev/null @@ -1,57 +0,0 @@ -//// -Module included in the following assemblies: -* distributed-tracing-release-notes.adoc -* service_mesh/v2x/servicemesh-release-notes.adoc -//// -:_content-type: REFERENCE -[id="distr-tracing-rn-fixed-issues_{context}"] -= {DTProductName} fixed issues -//// -Provide the following info for each issue if possible: -Consequence - What user action or situation would make this problem appear (If you have the foo option enabled and did x)? What did the customer experience as a result of the issue? What was the symptom? -Cause - Why did this happen? -Fix - What did we change to fix the problem? -Result - How has the behavior changed as a result? Try to avoid “It is fixed” or “The issue is resolved” or “The error no longer presents”. -//// - -* link:https://issues.redhat.com/browse/OSSM-1910[OSSM-1910] -Because of an issue introduced in version 2.6, TLS connections could not be established with {product-title} {SMProductShortName}. -This update resolves the issue by changing the service port names to match conventions used by {product-title} {SMProductShortName} and Istio. - -* link:https://issues.redhat.com/browse/OBSDA-208[OBSDA-208] - Before this update, the default 200m CPU and 256Mi memory resource limits could cause {OTELShortName} to restart continuously on large clusters. - This update resolves the issue by removing these resource limits. - -* link:https://issues.redhat.com/browse/OBSDA-222[OBSDA-222] -Before this update, spans could be dropped in the {product-title} {JaegerShortName}. 
-To help prevent this issue from occurring, this release updates version dependencies. - -* link:https://issues.redhat.com/browse/TRACING-2337[TRACING-2337] -Jaeger logged a repetitive warning message in the Jaeger logs similar to the following: -+ -[source,terminal] ----- -{"level":"warn","ts":1642438880.918793,"caller":"channelz/logging.go:62","msg":"[core]grpc: Server.Serve failed to create ServerTransport: connection error: desc = \"transport: http2Server.HandleStreams received bogus greeting from client: \\\"\\\\x16\\\\x03\\\\x01\\\\x02\\\\x00\\\\x01\\\\x00\\\\x01\\\\xfc\\\\x03\\\\x03vw\\\\x1a\\\\xc9T\\\\xe7\\\\xdaCj\\\\xb7\\\\x8dK\\\\xa6\\\"\"","system":"grpc","grpc_log":true} ----- -+ -This issue was resolved by exposing only the HTTP(S) port of the query service, and not the gRPC port. - -* link:https://issues.redhat.com/browse/TRACING-2009[TRACING-2009] The Jaeger Operator has been updated to include support for the Strimzi Kafka Operator 0.23.0. - -* link:https://issues.redhat.com/browse/TRACING-1907[TRACING-1907] The Jaeger agent sidecar injection was failing due to missing config maps in the application namespace. The config maps were getting automatically deleted due to an incorrect `OwnerReference` field setting, and as a result, the application pods were not moving past the "ContainerCreating" stage. The incorrect settings have been removed. - -* link:https://issues.redhat.com/browse/TRACING-1725[TRACING-1725] Follow-up to TRACING-1631. Additional fix to ensure that Elasticsearch certificates are properly reconciled when there are multiple Jaeger production instances, using the same name but within different namespaces. See also link:https://bugzilla.redhat.com/show_bug.cgi?id=1918920[BZ-1918920]. - -* link:https://issues.jboss.org/browse/TRACING-1631[TRACING-1631] Multiple Jaeger production instances, using the same name but within different namespaces, causing Elasticsearch certificate issue. When multiple service meshes were installed, all of the Jaeger Elasticsearch instances had the same Elasticsearch secret instead of individual secrets, which prevented the OpenShift Elasticsearch Operator from communicating with all of the Elasticsearch clusters. - -* link:https://issues.redhat.com/browse/TRACING-1300[TRACING-1300] Failed connection between Agent and Collector when using Istio sidecar. An update of the Jaeger Operator enabled TLS communication by default between a Jaeger sidecar agent and the Jaeger Collector. - -* link:https://issues.redhat.com/browse/TRACING-1208[TRACING-1208] Authentication "500 Internal Error" when accessing Jaeger UI. When trying to authenticate to the UI by using OAuth, authentication failed with a 500 error because the oauth-proxy sidecar did not trust the custom CA bundle defined at installation time with the `additionalTrustBundle`. - -* link:https://issues.redhat.com/browse/TRACING-1166[TRACING-1166] It is not currently possible to use the Jaeger streaming strategy within a disconnected environment. When a Kafka cluster is being provisioned, it results in an error: `Failed to pull image registry.redhat.io/amq7/amq-streams-kafka-24-rhel7@sha256:f9ceca004f1b7dccb3b82d9a8027961f9fe4104e0ed69752c0bdd8078b4a1076`. - -* link:https://issues.redhat.com/browse/TRACING-809[TRACING-809] Jaeger Ingester is incompatible with Kafka 2.3. When there are two or more instances of the Jaeger Ingester and enough traffic, it will continuously generate rebalancing messages in the logs. This is due to a regression in Kafka 2.3 that was fixed in Kafka 2.3.1. 
For more information, see https://github.com/jaegertracing/jaeger/issues/1819[Jaegertracing-1819]. - -* link:https://bugzilla.redhat.com/show_bug.cgi?id=1918920[BZ-1918920]/link:https://issues.redhat.com/browse/LOG-1619[LOG-1619] The Elasticsearch pods do not get restarted automatically after an update. -+ -Workaround: Restart the pods manually. diff --git a/modules/distr-tracing-rn-known-issues.adoc deleted file mode 100644 index 737bc1fbe259..000000000000 --- a/modules/distr-tracing-rn-known-issues.adoc +++ /dev/null @@ -1,35 +0,0 @@ -//// -Module included in the following assemblies: -* service_mesh/v2x/servicemesh-release-notes.adoc -* distributed-tracing--release-notes.adoc -//// -:_content-type: REFERENCE -[id="distr-tracing-rn-known-issues_{context}"] -= {DTProductName} known issues - -//// -Consequence - What user action or situation would make this problem appear (Selecting the Foo option with the Bar version 1.3 plugin enabled results in an error message)? What did the customer experience as a result of the issue? What was the symptom? -Cause (if it has been identified) - Why did this happen? -Workaround (If there is one)- What can you do to avoid or negate the effects of this issue in the meantime? Sometimes if there is no workaround it is worthwhile telling readers to contact support for advice. Never promise future fixes. -Result - If the workaround does not completely address the problem. -//// - -These limitations exist in {DTProductName}: - -* Apache Spark is not supported. -ifndef::openshift-rosa[] - -* The streaming deployment via AMQ/Kafka is unsupported on IBM Z and IBM Power Systems. -endif::openshift-rosa[] - -These are the known issues for {DTProductName}: - -* link:https://issues.redhat.com/browse/OBSDA-220[OBSDA-220] In some cases, if you try to pull an image using {OTELShortName}, the image pull fails and a `Failed to pull image` error message appears. -There is no workaround for this issue. - -* link:https://issues.redhat.com/browse/TRACING-2057[TRACING-2057] The Kafka API has been updated to `v1beta2` to support the Strimzi Kafka Operator 0.23.0. However, this API version is not supported by AMQ Streams 1.6.3. If you have the following environment, your Jaeger services will not be upgraded, and you cannot create new Jaeger services or modify existing Jaeger services: - -** Jaeger Operator channel: *1.17.x stable* or *1.20.x stable* -** AMQ Streams Operator channel: *amq-streams-1.6.x* -+ -To resolve this issue, switch the subscription channel for your AMQ Streams Operator to either *amq-streams-1.7.x* or *stable*. diff --git a/modules/distr-tracing-rn-new-features.adoc deleted file mode 100644 index cc2674c57079..000000000000 --- a/modules/distr-tracing-rn-new-features.adoc +++ /dev/null @@ -1,236 +0,0 @@ -//// -Module included in the following assemblies: -- distributed-tracing-release-notes.adoc -//// -//// -Feature – Describe the new functionality available to the customer. For enhancements, try to describe as specifically as possible where the customer will see changes. -Reason – If known, include why has the enhancement been implemented (use case, performance, technology, etc.). For example, showcases integration of X with Y, demonstrates Z API feature, includes latest framework bug fixes. -Result – If changed, describe the current user experience. 
-//// -:_content-type: REFERENCE -[id="distr-tracing-rn-new-features_{context}"] -= New features and enhancements - -This release adds improvements related to the following components and concepts. - -== New features and enhancements {DTProductName} 2.8 - -This release of {DTProductName} addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. - -=== Component versions supported in {DTProductName} version 2.8 - -[options="header"] -|=== -|Operator |Component |Version -|{JaegerName} -|Jaeger -|1.42 - -|{OTELName} -|OpenTelemetry -|0.74.0 - -|{TempoName} -|{TempoShortName} -|0.1.0 -|=== - -== New features and enhancements {DTProductName} 2.7 - -This release of {DTProductName} addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. - -=== Component versions supported in {DTProductName} version 2.7 - -[options="header"] -|=== -|Operator |Component |Version -|{JaegerName} -|Jaeger -|1.39 - -|{OTELName} -|OpenTelemetry -|0.63.1 -|=== - -== New features and enhancements {DTProductName} 2.6 - -This release of {DTProductName} addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. - -=== Component versions supported in {DTProductName} version 2.6 - -[options="header"] -|=== -|Operator |Component |Version -|{JaegerName} -|Jaeger -|1.38 - -|{OTELName} -|OpenTelemetry -|0.60 -|=== - -== New features and enhancements {DTProductName} 2.5 - -This release of {DTProductName} addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. - -This release introduces support for ingesting OpenTelemetry protocol (OTLP) to the {JaegerName} Operator. The Operator now automatically enables the OTLP ports: - -* Port 4317 is used for OTLP gRPC protocol. -* Port 4318 is used for OTLP HTTP protocol. - -This release also adds support for collecting Kubernetes resource attributes to the {OTELName} Operator. - -=== Component versions supported in {DTProductName} version 2.5 - -[options="header"] -|=== -|Operator |Component |Version -|{JaegerName} -|Jaeger -|1.36 - -|{OTELName} -|OpenTelemetry -|0.56 -|=== - - -== New features and enhancements {DTProductName} 2.4 - -This release of {DTProductName} addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. - -This release also adds support for auto-provisioning certificates using the Red Hat Elasticsearch Operator. - -* Self-provisioning, which means using the {JaegerName} Operator to call the Red Hat Elasticsearch Operator during installation. Self provisioning is fully supported with this release. -* Creating the Elasticsearch instance and certificates first and then configuring the {JaegerShortName} to use the certificate is a Technology Preview for this release. - -[NOTE] -==== -When upgrading to {DTProductName} 2.4, the Operator recreates the Elasticsearch instance, which might take five to ten minutes. Distributed tracing will be down and unavailable for that period. -==== - -=== Component versions supported in {DTProductName} version 2.4 - -[options="header"] -|=== -|Operator |Component |Version -|{JaegerName} -|Jaeger -|1.34.1 - -|{OTELName} -|OpenTelemetry -|0.49 -|=== - -== New features and enhancements {DTProductName} 2.3.1 - -This release of {DTProductName} addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. 
- -=== Component versions supported in {DTProductName} version 2.3.1 - -[options="header"] -|=== -|Operator |Component |Version -|{JaegerName} -|Jaeger -|1.30.2 - -|{OTELName} -|OpenTelemetry -|0.44.1-1 -|=== - -== New features and enhancements {DTProductName} 2.3.0 - -This release of {DTProductName} addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. - -With this release, the {JaegerName} Operator is now installed to the `openshift-distributed-tracing` namespace by default. Before this update, the default installation had been in the `openshift-operators` namespace. - -=== Component versions supported in {DTProductName} version 2.3.0 - -[options="header"] -|=== -|Operator |Component |Version -|{JaegerName} -|Jaeger -|1.30.1 - -|{OTELName} -|OpenTelemetry -|0.44.0 -|=== - -== New features and enhancements {DTProductName} 2.2.0 - -This release of {DTProductName} addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. - -=== Component versions supported in {DTProductName} version 2.2.0 - -[options="header"] -|=== -|Operator |Component |Version -|{JaegerName} -|Jaeger -|1.30.0 - -|{OTELName} -|OpenTelemetry -|0.42.0 -|=== - -== New features and enhancements {DTProductName} 2.1.0 - -This release of {DTProductName} addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. - -=== Component versions supported in {DTProductName} version 2.1.0 - -[options="header"] -|=== -|Operator |Component |Version -|{JaegerName} -|Jaeger -|1.29.1 - -|{OTELName} -|OpenTelemetry -|0.41.1 -|=== - -== New features and enhancements {DTProductName} 2.0.0 - -This release marks the rebranding of Red Hat OpenShift Jaeger to {DTProductName}. This release consists of the following changes, additions, and improvements: - -* {DTProductName} now consists of the following two main components: - -** *{JaegerName}* - This component is based on the open source link:https://www.jaegertracing.io/[Jaeger project]. - -** *{OTELName}* - This component is based on the open source link:https://opentelemetry.io/[OpenTelemetry project]. - -* Updates {JaegerName} Operator to Jaeger 1.28. Going forward, {DTProductName} will only support the `stable` Operator channel. Channels for individual releases are no longer supported. - -* Introduces a new {OTELName} Operator based on OpenTelemetry 0.33. Note that this Operator is a Technology Preview feature. - -* Adds support for OpenTelemetry protocol (OTLP) to the Query service. - -* Introduces a new distributed tracing icon that appears in the OpenShift OperatorHub. - -* Includes rolling updates to the documentation to support the name change and new features. - -This release also addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. - -=== Component versions supported in {DTProductName} version 2.0.0 - -[options="header"] -|=== -|Operator |Component |Version -|{JaegerName} -|Jaeger -|1.28.0 - -|{OTELName} -|OpenTelemetry -|0.33.0 -|=== diff --git a/modules/distr-tracing-rn-technology-preview.adoc b/modules/distr-tracing-rn-technology-preview.adoc deleted file mode 100644 index bb06420e5a5d..000000000000 --- a/modules/distr-tracing-rn-technology-preview.adoc +++ /dev/null @@ -1,86 +0,0 @@ -//// -Module included in the following assemblies: -- rhbjaeger-release-notes.adoc -//// -:_content-type: CONCEPT -[id="distr-tracing-rn-technology-preview_{context}"] -= {DTProductName} Technology Preview -//// -Provide the following info for each issue if possible: -Description - Describe the new functionality available to the customer. 
For enhancements, try to describe as specifically as possible where the customer will see changes. Avoid the word “supports” as in [product] now supports [feature] to avoid customer confusion with full support. Say, for example, “available as a Technology Preview.” -Package - A brief description of what the customer has to install or enable to use the Technology Preview feature. (e.g., available in quickstart.zip on customer portal, JDF website, container on registry, enable option, etc.) -//// - -[IMPORTANT] -==== -Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. - -For more information about the support scope of Red Hat Technology Preview features, see link:https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Support Scope]. -==== - -== {DTProductName} 2.8.0 Technology Preview - -This release introduces support for {TempoName} as a Technology Preview feature for {DTProductName}. -The feature uses version 0.1.0 of {TempoName} and version 2.0.1 of the upstream {TempoShortName} components. - -You can use {TempoName} to replace Jaeger so that you can use S3-compatible storage instead of ElasticSearch. -Most users who use {TempoName} instead of Jaeger will not notice any difference in functionality because {TempoShortName} supports the same ingestion and query protocols as Jaeger and uses the same user interface. - -If you enable this Technology Preview feature, note the following limitations of the current implementation: - -* {TempoName} currently does not support disconnected installations. (link:https://issues.redhat.com/browse/TRACING-3145[TRACING-3145]) - -* When you use the Jaeger user interface (UI) with {TempoName}, the Jaeger UI lists only services that have sent traces within the last 15 minutes. -For services that have not sent traces within the last 15 minutes, those traces are still stored even though they are not visible in the Jaeger UI. (link:https://issues.redhat.com/browse/TRACING-3139[TRACING-3139]) - -Expanded support for the Tempo Operator is planned for future releases of {DTProductName}. -Possible additional features might include support for TLS authentication, multitenancy, and multiple clusters. -For more information about the Tempo Operator, see link:https://tempo-operator.netlify.app[the Tempo community documentation]. - -== {DTProductName} 2.4.0 Technology Preview - -This release also adds support for auto-provisioning certificates using the Red Hat Elasticsearch Operator. - -* Self-provisioning, which means using the {JaegerName} Operator to call the Red Hat Elasticsearch Operator during installation. Self provisioning is fully supported with this release. -* Creating the Elasticsearch instance and certificates first and then configuring the {JaegerShortName} to use the certificate is a Technology Preview for this release. - -== {DTProductName} 2.2.0 Technology Preview - -Unsupported OpenTelemetry Collector components included in the 2.1 release have been removed. - -== {DTProductName} 2.1.0 Technology Preview - -This release introduces a breaking change to how to configure certificates in the OpenTelemetry custom resource file. 
In the new version, the `ca_file` moves under `tls` in the custom resource, as shown in the following examples. - -.CA file configuration for OpenTelemetry version 0.33 -[source,yaml] ----- -spec: - mode: deployment - config: | - exporters: - jaeger: - endpoint: jaeger-production-collector-headless.tracing-system.svc:14250 - ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt" ----- - -.CA file configuration for OpenTelemetry version 0.41.1 -[source,yaml] ----- -spec: - mode: deployment - config: | - exporters: - jaeger: - endpoint: jaeger-production-collector-headless.tracing-system.svc:14250 - tls: - ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt" ----- - -== {DTProductName} 2.0.0 Technology Preview - -This release includes the addition of the {OTELName}, which you install using the {OTELName} Operator. {OTELName} is based on the link:https://opentelemetry.io/[OpenTelemetry] APIs and instrumentation. - -{OTELName} includes the OpenTelemetry Operator and Collector. The Collector can be used to receive traces in either the OpenTelemetry or Jaeger protocol and send the trace data to {DTProductName}. Other capabilities of the Collector are not supported at this time. - -The OpenTelemetry Collector allows developers to instrument their code with vendor agnostic APIs, avoiding vendor lock-in and enabling a growing ecosystem of observability tooling. diff --git a/modules/distr-tracing-sidecar-automatic.adoc b/modules/distr-tracing-sidecar-automatic.adoc deleted file mode 100644 index 9c425dc12c7a..000000000000 --- a/modules/distr-tracing-sidecar-automatic.adoc +++ /dev/null @@ -1,39 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// -:_content-type: REFERENCE -[id="dist-tracing-sidecar-automatic_{context}"] -= Automatically injecting sidecars - -The {JaegerName} Operator can inject Jaeger Agent sidecars into Deployment workloads. To enable automatic injection of sidecars, add the `sidecar.jaegertracing.io/inject` annotation set to either the string `true` or to the {JaegerShortName} instance name that is returned by running `$ oc get jaegers`. -When you specify `true`, there should be only a single {JaegerShortName} instance for the same namespace as the deployment, otherwise, the Operator cannot determine which {JaegerShortName} instance to use. A specific {JaegerShortName} instance name on a deployment has a higher precedence than `true` applied on its namespace. - -The following snippet shows a simple application that will inject a sidecar, with the agent pointing to the single {JaegerShortName} instance available in the same namespace: - -.Automatic sidecar injection example -[source,yaml] ----- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: myapp - annotations: - "sidecar.jaegertracing.io/inject": "true" # <1> -spec: - selector: - matchLabels: - app: myapp - template: - metadata: - labels: - app: myapp - spec: - containers: - - name: myapp - image: acme/myapp:myversion ----- - -<1> Set to either the string `true` or to the Jaeger instance name. - -When the sidecar is injected, the agent can then be accessed at its default location on `localhost`. 
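To verify that injection succeeded for the `myapp` example above, you can list the container names in the resulting pod; after the Operator processes the annotation, the injected `jaeger-agent` container should appear next to the application container. The label selector below assumes the `app: myapp` label from the example and may differ in your deployment.

[source,terminal]
----
$ oc get pods -l app=myapp -o jsonpath='{.items[*].spec.containers[*].name}'
----

.Example output
[source,terminal]
----
myapp jaeger-agent
----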
diff --git a/modules/distr-tracing-sidecar-manual.adoc b/modules/distr-tracing-sidecar-manual.adoc deleted file mode 100644 index 1f914ed319cb..000000000000 --- a/modules/distr-tracing-sidecar-manual.adoc +++ /dev/null @@ -1,58 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// -:_content-type: REFERENCE -[id="distr-tracing-sidecar-manual_{context}"] -= Manually injecting sidecars - - -The {JaegerName} Operator can only automatically inject Jaeger Agent sidecars into Deployment workloads. For controller types other than `Deployments`, such as `StatefulSets`and `DaemonSets`, you can manually define the Jaeger agent sidecar in your specification. - -The following snippet shows the manual definition you can include in your containers section for a Jaeger agent sidecar: - -.Sidecar definition example for a `StatefulSet` -[source,yaml] ----- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: example-statefulset - namespace: example-ns - labels: - app: example-app -spec: - - spec: - containers: - - name: example-app - image: acme/myapp:myversion - ports: - - containerPort: 8080 - protocol: TCP - - name: jaeger-agent - image: registry.redhat.io/distributed-tracing/jaeger-agent-rhel7:<version> - # The agent version must match the Operator version - imagePullPolicy: IfNotPresent - ports: - - containerPort: 5775 - name: zk-compact-trft - protocol: UDP - - containerPort: 5778 - name: config-rest - protocol: TCP - - containerPort: 6831 - name: jg-compact-trft - protocol: UDP - - containerPort: 6832 - name: jg-binary-trft - protocol: UDP - - containerPort: 14271 - name: admin-http - protocol: TCP - args: - - --reporter.grpc.host-port=dns:///jaeger-collector-headless.example-ns:14250 - - --reporter.type=grpc ----- - -The agent can then be accessed at its default location on localhost. diff --git a/modules/distr-tracing-upgrading-es5-es6.adoc b/modules/distr-tracing-upgrading-es5-es6.adoc deleted file mode 100644 index d2c0107a0622..000000000000 --- a/modules/distr-tracing-upgrading-es5-es6.adoc +++ /dev/null @@ -1,88 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-updating -//// - -[id="distr-tracing-upgrading-es5-es6_{context}"] -= Upgrading from Elasticsearch 5 to 6 - -When updating from Elasticsearch 5 to 6, you must delete your {JaegerShortName} instance, and then recreate the {JaegerShortName} instance because of an issue with certificates. Re-creating the {JaegerShortName} instance triggers the creation of a new set of certificates. If you are using persistent storage, the same volumes can be mounted for the new {JaegerShortName} instance as long as the {JaegerShortName} name and namespace for the new {JaegerShortName} instance are the same as the deleted {JaegerShortName} instance. - -.Procedure if {JaegerShortName} is installed as part of {SMProductName} - -. Determine the name of your Jaeger custom resource file. In this example, `istio-system` is the control plane namespace. -+ -[source,terminal] ----- -$ oc get jaeger -n <istio-system> ----- -+ -You should see something like the following: -+ -[source,terminal] ----- -NAME STATUS VERSION STRATEGY STORAGE AGE -jaeger Running 1.24.1 production elasticsearch d21h ----- -+ -. Copy the generated custom resource file into a temporary directory: -+ -[source,terminal] ----- -$ oc get jaeger jaeger -oyaml -n <istio-system> > /tmp/jaeger-cr.yaml ----- -+ -. 
Delete the {JaegerShortName} instance: -+ -[source,terminal] ----- -$ oc delete jaeger jaeger -n <istio-system> ----- -+ -. Recreate the {JaegerShortName} instance from your copy of the custom resource file: -+ -[source,terminal] ----- -$ oc create -f /tmp/jaeger-cr.yaml -n <istio-system> ----- -+ -. Delete the copy of the generated custom resource file: -+ -[source,terminal] ----- -$ rm /tmp/jaeger-cr.yaml ----- - - -.Procedure if {JaegerShortName} not installed as part of {SMProductName} - -Before you begin, create a copy of your Jaeger custom resource file. - -. Delete the {JaegerShortName} instance by deleting the custom resource file: -+ -[source,terminal] ----- -$ oc delete -f <jaeger-cr-file> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc delete -f jaeger-prod-elasticsearch.yaml ----- -+ -. Recreate your {JaegerShortName} instance from the backup copy of your custom resource file: -+ -[source,terminal] ----- -$ oc create -f <jaeger-cr-file> ----- -+ -. Validate that your pods have restarted: -+ -[source,terminal] ----- -$ oc get pods -n <tracing-system> -w ----- -+ diff --git a/modules/dr-hosted-cluster-within-aws-region-backup.adoc b/modules/dr-hosted-cluster-within-aws-region-backup.adoc deleted file mode 100644 index cb30ed2bac4d..000000000000 --- a/modules/dr-hosted-cluster-within-aws-region-backup.adoc +++ /dev/null @@ -1,222 +0,0 @@ -// Module included in the following assembly: -// -// * hcp-backup-restore-dr.adoc - -:_content-type: PROCEDURE -[id="dr-hosted-cluster-within-aws-region-backup_{context}"] -= Backing up a hosted cluster - -To recover your hosted cluster in your target management cluster, you first need to back up all of the relevant data. - -.Procedure - -. Create a configmap file to declare the source management cluster by entering this command: -+ -[source,terminal] ----- -$ oc create configmap mgmt-parent-cluster -n default --from-literal=from=${MGMT_CLUSTER_NAME} ----- - -. Shut down the reconciliation in the hosted cluster and in the node pools by entering these commands: -+ -[source,terminal] ----- -PAUSED_UNTIL="true" -oc patch -n ${HC_CLUSTER_NS} hostedclusters/${HC_CLUSTER_NAME} -p '{"spec":{"pausedUntil":"'${PAUSED_UNTIL}'"}}' --type=merge -oc scale deployment -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --replicas=0 kube-apiserver openshift-apiserver openshift-oauth-apiserver control-plane-operator ----- -+ -[source,terminal] ----- -PAUSED_UNTIL="true" -oc patch -n ${HC_CLUSTER_NS} hostedclusters/${HC_CLUSTER_NAME} -p '{"spec":{"pausedUntil":"'${PAUSED_UNTIL}'"}}' --type=merge -oc patch -n ${HC_CLUSTER_NS} nodepools/${NODEPOOLS} -p '{"spec":{"pausedUntil":"'${PAUSED_UNTIL}'"}}' --type=merge -oc scale deployment -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --replicas=0 kube-apiserver openshift-apiserver openshift-oauth-apiserver control-plane-operator ----- - -. Back up etcd and upload the data to an S3 bucket by running this bash script: -+ -[TIP] -==== -Wrap this script in a function and call it from the main function. 
-==== -+ -[source,terminal] ----- -# ETCD Backup -ETCD_PODS="etcd-0" -if [ "${CONTROL_PLANE_AVAILABILITY_POLICY}" = "HighlyAvailable" ]; then - ETCD_PODS="etcd-0 etcd-1 etcd-2" -fi - -for POD in ${ETCD_PODS}; do - # Create an etcd snapshot - oc exec -it ${POD} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -- env ETCDCTL_API=3 /usr/bin/etcdctl --cacert /etc/etcd/tls/client/etcd-client-ca.crt --cert /etc/etcd/tls/client/etcd-client.crt --key /etc/etcd/tls/client/etcd-client.key --endpoints=localhost:2379 snapshot save /var/lib/data/snapshot.db - oc exec -it ${POD} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -- env ETCDCTL_API=3 /usr/bin/etcdctl -w table snapshot status /var/lib/data/snapshot.db - - FILEPATH="/${BUCKET_NAME}/${HC_CLUSTER_NAME}-${POD}-snapshot.db" - CONTENT_TYPE="application/x-compressed-tar" - DATE_VALUE=`date -R` - SIGNATURE_STRING="PUT\n\n${CONTENT_TYPE}\n${DATE_VALUE}\n${FILEPATH}" - - set +x - ACCESS_KEY=$(grep aws_access_key_id ${AWS_CREDS} | head -n1 | cut -d= -f2 | sed "s/ //g") - SECRET_KEY=$(grep aws_secret_access_key ${AWS_CREDS} | head -n1 | cut -d= -f2 | sed "s/ //g") - SIGNATURE_HASH=$(echo -en ${SIGNATURE_STRING} | openssl sha1 -hmac "${SECRET_KEY}" -binary | base64) - set -x - - # FIXME: this is pushing to the OIDC bucket - oc exec -it etcd-0 -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -- curl -X PUT -T "/var/lib/data/snapshot.db" \ - -H "Host: ${BUCKET_NAME}.s3.amazonaws.com" \ - -H "Date: ${DATE_VALUE}" \ - -H "Content-Type: ${CONTENT_TYPE}" \ - -H "Authorization: AWS ${ACCESS_KEY}:${SIGNATURE_HASH}" \ - https://${BUCKET_NAME}.s3.amazonaws.com/${HC_CLUSTER_NAME}-${POD}-snapshot.db -done ----- -+ -For more information about backing up etcd, see "Backing up and restoring etcd on a hosted cluster". - -. Back up Kubernetes and {product-title} objects by entering the following commands. 
You need to back up the following objects: - - * `HostedCluster` and `NodePool` objects from the HostedCluster namespace - * `HostedCluster` secrets from the HostedCluster namespace - * `HostedControlPlane` from the Hosted Control Plane namespace - * `Cluster` from the Hosted Control Plane namespace - * `AWSCluster`, `AWSMachineTemplate`, and `AWSMachine` from the Hosted Control Plane namespace - * `MachineDeployments`, `MachineSets`, and `Machines` from the Hosted Control Plane namespace - * `ControlPlane` secrets from the Hosted Control Plane namespace -+ -[source,terminal] ----- -mkdir -p ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS} ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -chmod 700 ${BACKUP_DIR}/namespaces/ - -# HostedCluster -echo "Backing Up HostedCluster Objects:" -oc get hc ${HC_CLUSTER_NAME} -n ${HC_CLUSTER_NS} -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/hc-${HC_CLUSTER_NAME}.yaml -echo "--> HostedCluster" -sed -i '' -e '/^status:$/,$d' ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/hc-${HC_CLUSTER_NAME}.yaml - -# NodePool -oc get np ${NODEPOOLS} -n ${HC_CLUSTER_NS} -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/np-${NODEPOOLS}.yaml -echo "--> NodePool" -sed -i '' -e '/^status:$/,$ d' ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/np-${NODEPOOLS}.yaml - -# Secrets in the HC Namespace -echo "--> HostedCluster Secrets:" -for s in $(oc get secret -n ${HC_CLUSTER_NS} | grep "^${HC_CLUSTER_NAME}" | awk '{print $1}'); do - oc get secret -n ${HC_CLUSTER_NS} $s -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/secret-${s}.yaml -done - -# Secrets in the HC Control Plane Namespace -echo "--> HostedCluster ControlPlane Secrets:" -for s in $(oc get secret -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} | egrep -v "docker|service-account-token|oauth-openshift|NAME|token-${HC_CLUSTER_NAME}" | awk '{print $1}'); do - oc get secret -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} $s -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/secret-${s}.yaml -done - -# Hosted Control Plane -echo "--> HostedControlPlane:" -oc get hcp ${HC_CLUSTER_NAME} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/hcp-${HC_CLUSTER_NAME}.yaml - -# Cluster -echo "--> Cluster:" -CL_NAME=$(oc get hcp ${HC_CLUSTER_NAME} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o jsonpath={.metadata.labels.\*} | grep ${HC_CLUSTER_NAME}) -oc get cluster ${CL_NAME} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/cl-${HC_CLUSTER_NAME}.yaml - -# AWS Cluster -echo "--> AWS Cluster:" -oc get awscluster ${HC_CLUSTER_NAME} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/awscl-${HC_CLUSTER_NAME}.yaml - -# AWS MachineTemplate -echo "--> AWS Machine Template:" -oc get awsmachinetemplate ${NODEPOOLS} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/awsmt-${HC_CLUSTER_NAME}.yaml - -# AWS Machines -echo "--> AWS Machine:" -CL_NAME=$(oc get hcp ${HC_CLUSTER_NAME} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o jsonpath={.metadata.labels.\*} | grep ${HC_CLUSTER_NAME}) -for s in $(oc get awsmachines -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --no-headers | grep ${CL_NAME} | cut -f1 -d\ ); do - oc get -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} awsmachines $s -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/awsm-${s}.yaml -done - -# MachineDeployments -echo "--> HostedCluster 
MachineDeployments:" -for s in $(oc get machinedeployment -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o name); do - mdp_name=$(echo ${s} | cut -f 2 -d /) - oc get -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} $s -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/machinedeployment-${mdp_name}.yaml -done - -# MachineSets -echo "--> HostedCluster MachineSets:" -for s in $(oc get machineset -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o name); do - ms_name=$(echo ${s} | cut -f 2 -d /) - oc get -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} $s -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/machineset-${ms_name}.yaml -done - -# Machines -echo "--> HostedCluster Machine:" -for s in $(oc get machine -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o name); do - m_name=$(echo ${s} | cut -f 2 -d /) - oc get -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} $s -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/machine-${m_name}.yaml -done ----- - -. Clean up the `ControlPlane` routes by entering this command: -+ -[source,terminal] ----- -$ oc delete routes -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --all ----- -+ -By entering that command, you enable the ExternalDNS Operator to delete the Route53 entries. - -. Verify that the Route53 entries are clean by running this script: -+ -[source,terminal] ----- -function clean_routes() { - - if [[ -z "${1}" ]];then - echo "Give me the NS where to clean the routes" - exit 1 - fi - - # Constants - if [[ -z "${2}" ]];then - echo "Give me the Route53 zone ID" - exit 1 - fi - - ZONE_ID=${2} - ROUTES=10 - timeout=40 - count=0 - - # This allows us to remove the ownership in the AWS for the API route - oc delete route -n ${1} --all - - while [ ${ROUTES} -gt 2 ] - do - echo "Waiting for ExternalDNS Operator to clean the DNS Records in AWS Route53 where the zone id is: ${ZONE_ID}..." - echo "Try: (${count}/${timeout})" - sleep 10 - if [[ $count -eq timeout ]];then - echo "Timeout waiting for cleaning the Route53 DNS records" - exit 1 - fi - count=$((count+1)) - ROUTES=$(aws route53 list-resource-record-sets --hosted-zone-id ${ZONE_ID} --max-items 10000 --output json | grep -c ${EXTERNAL_DNS_DOMAIN}) - done -} - -# SAMPLE: clean_routes "<HC ControlPlane Namespace>" "<AWS_ZONE_ID>" -clean_routes "${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}" "${AWS_ZONE_ID}" ----- - -.Verification - -Check all of the {product-title} objects and the S3 bucket to verify that everything looks as expected. - -.Next steps - -Restore your hosted cluster. diff --git a/modules/dr-hosted-cluster-within-aws-region-delete.adoc b/modules/dr-hosted-cluster-within-aws-region-delete.adoc deleted file mode 100644 index 887e8c67ed50..000000000000 --- a/modules/dr-hosted-cluster-within-aws-region-delete.adoc +++ /dev/null @@ -1,132 +0,0 @@ -// Module included in the following assembly: -// -// * hcp-backup-restore-dr.adoc - -:_content-type: PROCEDURE -[id="dr-hosted-cluster-within-aws-region-delete_{context}"] -= Deleting a hosted cluster from your source management cluster - -After you back up your hosted cluster and restore it to your destination management cluster, you shut down and delete the hosted cluster on your source management cluster. - -.Prerequisites - -You backed up your data and restored it to your source management cluster. - -[TIP] -==== -Ensure that the `kubeconfig` file of the destination management cluster is placed as it is set in the `KUBECONFIG` variable or, if you use the script, in the `MGMT_KUBECONFIG` variable. 
Use `export KUBECONFIG=<Kubeconfig FilePath>` or, if you use the script, use `export KUBECONFIG=${MGMT_KUBECONFIG}`. -==== - -.Procedure - -. Scale the `deployment` and `statefulset` objects by entering these commands: -+ -[source,terminal] ----- -# Just in case -export KUBECONFIG=${MGMT_KUBECONFIG} - -# Scale down deployments -oc scale deployment -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --replicas=0 --all -oc scale statefulset.apps -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --replicas=0 --all -sleep 15 ----- - -. Delete the `NodePool` objects by entering these commands: -+ -[source,terminal] ----- -NODEPOOLS=$(oc get nodepools -n ${HC_CLUSTER_NS} -o=jsonpath='{.items[?(@.spec.clusterName=="'${HC_CLUSTER_NAME}'")].metadata.name}') -if [[ ! -z "${NODEPOOLS}" ]];then - oc patch -n "${HC_CLUSTER_NS}" nodepool ${NODEPOOLS} --type=json --patch='[ { "op":"remove", "path": "/metadata/finalizers" }]' - oc delete np -n ${HC_CLUSTER_NS} ${NODEPOOLS} -fi ----- - -. Delete the `machine` and `machineset` objects by entering these commands: -+ -[source,terminal] ----- -# Machines -for m in $(oc get machines -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o name); do - oc patch -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} ${m} --type=json --patch='[ { "op":"remove", "path": "/metadata/finalizers" }]' || true - oc delete -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} ${m} || true -done - -oc delete machineset -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --all || true ----- - -. Delete the cluster object by entering these commands: -+ -[source,terminal] ----- -# Cluster -C_NAME=$(oc get cluster -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o name) -oc patch -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} ${C_NAME} --type=json --patch='[ { "op":"remove", "path": "/metadata/finalizers" }]' -oc delete cluster.cluster.x-k8s.io -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --all ----- - -. Delete the AWS machines (Kubernetes objects) by entering these commands. Do not worry about deleting the real AWS machines. The cloud instances will not be affected. -+ -[source,terminal] ----- -# AWS Machines -for m in $(oc get awsmachine.infrastructure.cluster.x-k8s.io -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o name) -do - oc patch -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} ${m} --type=json --patch='[ { "op":"remove", "path": "/metadata/finalizers" }]' || true - oc delete -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} ${m} || true -done ----- - -. Delete the `HostedControlPlane` and `ControlPlane` HC namespace objects by entering these commands: -+ -[source,terminal] ----- -# Delete HCP and ControlPlane HC NS -oc patch -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} hostedcontrolplane.hypershift.openshift.io ${HC_CLUSTER_NAME} --type=json --patch='[ { "op":"remove", "path": "/metadata/finalizers" }]' -oc delete hostedcontrolplane.hypershift.openshift.io -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --all -oc delete ns ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} || true ----- - -. 
Delete the `HostedCluster` and HC namespace objects by entering these commands: -+ -[source,terminal] ----- -# Delete HC and HC Namespace -oc -n ${HC_CLUSTER_NS} patch hostedclusters ${HC_CLUSTER_NAME} -p '{"metadata":{"finalizers":null}}' --type merge || true -oc delete hc -n ${HC_CLUSTER_NS} ${HC_CLUSTER_NAME} || true -oc delete ns ${HC_CLUSTER_NS} || true ----- - -.Verification - -* To verify that everything works, enter these commands: -+ -[source,terminal] ----- -# Validations -export KUBECONFIG=${MGMT2_KUBECONFIG} - -oc get hc -n ${HC_CLUSTER_NS} -oc get np -n ${HC_CLUSTER_NS} -oc get pod -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -oc get machines -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} - -# Inside the HostedCluster -export KUBECONFIG=${HC_KUBECONFIG} -oc get clusterversion -oc get nodes ----- - -.Next steps - -Delete the OVN pods in the hosted cluster so that you can connect to the new OVN control plane that runs in the new management cluster: - -. Load the `KUBECONFIG` environment variable with the hosted cluster's kubeconfig path. - -. Enter this command: -+ -[source,terminal] ----- -$ oc delete pod -n openshift-ovn-kubernetes --all ----- diff --git a/modules/dr-hosted-cluster-within-aws-region-restore.adoc b/modules/dr-hosted-cluster-within-aws-region-restore.adoc deleted file mode 100644 index 2e73f04aa59e..000000000000 --- a/modules/dr-hosted-cluster-within-aws-region-restore.adoc +++ /dev/null @@ -1,157 +0,0 @@ -// Module included in the following assembly: -// -// * hcp-backup-restore-dr.adoc - -:_content-type: PROCEDURE -[id="dr-hosted-cluster-within-aws-region-restore_{context}"] -= Restoring a hosted cluster - -Gather all of the objects that you backed up and restore them in your destination management cluster. - -.Prerequisites - -You backed up the data from your source management cluster. - -[TIP] -==== -Ensure that the `kubeconfig` file of the destination management cluster is placed as it is set in the `KUBECONFIG` variable or, if you use the script, in the `MGMT2_KUBECONFIG` variable. Use `export KUBECONFIG=<Kubeconfig FilePath>` or, if you use the script, use `export KUBECONFIG=${MGMT2_KUBECONFIG}`. -==== - -.Procedure - -. Verify that the new management cluster does not contain any namespaces from the cluster that you are restoring by entering these commands: -+ -[source,terminal] ----- -# Just in case -export KUBECONFIG=${MGMT2_KUBECONFIG} -BACKUP_DIR=${HC_CLUSTER_DIR}/backup - -# Namespace deletion in the destination Management cluster -$ oc delete ns ${HC_CLUSTER_NS} || true -$ oc delete ns ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} || true ----- - -. Re-create the deleted namespaces by entering these commands: -+ -[source,terminal] ----- -# Namespace creation -$ oc new-project ${HC_CLUSTER_NS} -$ oc new-project ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} ----- - -. Restore the secrets in the HC namespace by entering this command: -+ -[source,terminal] ----- -$ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/secret-* ----- - -. Restore the objects in the `HostedCluster` control plane namespace by entering these commands: -+ -[source,terminal] ----- -# Secrets -$ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/secret-* - -# Cluster -$ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/hcp-* -$ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/cl-* ----- - -.
If you are recovering the nodes and the node pool to reuse AWS instances, restore the objects in the HC control plane namespace by entering these commands: -+ -[source,terminal] ----- -# AWS -$ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/awscl-* -$ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/awsmt-* -$ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/awsm-* - -# Machines -$ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/machinedeployment-* -$ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/machineset-* -$ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/machine-* ----- - -. Restore the etcd data and the hosted cluster by running this bash script: -+ -[source,terminal] ----- -ETCD_PODS="etcd-0" -if [ "${CONTROL_PLANE_AVAILABILITY_POLICY}" = "HighlyAvailable" ]; then - ETCD_PODS="etcd-0 etcd-1 etcd-2" -fi - -HC_RESTORE_FILE=${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/hc-${HC_CLUSTER_NAME}-restore.yaml -HC_BACKUP_FILE=${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/hc-${HC_CLUSTER_NAME}.yaml -HC_NEW_FILE=${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/hc-${HC_CLUSTER_NAME}-new.yaml -cat ${HC_BACKUP_FILE} > ${HC_NEW_FILE} -cat > ${HC_RESTORE_FILE} <<EOF - restoreSnapshotURL: -EOF - -for POD in ${ETCD_PODS}; do - # Create a pre-signed URL for the etcd snapshot - ETCD_SNAPSHOT="s3://${BUCKET_NAME}/${HC_CLUSTER_NAME}-${POD}-snapshot.db" - ETCD_SNAPSHOT_URL=$(AWS_DEFAULT_REGION=${MGMT2_REGION} aws s3 presign ${ETCD_SNAPSHOT}) - - # FIXME no CLI support for restoreSnapshotURL yet - cat >> ${HC_RESTORE_FILE} <<EOF - - "${ETCD_SNAPSHOT_URL}" -EOF -done - -cat ${HC_RESTORE_FILE} - -if ! grep ${HC_CLUSTER_NAME}-snapshot.db ${HC_NEW_FILE}; then - sed -i '' -e "/type: PersistentVolume/r ${HC_RESTORE_FILE}" ${HC_NEW_FILE} - sed -i '' -e '/pausedUntil:/d' ${HC_NEW_FILE} -fi - -HC=$(oc get hc -n ${HC_CLUSTER_NS} ${HC_CLUSTER_NAME} -o name || true) -if [[ ${HC} == "" ]];then - echo "Deploying HC Cluster: ${HC_CLUSTER_NAME} in ${HC_CLUSTER_NS} namespace" - oc apply -f ${HC_NEW_FILE} -else - echo "HC Cluster ${HC_CLUSTER_NAME} already exists, avoiding step" -fi ----- - -. If you are recovering the nodes and the node pool to reuse AWS instances, restore the node pool by entering this command: -+ -[source,terminal] ----- -oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/np-* ----- - -.Verification - -* To verify that the nodes are fully restored, use this function: -+ -[source,terminal] ----- -timeout=40 -count=0 -NODE_STATUS=$(oc get nodes --kubeconfig=${HC_KUBECONFIG} | grep -v NotReady | grep -c "worker") || NODE_STATUS=0 - -while [ ${NODE_POOL_REPLICAS} != ${NODE_STATUS} ] -do - echo "Waiting for Nodes to be Ready in the destination MGMT Cluster: ${MGMT2_CLUSTER_NAME}" - echo "Try: (${count}/${timeout})" - sleep 30 - if [[ $count -eq timeout ]];then - echo "Timeout waiting for Nodes in the destination MGMT Cluster" - exit 1 - fi - count=$((count+1)) - NODE_STATUS=$(oc get nodes --kubeconfig=${HC_KUBECONFIG} | grep -v NotReady | grep -c "worker") || NODE_STATUS=0 -done ----- - -.Next steps - -Shut down and delete your cluster. 
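The verification loop above polls `oc get nodes` against the hosted cluster and counts `worker` entries until the count matches `NODE_POOL_REPLICAS`. As a lighter-weight complementary check (a sketch that assumes the hosted cluster kubeconfig path is still available in `HC_KUBECONFIG` and that every restored node is expected to reach the `Ready` condition), you can let `oc wait` block until the nodes report `Ready`:

[source,terminal]
----
# Block until every node in the restored hosted cluster reports Ready,
# or give up after 20 minutes. Adjust the timeout to your environment.
$ oc wait node --all --for=condition=Ready --timeout=20m --kubeconfig=${HC_KUBECONFIG}
----

Note that `oc wait` only confirms that the nodes that have already joined are healthy; it does not compare the node count against the expected number of node pool replicas.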
- - diff --git a/modules/dr-hosted-cluster-within-aws-region-script.adoc b/modules/dr-hosted-cluster-within-aws-region-script.adoc deleted file mode 100644 index a1049b78ee52..000000000000 --- a/modules/dr-hosted-cluster-within-aws-region-script.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assembly: -// -// * hcp-backup-restore-dr.adoc - -:_content-type: PROCEDURE -[id="dr-hosted-cluster-within-aws-region-script_{context}"] -= Running a script to back up and restore a hosted cluster - -To expedite the process to back up a hosted cluster and restore it within the same region on AWS, you can modify and run a script. - -.Procedure - -. Replace the variables in the following script with your information: -+ -[source,terminal] ----- -# Fill the Common variables to fit your environment, this is just a sample -SSH_KEY_FILE=${HOME}/.ssh/id_rsa.pub -BASE_PATH=${HOME}/hypershift -BASE_DOMAIN="aws.sample.com" -PULL_SECRET_FILE="${HOME}/pull_secret.json" -AWS_CREDS="${HOME}/.aws/credentials" -CONTROL_PLANE_AVAILABILITY_POLICY=SingleReplica -HYPERSHIFT_PATH=${BASE_PATH}/src/hypershift -HYPERSHIFT_CLI=${HYPERSHIFT_PATH}/bin/hypershift -HYPERSHIFT_IMAGE=${HYPERSHIFT_IMAGE:-"quay.io/${USER}/hypershift:latest"} -NODE_POOL_REPLICAS=${NODE_POOL_REPLICAS:-2} - -# MGMT Context -MGMT_REGION=us-west-1 -MGMT_CLUSTER_NAME="${USER}-dev" -MGMT_CLUSTER_NS=${USER} -MGMT_CLUSTER_DIR="${BASE_PATH}/hosted_clusters/${MGMT_CLUSTER_NS}-${MGMT_CLUSTER_NAME}" -MGMT_KUBECONFIG="${MGMT_CLUSTER_DIR}/kubeconfig" - -# MGMT2 Context -MGMT2_CLUSTER_NAME="${USER}-dest" -MGMT2_CLUSTER_NS=${USER} -MGMT2_CLUSTER_DIR="${BASE_PATH}/hosted_clusters/${MGMT2_CLUSTER_NS}-${MGMT2_CLUSTER_NAME}" -MGMT2_KUBECONFIG="${MGMT2_CLUSTER_DIR}/kubeconfig" - -# Hosted Cluster Context -HC_CLUSTER_NS=clusters -HC_REGION=us-west-1 -HC_CLUSTER_NAME="${USER}-hosted" -HC_CLUSTER_DIR="${BASE_PATH}/hosted_clusters/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}" -HC_KUBECONFIG="${HC_CLUSTER_DIR}/kubeconfig" -BACKUP_DIR=${HC_CLUSTER_DIR}/backup - -BUCKET_NAME="${USER}-hosted-${MGMT_REGION}" - -# DNS -AWS_ZONE_ID="Z026552815SS3YPH9H6MG" -EXTERNAL_DNS_DOMAIN="guest.jpdv.aws.kerbeross.com" ----- - -. Save the script to your local file system. - -. Run the script by entering the following command: -+ -[source,terminal] ----- -source <env_file> ----- -+ -where: `env_file` is the name of the file where you saved the script. -+ -The migration script is maintained at the following repository: link:https://github.com/openshift/hypershift/blob/main/contrib/migration/migrate-hcp.sh[https://github.com/openshift/hypershift/blob/main/contrib/migration/migrate-hcp.sh]. \ No newline at end of file diff --git a/modules/dr-recover-expired-control-plane-certs.adoc b/modules/dr-recover-expired-control-plane-certs.adoc deleted file mode 100644 index c70cda469977..000000000000 --- a/modules/dr-recover-expired-control-plane-certs.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * disaster_recovery/scenario-3-expired-certs.adoc - -:_content-type: PROCEDURE -[id="dr-scenario-3-recovering-expired-certs_{context}"] -= Recovering from expired control plane certificates - -The cluster can automatically recover from expired control plane certificates. - -However, you must manually approve the pending `node-bootstrapper` certificate signing requests (CSRs) to recover kubelet certificates. For user-provisioned installations, you might also need to approve pending kubelet serving CSRs. 
- -Use the following steps to approve the pending CSRs: - -.Procedure - -. Get the list of current CSRs: -+ -[source,terminal] ----- -$ oc get csr ----- -+ -.Example output ----- -NAME AGE SIGNERNAME REQUESTOR CONDITION -csr-2s94x 8m3s kubernetes.io/kubelet-serving system:node:<node_name> Pending <1> -csr-4bd6t 8m3s kubernetes.io/kubelet-serving system:node:<node_name> Pending -csr-4hl85 13m kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending <2> -csr-zhhhp 3m8s kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending -... ----- -<1> A pending kubelet serving CSR (for user-provisioned installations). -<2> A pending `node-bootstrapper` CSR. - -. Review the details of a CSR to verify that it is valid: -+ -[source,terminal] ----- -$ oc describe csr <csr_name> <1> ----- -<1> `<csr_name>` is the name of a CSR from the list of current CSRs. - -. Approve each valid `node-bootstrapper` CSR: -+ -[source,terminal] ----- -$ oc adm certificate approve <csr_name> ----- - -. For user-provisioned installations, approve each valid kubelet serving CSR: -+ -[source,terminal] ----- -$ oc adm certificate approve <csr_name> ----- diff --git a/modules/dr-restoring-cluster-state-about.adoc b/modules/dr-restoring-cluster-state-about.adoc deleted file mode 100644 index afba3a3db5eb..000000000000 --- a/modules/dr-restoring-cluster-state-about.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * disaster_recovery/scenario-2-restoring-cluster-state.adoc - -:_content-type: CONCEPT -[id="dr-scenario-2-restoring-cluster-state-about_{context}"] -= About restoring cluster state - -You can use an etcd backup to restore your cluster to a previous state. This can be used to recover from the following situations: - -* The cluster has lost the majority of control plane hosts (quorum loss). -* An administrator has deleted something critical and must restore to recover the cluster. - -[WARNING] -==== -Restoring to a previous cluster state is a destructive and destabilizing action to take on a running cluster. This should only be used as a last resort. - -If you are able to retrieve data using the Kubernetes API server, then etcd is available and you should not restore using an etcd backup. -==== - -Restoring etcd effectively takes a cluster back in time and all clients will experience a conflicting, parallel history. This can impact the behavior of watching components like kubelets, Kubernetes controller managers, SDN controllers, and persistent volume controllers. - -It can also cause Operator churn: when the content in etcd no longer matches the actual content on disk, the Operators for the Kubernetes API server, Kubernetes controller manager, Kubernetes scheduler, and etcd can become stuck, which might require manual actions to resolve. - -In extreme cases, the cluster can lose track of persistent volumes, delete critical workloads that no longer exist, reimage machines, and rewrite CA bundles with expired certificates.
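One practical way to spot the Operator churn described above after a restore is to list the ClusterOperators that are not reporting the expected healthy combination of conditions. The following is a minimal sketch; the `grep` pattern only filters on the `AVAILABLE`/`PROGRESSING`/`DEGRADED` columns of the default `oc get clusteroperators` output and is an assumption about that column layout rather than a supported interface:

[source,terminal]
----
# List every ClusterOperator, then hide the rows that report the healthy
# combination AVAILABLE=True, PROGRESSING=False, DEGRADED=False.
$ oc get clusteroperators
$ oc get clusteroperators | grep -v 'True *False *False'
----

Operators that remain `Degraded` or keep `Progressing` long after the restore completes are the most likely candidates for the manual intervention mentioned above.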
diff --git a/modules/dr-restoring-cluster-state.adoc b/modules/dr-restoring-cluster-state.adoc deleted file mode 100644 index 548fc9271ca8..000000000000 --- a/modules/dr-restoring-cluster-state.adoc +++ /dev/null @@ -1,730 +0,0 @@ -// Module included in the following assemblies: -// -// * disaster_recovery/scenario-2-restoring-cluster-state.adoc -// * post_installation_configuration/cluster-tasks.adoc - - -:_content-type: PROCEDURE -[id="dr-scenario-2-restoring-cluster-state_{context}"] -= Restoring to a previous cluster state - -You can use a saved etcd backup to restore a previous cluster state or restore a cluster that has lost the majority of control plane hosts. - -[NOTE] -==== -If your cluster uses a control plane machine set, see "Troubleshooting the control plane machine set" for a more simple etcd recovery procedure. -==== - -[IMPORTANT] -==== -When you restore your cluster, you must use an etcd backup that was taken from the same z-stream release. For example, an {product-title} 4.7.2 cluster must use an etcd backup that was taken from 4.7.2. -==== - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. -* A healthy control plane host to use as the recovery host. -* SSH access to control plane hosts. -* A backup directory containing both the etcd snapshot and the resources for the static pods, which were from the same backup. The file names in the directory must be in the following formats: `snapshot_<datetimestamp>.db` and `static_kuberesources_<datetimestamp>.tar.gz`. - -[IMPORTANT] -==== -For non-recovery control plane nodes, it is not required to establish SSH connectivity or to stop the static pods. You can delete and recreate other non-recovery, control plane machines, one by one. -==== - -.Procedure - -. Select a control plane host to use as the recovery host. This is the host that you will run the restore operation on. - -. Establish SSH connectivity to each of the control plane nodes, including the recovery host. -+ -The Kubernetes API server becomes inaccessible after the restore process starts, so you cannot access the control plane nodes. For this reason, it is recommended to establish SSH connectivity to each control plane host in a separate terminal. -+ -[IMPORTANT] -==== -If you do not complete this step, you will not be able to access the control plane hosts to complete the restore procedure, and you will be unable to recover your cluster from this state. -==== - -. Copy the etcd backup directory to the recovery control plane host. -+ -This procedure assumes that you copied the `backup` directory containing the etcd snapshot and the resources for the static pods to the `/home/core/` directory of your recovery control plane host. - -. Stop the static pods on any other control plane nodes. -+ -[NOTE] -==== -It is not required to manually stop the pods on the recovery host. The recovery script will stop the pods on the recovery host. -==== - -.. Access a control plane host that is not the recovery host. - -.. Move the existing etcd pod file out of the kubelet manifest directory: -+ -[source,terminal] ----- -$ sudo mv /etc/kubernetes/manifests/etcd-pod.yaml /tmp ----- - -.. Verify that the etcd pods are stopped. -+ -[source,terminal] ----- -$ sudo crictl ps | grep etcd | egrep -v "operator|etcd-guard" ----- -+ -The output of this command should be empty. If it is not empty, wait a few minutes and check again. - -.. 
Move the existing Kubernetes API server pod file out of the kubelet manifest directory: -+ -[source,terminal] ----- -$ sudo mv /etc/kubernetes/manifests/kube-apiserver-pod.yaml /tmp ----- - -.. Verify that the Kubernetes API server pods are stopped. -+ -[source,terminal] ----- -$ sudo crictl ps | grep kube-apiserver | egrep -v "operator|guard" ----- -+ -The output of this command should be empty. If it is not empty, wait a few minutes and check again. - -.. Move the etcd data directory to a different location: -+ -[source,terminal] ----- -$ sudo mv /var/lib/etcd/ /tmp ----- - -.. Repeat this step on each of the other control plane hosts that is not the recovery host. - -. Access the recovery control plane host. - -. If the cluster-wide proxy is enabled, be sure that you have exported the `NO_PROXY`, `HTTP_PROXY`, and `HTTPS_PROXY` environment variables. -+ -[TIP] -==== -You can check whether the proxy is enabled by reviewing the output of `oc get proxy cluster -o yaml`. The proxy is enabled if the `httpProxy`, `httpsProxy`, and `noProxy` fields have values set. -==== - -. Run the restore script on the recovery control plane host and pass in the path to the etcd backup directory: -+ -[source,terminal] ----- -$ sudo -E /usr/local/bin/cluster-restore.sh /home/core/backup ----- -+ -.Example script output -[source,terminal] ----- -...stopping kube-scheduler-pod.yaml -...stopping kube-controller-manager-pod.yaml -...stopping etcd-pod.yaml -...stopping kube-apiserver-pod.yaml -Waiting for container etcd to stop -.complete -Waiting for container etcdctl to stop -.............................complete -Waiting for container etcd-metrics to stop -complete -Waiting for container kube-controller-manager to stop -complete -Waiting for container kube-apiserver to stop -..........................................................................................complete -Waiting for container kube-scheduler to stop -complete -Moving etcd data-dir /var/lib/etcd/member to /var/lib/etcd-backup -starting restore-etcd static pod -starting kube-apiserver-pod.yaml -static-pod-resources/kube-apiserver-pod-7/kube-apiserver-pod.yaml -starting kube-controller-manager-pod.yaml -static-pod-resources/kube-controller-manager-pod-7/kube-controller-manager-pod.yaml -starting kube-scheduler-pod.yaml -static-pod-resources/kube-scheduler-pod-8/kube-scheduler-pod.yaml ----- -+ -[NOTE] -==== -The restore process can cause nodes to enter the `NotReady` state if the node certificates were updated after the last etcd backup. -==== - -. Check the nodes to ensure they are in the `Ready` state. - -.. Run the following command: -+ -[source,terminal] ----- -$ oc get nodes -w ----- -+ -.Sample output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -host-172-25-75-28 Ready master 3d20h v1.27.3 -host-172-25-75-38 Ready infra,worker 3d20h v1.27.3 -host-172-25-75-40 Ready master 3d20h v1.27.3 -host-172-25-75-65 Ready master 3d20h v1.27.3 -host-172-25-75-74 Ready infra,worker 3d20h v1.27.3 -host-172-25-75-79 Ready worker 3d20h v1.27.3 -host-172-25-75-86 Ready worker 3d20h v1.27.3 -host-172-25-75-98 Ready infra,worker 3d20h v1.27.3 ----- -+ -It can take several minutes for all nodes to report their state. - -.. If any nodes are in the `NotReady` state, log in to the nodes and remove all of the PEM files from the `/var/lib/kubelet/pki` directory on each node. You can SSH into the nodes or use the terminal window in the web console. 
-+ -[source,terminal] ----- -$ ssh -i <ssh-key-path> core@<master-hostname> ----- -+ -.Sample `pki` directory -[source,terminal] ----- -sh-4.4# pwd -/var/lib/kubelet/pki -sh-4.4# ls -kubelet-client-2022-04-28-11-24-09.pem kubelet-server-2022-04-28-11-24-15.pem -kubelet-client-current.pem kubelet-server-current.pem ----- - -. Restart the kubelet service on all control plane hosts. - -.. From the recovery host, run the following command: -+ -[source,terminal] ----- -$ sudo systemctl restart kubelet.service ----- - -.. Repeat this step on all other control plane hosts. - -. Approve the pending CSRs: -+ -[NOTE] -==== -Clusters with no worker nodes, such as single-node clusters or clusters consisting of three schedulable control plane nodes, will not have any pending CSRs to approve. In those scenarios, you can skip this step. -==== - -.. Get the list of current CSRs: -+ -[source,terminal] ----- -$ oc get csr ----- -+ -.Example output ----- -NAME AGE SIGNERNAME REQUESTOR CONDITION -csr-2s94x 8m3s kubernetes.io/kubelet-serving system:node:<node_name> Pending <1> -csr-4bd6t 8m3s kubernetes.io/kubelet-serving system:node:<node_name> Pending <1> -csr-4hl85 13m kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending <2> -csr-zhhhp 3m8s kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending <2> -... ----- -<1> A pending kubelet service CSR (for user-provisioned installations). -<2> A pending `node-bootstrapper` CSR. - -.. Review the details of a CSR to verify that it is valid: -+ -[source,terminal] ----- -$ oc describe csr <csr_name> <1> ----- -<1> `<csr_name>` is the name of a CSR from the list of current CSRs. - -.. Approve each valid `node-bootstrapper` CSR: -+ -[source,terminal] ----- -$ oc adm certificate approve <csr_name> ----- - -.. For user-provisioned installations, approve each valid kubelet service CSR: -+ -[source,terminal] ----- -$ oc adm certificate approve <csr_name> ----- - -. Verify that the single member control plane has started successfully. - -.. From the recovery host, verify that the etcd container is running. -+ -[source,terminal] ----- -$ sudo crictl ps | grep etcd | egrep -v "operator|etcd-guard" ----- -+ -.Example output -[source,terminal] ----- -3ad41b7908e32 36f86e2eeaaffe662df0d21041eb22b8198e0e58abeeae8c743c3e6e977e8009 About a minute ago Running etcd 0 7c05f8af362f0 ----- - -.. From the recovery host, verify that the etcd pod is running. -+ -[source,terminal] ----- -$ oc -n openshift-etcd get pods -l k8s-app=etcd ----- -+ -[NOTE] -==== -If you attempt to run `oc login` prior to running this command and receive the following error, wait a few moments for the authentication controllers to start and try again. - -[source,terminal] ----- -Unable to connect to the server: EOF ----- -==== -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -etcd-ip-10-0-143-125.ec2.internal 1/1 Running 1 2m47s ----- -+ -If the status is `Pending`, or the output lists more than one running etcd pod, wait a few minutes and check again. -+ -[NOTE] -==== -Perform the following step only if you are using `OVNKubernetes` network plugin. -==== - -. Delete the node objects that are associated with control plane hosts that are not the recovery control plane host. -+ -[source,terminal] ----- -$ oc delete node <non-recovery-controlplane-host-1> <non-recovery-controlplane-host-2> ----- - -. 
Verify that the Cluster Network Operator (CNO) redeploys the OVN-Kubernetes control plane and that it no longer references the non-recovery controller IP addresses. To verify this result, regularly check the output of the following command. Wait until it returns an empty result before you proceed with the next step. -+ -[source,terminal] ----- -$ oc -n openshift-ovn-kubernetes get ds/ovnkube-master -o yaml | grep -E '<non-recovery_controller_ip_1>|<non-recovery_controller_ip_2>' ----- -+ -[NOTE] -==== -It can take at least 5-10 minutes for the OVN-Kubernetes control plane to be redeployed and the previous command to return empty output. -==== - -. Restart the Open Virtual Network (OVN) Kubernetes pods on all the hosts. -+ -[NOTE] -==== -Validating and mutating admission webhooks can reject pods. If you add any additional webhooks with the `failurePolicy` set to `Fail`, then they can reject pods and the restoration process can fail. You can avoid this by saving and deleting webhooks while restoring the cluster state. After the cluster state is restored successfully, you can enable the webhooks again. - -Alternatively, you can temporarily set the `failurePolicy` to `Ignore` while restoring the cluster state. After the cluster state is restored successfully, you can set the `failurePolicy` to `Fail`. -==== - -.. Remove the northbound database (nbdb) and southbound database (sbdb). Access the recovery host and the remaining control plane nodes by using Secure Shell (SSH) and run the following command: -+ -[source,terminal] ----- -$ sudo rm -f /var/lib/ovn/etc/*.db ----- - -.. Delete all OVN-Kubernetes control plane pods by running the following command: -+ -[source,terminal] ----- -$ oc delete pods -l app=ovnkube-master -n openshift-ovn-kubernetes ----- - -.. Ensure that any OVN-Kubernetes control plane pods are deployed again and are in a `Running` state by running the following command: -+ -[source,terminal] ----- -$ oc get pods -l app=ovnkube-master -n openshift-ovn-kubernetes ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -ovnkube-master-nb24h 4/4 Running 0 48s ----- - -.. Delete all `ovnkube-node` pods by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-ovn-kubernetes -o name | grep ovnkube-node | while read p ; do oc delete $p -n openshift-ovn-kubernetes ; done ----- - -.. Ensure that all the `ovnkube-node` pods are deployed again and are in a `Running` state by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-ovn-kubernetes | grep ovnkube-node ----- - -. Delete and re-create other non-recovery, control plane machines, one by one. After the machines are re-created, a new revision is forced and etcd automatically scales up. -+ -** If you use a user-provisioned bare metal installation, you can re-create a control plane machine by using the same method that you used to originally create it. For more information, see "Installing a user-provisioned cluster on bare metal". -+ -[WARNING] -==== -Do not delete and re-create the machine for the recovery host. -==== -+ -** If you are running installer-provisioned infrastructure, or you used the Machine API to create your machines, follow these steps: -+ -[WARNING] -==== -Do not delete and re-create the machine for the recovery host. - -For bare metal installations on installer-provisioned infrastructure, control plane machines are not re-created. For more information, see "Replacing a bare-metal control plane node". -==== -.. 
Obtain the machine for one of the lost control plane hosts. -+ -In a terminal that has access to the cluster as a cluster-admin user, run the following command: -+ -[source,terminal] ----- -$ oc get machines -n openshift-machine-api -o wide ----- -+ -Example output: -+ -[source,terminal] ----- -NAME PHASE TYPE REGION ZONE AGE NODE PROVIDERID STATE -clustername-8qw5l-master-0 Running m4.xlarge us-east-1 us-east-1a 3h37m ip-10-0-131-183.ec2.internal aws:///us-east-1a/i-0ec2782f8287dfb7e stopped <1> -clustername-8qw5l-master-1 Running m4.xlarge us-east-1 us-east-1b 3h37m ip-10-0-143-125.ec2.internal aws:///us-east-1b/i-096c349b700a19631 running -clustername-8qw5l-master-2 Running m4.xlarge us-east-1 us-east-1c 3h37m ip-10-0-154-194.ec2.internal aws:///us-east-1c/i-02626f1dba9ed5bba running -clustername-8qw5l-worker-us-east-1a-wbtgd Running m4.large us-east-1 us-east-1a 3h28m ip-10-0-129-226.ec2.internal aws:///us-east-1a/i-010ef6279b4662ced running -clustername-8qw5l-worker-us-east-1b-lrdxb Running m4.large us-east-1 us-east-1b 3h28m ip-10-0-144-248.ec2.internal aws:///us-east-1b/i-0cb45ac45a166173b running -clustername-8qw5l-worker-us-east-1c-pkg26 Running m4.large us-east-1 us-east-1c 3h28m ip-10-0-170-181.ec2.internal aws:///us-east-1c/i-06861c00007751b0a running ----- -<1> This is the control plane machine for the lost control plane host, `ip-10-0-131-183.ec2.internal`. - -.. Save the machine configuration to a file on your file system: -+ -[source,terminal] ----- -$ oc get machine clustername-8qw5l-master-0 \ <1> - -n openshift-machine-api \ - -o yaml \ - > new-master-machine.yaml ----- -<1> Specify the name of the control plane machine for the lost control plane host. - -.. Edit the `new-master-machine.yaml` file that was created in the previous step to assign a new name and remove unnecessary fields. - -... Remove the entire `status` section: -+ -[source,terminal] ----- -status: - addresses: - - address: 10.0.131.183 - type: InternalIP - - address: ip-10-0-131-183.ec2.internal - type: InternalDNS - - address: ip-10-0-131-183.ec2.internal - type: Hostname - lastUpdated: "2020-04-20T17:44:29Z" - nodeRef: - kind: Node - name: ip-10-0-131-183.ec2.internal - uid: acca4411-af0d-4387-b73e-52b2484295ad - phase: Running - providerStatus: - apiVersion: awsproviderconfig.openshift.io/v1beta1 - conditions: - - lastProbeTime: "2020-04-20T16:53:50Z" - lastTransitionTime: "2020-04-20T16:53:50Z" - message: machine successfully created - reason: MachineCreationSucceeded - status: "True" - type: MachineCreation - instanceId: i-0fdb85790d76d0c3f - instanceState: stopped - kind: AWSMachineProviderStatus ----- - -... Change the `metadata.name` field to a new name. -+ -It is recommended to keep the same base name as the old machine and change the ending number to the next available number. In this example, `clustername-8qw5l-master-0` is changed to `clustername-8qw5l-master-3`: -+ -[source,terminal] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: Machine -metadata: - ... - name: clustername-8qw5l-master-3 - ... ----- - -... Remove the `spec.providerID` field: -+ -[source,terminal] ----- -providerID: aws:///us-east-1a/i-0fdb85790d76d0c3f ----- - -... Remove the `metadata.annotations` and `metadata.generation` fields: -+ -[source,terminal] ----- -annotations: - machine.openshift.io/instance-state: running -... -generation: 2 ----- - -... 
Remove the `metadata.resourceVersion` and `metadata.uid` fields: -+ -[source,terminal] ----- -resourceVersion: "13291" -uid: a282eb70-40a2-4e89-8009-d05dd420d31a ----- - -.. Delete the machine of the lost control plane host: -+ -[source,terminal] ----- -$ oc delete machine -n openshift-machine-api clustername-8qw5l-master-0 <1> ----- -<1> Specify the name of the control plane machine for the lost control plane host. - -.. Verify that the machine was deleted: -+ -[source,terminal] ----- -$ oc get machines -n openshift-machine-api -o wide ----- -+ -Example output: -+ -[source,terminal] ----- -NAME PHASE TYPE REGION ZONE AGE NODE PROVIDERID STATE -clustername-8qw5l-master-1 Running m4.xlarge us-east-1 us-east-1b 3h37m ip-10-0-143-125.ec2.internal aws:///us-east-1b/i-096c349b700a19631 running -clustername-8qw5l-master-2 Running m4.xlarge us-east-1 us-east-1c 3h37m ip-10-0-154-194.ec2.internal aws:///us-east-1c/i-02626f1dba9ed5bba running -clustername-8qw5l-worker-us-east-1a-wbtgd Running m4.large us-east-1 us-east-1a 3h28m ip-10-0-129-226.ec2.internal aws:///us-east-1a/i-010ef6279b4662ced running -clustername-8qw5l-worker-us-east-1b-lrdxb Running m4.large us-east-1 us-east-1b 3h28m ip-10-0-144-248.ec2.internal aws:///us-east-1b/i-0cb45ac45a166173b running -clustername-8qw5l-worker-us-east-1c-pkg26 Running m4.large us-east-1 us-east-1c 3h28m ip-10-0-170-181.ec2.internal aws:///us-east-1c/i-06861c00007751b0a running ----- - -.. Create a machine by using the `new-master-machine.yaml` file: -+ -[source,terminal] ----- -$ oc apply -f new-master-machine.yaml ----- - -.. Verify that the new machine has been created: -+ -[source,terminal] ----- -$ oc get machines -n openshift-machine-api -o wide ----- -+ -Example output: -+ -[source,terminal] ----- -NAME PHASE TYPE REGION ZONE AGE NODE PROVIDERID STATE -clustername-8qw5l-master-1 Running m4.xlarge us-east-1 us-east-1b 3h37m ip-10-0-143-125.ec2.internal aws:///us-east-1b/i-096c349b700a19631 running -clustername-8qw5l-master-2 Running m4.xlarge us-east-1 us-east-1c 3h37m ip-10-0-154-194.ec2.internal aws:///us-east-1c/i-02626f1dba9ed5bba running -clustername-8qw5l-master-3 Provisioning m4.xlarge us-east-1 us-east-1a 85s ip-10-0-173-171.ec2.internal aws:///us-east-1a/i-015b0888fe17bc2c8 running <1> -clustername-8qw5l-worker-us-east-1a-wbtgd Running m4.large us-east-1 us-east-1a 3h28m ip-10-0-129-226.ec2.internal aws:///us-east-1a/i-010ef6279b4662ced running -clustername-8qw5l-worker-us-east-1b-lrdxb Running m4.large us-east-1 us-east-1b 3h28m ip-10-0-144-248.ec2.internal aws:///us-east-1b/i-0cb45ac45a166173b running -clustername-8qw5l-worker-us-east-1c-pkg26 Running m4.large us-east-1 us-east-1c 3h28m ip-10-0-170-181.ec2.internal aws:///us-east-1c/i-06861c00007751b0a running ----- -<1> The new machine, `clustername-8qw5l-master-3` is being created and is ready after the phase changes from `Provisioning` to `Running`. -+ -It might take a few minutes for the new machine to be created. The etcd cluster Operator will automatically sync when the machine or node returns to a healthy state. - -.. Repeat these steps for each lost control plane host that is not the recovery host. - -. Turn off the quorum guard by entering the following command: -+ -[source,terminal] ----- -$ oc patch etcd/cluster --type=merge -p '{"spec": {"unsupportedConfigOverrides": {"useUnsupportedUnsafeNonHANonProductionUnstableEtcd": true}}}' ----- -+ -This command ensures that you can successfully re-create secrets and roll out the static pods. - -. 
In a separate terminal window, log in to the cluster as a user with the `cluster-admin` role by entering the following command: -+ -[source,terminal] ----- -$ oc login -u <cluster_admin> <1> ----- -<1> For `<cluster_admin>`, specify a user name with the `cluster-admin` role. - -. Force etcd redeployment. -+ -In a terminal that has access to the cluster as a `cluster-admin` user, run the following command: -+ -[source,terminal] ----- -$ oc patch etcd cluster -p='{"spec": {"forceRedeploymentReason": "recovery-'"$( date --rfc-3339=ns )"'"}}' --type=merge <1> ----- -<1> The `forceRedeploymentReason` value must be unique, which is why a timestamp is appended. -+ -When the etcd cluster Operator performs a redeployment, the existing nodes are started with new pods similar to the initial bootstrap scale up. - -. Turn the quorum guard back on by entering the following command: -+ -[source,terminal] ----- -$ oc patch etcd/cluster --type=merge -p '{"spec": {"unsupportedConfigOverrides": null}}' ----- - -. You can verify that the `unsupportedConfigOverrides` section is removed from the object by entering this command: -+ -[source,terminal] ----- -$ oc get etcd/cluster -oyaml ----- - -. Verify all nodes are updated to the latest revision. -+ -In a terminal that has access to the cluster as a `cluster-admin` user, run the following command: -+ -[source,terminal] ----- -$ oc get etcd -o=jsonpath='{range .items[0].status.conditions[?(@.type=="NodeInstallerProgressing")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -Review the `NodeInstallerProgressing` status condition for etcd to verify that all nodes are at the latest revision. The output shows `AllNodesAtLatestRevision` upon successful update: -+ -[source,terminal] ----- -AllNodesAtLatestRevision -3 nodes are at revision 7 <1> ----- -<1> In this example, the latest revision number is `7`. -+ -If the output includes multiple revision numbers, such as `2 nodes are at revision 6; 1 nodes are at revision 7`, this means that the update is still in progress. Wait a few minutes and try again. - -. After etcd is redeployed, force new rollouts for the control plane. The Kubernetes API server will reinstall itself on the other nodes because the kubelet is connected to API servers using an internal load balancer. -+ -In a terminal that has access to the cluster as a `cluster-admin` user, run the following commands. - -.. Force a new rollout for the Kubernetes API server: -+ -[source,terminal] ----- -$ oc patch kubeapiserver cluster -p='{"spec": {"forceRedeploymentReason": "recovery-'"$( date --rfc-3339=ns )"'"}}' --type=merge ----- -+ -Verify all nodes are updated to the latest revision. -+ -[source,terminal] ----- -$ oc get kubeapiserver -o=jsonpath='{range .items[0].status.conditions[?(@.type=="NodeInstallerProgressing")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -Review the `NodeInstallerProgressing` status condition to verify that all nodes are at the latest revision. The output shows `AllNodesAtLatestRevision` upon successful update: -+ -[source,terminal] ----- -AllNodesAtLatestRevision -3 nodes are at revision 7 <1> ----- -<1> In this example, the latest revision number is `7`. -+ -If the output includes multiple revision numbers, such as `2 nodes are at revision 6; 1 nodes are at revision 7`, this means that the update is still in progress. Wait a few minutes and try again. - -.. 
Force a new rollout for the Kubernetes controller manager: -+ -[source,terminal] ----- -$ oc patch kubecontrollermanager cluster -p='{"spec": {"forceRedeploymentReason": "recovery-'"$( date --rfc-3339=ns )"'"}}' --type=merge ----- -+ -Verify all nodes are updated to the latest revision. -+ -[source,terminal] ----- -$ oc get kubecontrollermanager -o=jsonpath='{range .items[0].status.conditions[?(@.type=="NodeInstallerProgressing")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -Review the `NodeInstallerProgressing` status condition to verify that all nodes are at the latest revision. The output shows `AllNodesAtLatestRevision` upon successful update: -+ -[source,terminal] ----- -AllNodesAtLatestRevision -3 nodes are at revision 7 <1> ----- -<1> In this example, the latest revision number is `7`. -+ -If the output includes multiple revision numbers, such as `2 nodes are at revision 6; 1 nodes are at revision 7`, this means that the update is still in progress. Wait a few minutes and try again. - -.. Force a new rollout for the Kubernetes scheduler: -+ -[source,terminal] ----- -$ oc patch kubescheduler cluster -p='{"spec": {"forceRedeploymentReason": "recovery-'"$( date --rfc-3339=ns )"'"}}' --type=merge ----- -+ -Verify all nodes are updated to the latest revision. -+ -[source,terminal] ----- -$ oc get kubescheduler -o=jsonpath='{range .items[0].status.conditions[?(@.type=="NodeInstallerProgressing")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -Review the `NodeInstallerProgressing` status condition to verify that all nodes are at the latest revision. The output shows `AllNodesAtLatestRevision` upon successful update: -+ -[source,terminal] ----- -AllNodesAtLatestRevision -3 nodes are at revision 7 <1> ----- -<1> In this example, the latest revision number is `7`. -+ -If the output includes multiple revision numbers, such as `2 nodes are at revision 6; 1 nodes are at revision 7`, this means that the update is still in progress. Wait a few minutes and try again. - -. Verify that all control plane hosts have started and joined the cluster. -+ -In a terminal that has access to the cluster as a `cluster-admin` user, run the following command: -+ -[source,terminal] ----- -$ oc -n openshift-etcd get pods -l k8s-app=etcd ----- -+ -.Example output -[source,terminal] ----- -etcd-ip-10-0-143-125.ec2.internal 2/2 Running 0 9h -etcd-ip-10-0-154-194.ec2.internal 2/2 Running 0 9h -etcd-ip-10-0-173-171.ec2.internal 2/2 Running 0 9h ----- - -To ensure that all workloads return to normal operation following a recovery procedure, restart each pod that stores Kubernetes API information. This includes {product-title} components such as routers, Operators, and third-party components. - -[NOTE] -==== -On completion of the previous procedural steps, you might need to wait a few minutes for all services to return to their restored state. For example, authentication by using `oc login` might not immediately work until the OAuth server pods are restarted. - -Consider using the `system:admin` `kubeconfig` file for immediate authentication. This method bases its authentication on SSL/TLS client certificates rather than OAuth tokens.
You can authenticate with this file by issuing the following command: - -[source,terminal] ----- -$ export KUBECONFIG=<installation_directory>/auth/kubeconfig ----- - -Issue the following command to display your authenticated user name: - -[source,terminal] ----- -$ oc whoami ----- -==== diff --git a/modules/dr-scenario-cluster-state-issues.adoc b/modules/dr-scenario-cluster-state-issues.adoc deleted file mode 100644 index 8d5dba8e2fb6..000000000000 --- a/modules/dr-scenario-cluster-state-issues.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * disaster_recovery/scenario-2-restoring-cluster-state.adoc -// * post_installation_configuration/cluster-tasks.adoc - -[id="dr-scenario-cluster-state-issues_{context}"] -= Issues and workarounds for restoring a persistent storage state - -If your {product-title} cluster uses persistent storage of any form, part of the cluster state is typically stored outside etcd. It might be an Elasticsearch cluster running in a pod or a database running in a `StatefulSet` object. When you restore from an etcd backup, the status of the workloads in {product-title} is also restored. However, if the etcd snapshot is old, the status might be invalid or outdated. - -[IMPORTANT] -==== -The contents of persistent volumes (PVs) are never part of the etcd snapshot. When you restore an {product-title} cluster from an etcd snapshot, non-critical workloads might gain access to critical data, or vice-versa. -==== - -The following are some example scenarios that produce an out-of-date status: - -* A MySQL database is running in a pod backed by a PV object. Restoring {product-title} from an etcd snapshot does not bring back the volume on the storage provider, and does not produce a running MySQL pod, despite the pod repeatedly attempting to start. You must manually restore this pod by restoring the volume on the storage provider, and then editing the PV to point to the new volume. - -* Pod P1 is using volume A, which is attached to node X. If the etcd snapshot is taken while another pod uses the same volume on node Y, then when the etcd restore is performed, pod P1 might not be able to start correctly due to the volume still being attached to node Y. {product-title} is not aware of the attachment, and does not automatically detach it. When this occurs, the volume must be manually detached from node Y so that the volume can attach on node X, and then pod P1 can start. - -* Cloud provider or storage provider credentials were updated after the etcd snapshot was taken. This causes any CSI drivers or Operators that depend on those credentials to not work. You might have to manually update the credentials required by those drivers or Operators. - -* A device is removed or renamed from {product-title} nodes after the etcd snapshot is taken. The Local Storage Operator creates symlinks for each PV that it manages from `/dev/disk/by-id` or `/dev` directories. This situation might cause the local PVs to refer to devices that no longer exist. -+ -To fix this problem, an administrator must: - -. Manually remove the PVs with invalid devices. -. Remove symlinks from respective nodes. -. Delete `LocalVolume` or `LocalVolumeSet` objects (see _Storage_ -> _Configuring persistent storage_ -> _Persistent storage using local volumes_ -> _Deleting the Local Storage Operator Resources_).
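The three-step fix above maps roughly to the following commands. This is a hedged sketch only: `<pv_name>`, `<node_name>`, `<local_storage_class>`, `<device>`, and `<local_volume_name>` are placeholders, the `/mnt/local-storage/<local_storage_class>/` symlink location is an assumption about where the Local Storage Operator places its links, and `openshift-local-storage` is assumed to be the namespace that contains your `LocalVolume` objects. Follow the _Deleting the Local Storage Operator Resources_ procedure referenced above for the authoritative steps.

[source,terminal]
----
# 1. Remove the PV that refers to a device that no longer exists.
$ oc delete pv <pv_name>

# 2. Remove the stale symlink on the affected node (path is an assumption).
$ oc debug node/<node_name> -- chroot /host rm /mnt/local-storage/<local_storage_class>/<device>

# 3. Delete the LocalVolume or LocalVolumeSet object that created the PV.
$ oc delete localvolume -n openshift-local-storage <local_volume_name>
----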
diff --git a/modules/dynamic-plug-in-development.adoc b/modules/dynamic-plug-in-development.adoc deleted file mode 100644 index e7fc7936f804..000000000000 --- a/modules/dynamic-plug-in-development.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/dynamic-plugin/dynamic-plugins-get-started.adoc - -:_content-type: PROCEDURE -[id="dynamic-plugin-development_{context}"] -= Dynamic plugin development - -You can run the plugin using a local development environment. The {product-title} web console runs in a container connected to the cluster you have logged into. - -.Prerequisites -* You must have an OpenShift cluster running. -* You must have the OpenShift CLI (`oc`) installed. -* You must have link:https://yarnpkg.com/[`yarn`] installed. -* You must have link:https://www.docker.com/[Docker] v3.2.0 or newer or link:https://podman.io/[Podman] installed and running. - -.Procedure - -. In your terminal, run the following command to install the dependencies for your plugin using yarn. - -+ -[source,terminal] ----- -$ yarn install ----- - -. After installing, run the following command to start yarn. - -+ -[source,terminal] ----- -$ yarn run start ----- - -. In another terminal window, log in to the {product-title} through the CLI. -+ -[source,terminal] ----- -$ oc login ----- - -. Run the {product-title} web console in a container connected to the cluster you have logged into by running the following command: -+ -[source,terminal] ----- -$ yarn run start-console ----- - -.Verification -* Visit link:http://localhost:9000/example[localhost:9000] to view the running plugin. Inspect the value of `window.SERVER_FLAGS.consolePlugins` to see the list of plugins which load at runtime.
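If the plugin does not appear at that URL, it can help to check the console container and the plugin development server separately. This is a convenience sketch: port `9000` comes from the verification step above, while port `9001` and the `plugin-manifest.json` path are assumptions based on common console plugin template defaults rather than values taken from this procedure.

[source,terminal]
----
# Confirm that the console container is serving the UI (expect HTTP 200).
$ curl -s -o /dev/null -w '%{http_code}\n' http://localhost:9000

# Confirm that the plugin development server is serving its manifest
# (port and path are assumptions).
$ curl -s http://localhost:9001/plugin-manifest.json

# Confirm which cluster and user the console container proxies requests for.
$ oc whoami --show-server
----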
- -.Example -[source,tsx] ----- -<RedExclamationCircleIcon title="Failed" /> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`className` |(optional) additional class name for the component -|`title` |(optional) icon title -|`size` |(optional) icon size: (`sm`, `md`, `lg`, `xl`) -|=== - -[discrete] -== `YellowExclamationTriangleIcon` - -Component for displaying a yellow triangle exclamation icon. - -.Example -[source,tsx] ----- -<YellowExclamationTriangleIcon title="Warning" /> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`className` |(optional) additional class name for the component -|`title` |(optional) icon title -|`size` |(optional) icon size: (`sm`, `md`, `lg`, `xl`) -|=== - -[discrete] -== `BlueInfoCircleIcon` - -Component for displaying a blue info circle icon. - -.Example -[source,tsx] ----- -<BlueInfoCircleIcon title="Info" /> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`className` |(optional) additional class name for the component -|`title` |(optional) icon title -|`size` |(optional) icon size: ('sm', 'md', 'lg', 'xl') -|=== - -[discrete] -== `ErrorStatus` - -Component for displaying an error status popover. - -.Example -[source,tsx] ----- -<ErrorStatus title={errorMsg} /> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`title` |(optional) status text -|`iconOnly` |(optional) if true, only displays icon -|`noTooltip` |(optional) if true, tooltip won't be displayed -|`className` |(optional) additional class name for the component -|`popoverTitle` |(optional) title for popover -|=== - -[discrete] -== `InfoStatus` - -Component for displaying an information status popover. - -.Example -[source,tsx] ----- -<InfoStatus title={infoMsg} /> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`title` |(optional) status text -|`iconOnly` |(optional) if true, only displays icon -|`noTooltip` |(optional) if true, tooltip won't be displayed -|`className` |(optional) additional class name for the component -|`popoverTitle` |(optional) title for popover -|=== - -[discrete] -== `ProgressStatus` - -Component for displaying a progressing status popover. - -.Example -[source,tsx] ----- -<ProgressStatus title={progressMsg} /> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`title` |(optional) status text -|`iconOnly` |(optional) if true, only displays icon -|`noTooltip` |(optional) if true, tooltip won't be displayed -|`className` |(optional) additional class name for the component -|`popoverTitle` |(optional) title for popover -|=== - -[discrete] -== `SuccessStatus` - -Component for displaying a success status popover. - -.Example -[source,tsx] ----- -<SuccessStatus title={successMsg} /> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`title` |(optional) status text -|`iconOnly` |(optional) if true, only displays icon -|`noTooltip` |(optional) if true, tooltip won't be displayed -|`className` |(optional) additional class name for the component -|`popoverTitle` |(optional) title for popover -|=== - -[discrete] -== `checkAccess` - -Provides information about user access to a given resource. It returns an object with resource access information. 
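-A minimal sketch of calling `checkAccess`; the resource attributes are illustrative, and reading `status.allowed` assumes the resolved object follows the Kubernetes `SelfSubjectAccessReview` shape:
-
-[source,ts]
----
-// Ask whether the current user can create deployments in the "default" namespace.
-checkAccess({
-  group: 'apps',
-  resource: 'deployments',
-  verb: 'create',
-  namespace: 'default',
-}).then((review) => {
-  // `review` is the resource access information described above.
-  console.log('Create allowed:', review.status?.allowed);
-});
----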
- -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`resourceAttributes` |resource attributes for access review -|`impersonate` |impersonation details -|=== - -[discrete] -== `useAccessReview` - -Hook that provides information about user access to a given resource. It returns an array with `isAllowed` and `loading` values. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`resourceAttributes` |resource attributes for access review -|`impersonate` |impersonation details -|=== - -[discrete] -== `useResolvedExtensions` - -React hook for consuming Console extensions with resolved `CodeRef` properties. This hook accepts the same argument(s) as `useExtensions` hook and returns an adapted list of extension instances, resolving all code references within each extension's properties. - -Initially, the hook returns an empty array. After the resolution is complete, the React component is re-rendered with the hook returning an adapted list of extensions. When the list of matching extensions changes, the resolution is restarted. The hook will continue to return the previous result until the resolution completes. - -The hook's result elements are guaranteed to be referentially stable across re-renders. It returns a tuple containing a list of adapted extension instances with resolved code references, a boolean flag indicating whether the resolution is complete, and a list of errors detected during the resolution. - -.Example -[source,ts] ----- -const [navItemExtensions, navItemsResolved] = useResolvedExtensions<NavItem>(isNavItem); -// process adapted extensions and render your component ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`typeGuards` |A list of callbacks that each accept a dynamic plugin -extension as an argument and return a boolean flag indicating whether or -not the extension meets desired type constraints -|=== - -[discrete] -== `HorizontalNav` - -A component that creates a Navigation bar for a page. Routing is handled as part of the component. `console.tab/horizontalNav` can be used to add additional content to any horizontal navigation. - -.Example - -[source,jsx] ----- -const HomePage: React.FC = (props) => { - const page = { - href: '/home', - name: 'Home', - component: () => <>Home</> - } - return <HorizontalNav match={props.match} pages={[page]} /> -} ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`resource` |The resource associated with this Navigation, an object of -K8sResourceCommon type - -|`pages` |An array of page objects - -|`match` |match object provided by React Router -|=== - -[discrete] -== `VirtualizedTable` - -A component for making virtualized tables. 
- -.Example -[source,text] ----- -const MachineList: React.FC<MachineListProps> = (props) => { - return ( - <VirtualizedTable<MachineKind> - {...props} - aria-label='Machines' - columns={getMachineColumns} - Row={getMachineTableRow} - /> - ); -} ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`data` |data for table -|`loaded` |flag indicating data is loaded -|`loadError` |error object if issue loading data -|`columns` |column setup -|`Row` |row setup -|`unfilteredData` |original data without filter -|`NoDataEmptyMsg` |(optional) no data empty message component -|`EmptyMsg` |(optional) empty message component -|`scrollNode` |(optional) function to handle scroll -|`label` |(optional) label for table -|`ariaLabel` |(optional) aria label -|`gridBreakPoint` |sizing of how to break up grid for responsiveness -|`onSelect` |(optional) function for handling select of table -|`rowData` |(optional) data specific to row -|=== - -[discrete] -== `TableData` - -Component for displaying table data within a table row. - -.Example -[source,jsx] - ----- -const PodRow: React.FC<RowProps<K8sResourceCommon>> = ({ obj, activeColumnIDs }) => { - return ( - <> - <TableData id={columns[0].id} activeColumnIDs={activeColumnIDs}> - <ResourceLink kind="Pod" name={obj.metadata.name} namespace={obj.metadata.namespace} /> - </TableData> - <TableData id={columns[1].id} activeColumnIDs={activeColumnIDs}> - <ResourceLink kind="Namespace" name={obj.metadata.namespace} /> - </TableData> - </> - ); -}; ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`id` |unique ID for table -|`activeColumnIDs` |active columns -|`className` |(optional) option class name for styling -|=== - -[discrete] -== `useActiveColumns` - -A hook that provides a list of user-selected active TableColumns. - -.Example -[source,text] ----- -// See implementation for more details on TableColumn type - const [activeColumns, userSettingsLoaded] = useActiveColumns({ - columns, - showNamespaceOverride: false, - columnManagementID, - }); - return userSettingsAreLoaded ? <VirtualizedTable columns={activeColumns} {...otherProps} /> : null ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`options` |Which are passed as a key-value map - -|`\{TableColumn[]} options.columns` | An array of all available -TableColumns - -|`\{boolean} [options.showNamespaceOverride]` |(optional) If true, a -namespace column will be included, regardless of column management -selections - -|`\{string} [options.columnManagementID]` |(optional) A unique ID -used to persist and retrieve column management selections to and from -user settings. Usually a group/version/kind (GVK) string for a resource. -|=== - -A tuple containing the current user selected active columns (a subset of options.columns), and a boolean flag indicating whether user settings have been loaded. - -[discrete] -== `ListPageHeader` - -Component for generating a page header. - -.Example -[source,jsx] ----- -const exampleList: React.FC = () => { - return ( - <> - <ListPageHeader title="Example List Page"/> - </> - ); -}; ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`title` |heading title -|`helpText` |(optional) help section as react node -|`badge` |(optional) badge icon as react node -|=== - -[discrete] -== `ListPageCreate` - -Component for adding a create button for a specific resource kind that automatically generates a link to the create YAML for this resource. 
- -.Example -[source,jsx] ----- -const exampleList: React.FC<MyProps> = () => { - return ( - <> - <ListPageHeader title="Example Pod List Page"/> - <ListPageCreate groupVersionKind="Pod">Create Pod</ListPageCreate> - </ListPageHeader> - </> - ); -}; ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`groupVersionKind` |the resource group/version/kind to represent -|=== - -[discrete] -== `ListPageCreateLink` - -Component for creating a stylized link. - -.Example -[source,jsx] ----- -const exampleList: React.FC<MyProps> = () => { - return ( - <> - <ListPageHeader title="Example Pod List Page"/> - <ListPageCreateLink to={'/link/to/my/page'}>Create Item</ListPageCreateLink> - </ListPageHeader> - </> - ); -}; ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`to` |string location where link should direct - -|`createAccessReview` |(optional) object with namespace and kind used to -determine access - -|`children` |(optional) children for the component -|=== - -[discrete] -== `ListPageCreateButton` - -Component for creating button. - -.Example -[source,jsx] ----- -const exampleList: React.FC<MyProps> = () => { - return ( - <> - <ListPageHeader title="Example Pod List Page"/> - <ListPageCreateButton createAccessReview={access}>Create Pod</ListPageCreateButton> - </ListPageHeader> - </> - ); -}; ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`createAccessReview` |(optional) object with namespace and kind used to -determine access - -|`pfButtonProps` |(optional) Patternfly Button props -|=== - -[discrete] -== `ListPageCreateDropdown` - -Component for creating a dropdown wrapped with permissions check. - -.Example -[source,jsx] ----- -const exampleList: React.FC<MyProps> = () => { - const items = { - SAVE: 'Save', - DELETE: 'Delete', - } - return ( - <> - <ListPageHeader title="Example Pod List Page"/> - <ListPageCreateDropdown createAccessReview={access} items={items}>Actions</ListPageCreateDropdown> - </ListPageHeader> - </> - ); -}; ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`items` |key:ReactNode pairs of items to display in dropdown component - -|`onClick` |callback function for click on dropdown items - -|`createAccessReview` |(optional) object with namespace and kind used to -determine access - -|`children` |(optional) children for the dropdown toggle -|=== - -[discrete] -== `ListPageFilter` - -Component that generates filter for list page. - -.Example -[source,tsx] ----- - // See implementation for more details on RowFilter and FilterValue types - const [staticData, filteredData, onFilterChange] = useListPageFilter( - data, - rowFilters, - staticFilters, - ); - // ListPageFilter updates filter state based on user interaction and resulting filtered data can be rendered in an independent component. 
- return ( - <> - <ListPageHeader .../> - <ListPageBody> - <ListPageFilter data={staticData} onFilterChange={onFilterChange} /> - <List data={filteredData} /> - </ListPageBody> - </> - ) ---- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`data` |An array of data points - -|`loaded` |indicates that data has loaded - -|`onFilterChange` |callback function for when filter is updated - -|`rowFilters` |(optional) An array of RowFilter elements that define the -available filter options - -|`nameFilterPlaceholder` |(optional) placeholder for name filter - -|`labelFilterPlaceholder` |(optional) placeholder for label filter - -|`hideLabelFilter` |(optional) only shows the name filter instead of -both name and label filter - -|`hideNameLabelFilter` |(optional) hides both name and label filter - -|`columnLayout` |(optional) column layout object - -|`hideColumnManagement` |(optional) flag to hide the column management -|=== - -[discrete] -== `useListPageFilter` - -A hook that manages filter state for the ListPageFilter component. It returns a tuple containing the data filtered by all static filters, the data filtered by all static and row filters, and a callback that updates rowFilters. - -.Example -[source,tsx] ---- - // See implementation for more details on RowFilter and FilterValue types - const [staticData, filteredData, onFilterChange] = useListPageFilter( - data, - rowFilters, - staticFilters, - ); - // ListPageFilter updates filter state based on user interaction and resulting filtered data can be rendered in an independent component. - return ( - <> - <ListPageHeader .../> - <ListPageBody> - <ListPageFilter data={staticData} onFilterChange={onFilterChange} /> - <List data={filteredData} /> - </ListPageBody> - </> - ) ---- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`data` |An array of data points - -|`rowFilters` |(optional) An array of RowFilter elements that define the -available filter options - -|`staticFilters` |(optional) An array of FilterValue elements that are -statically applied to the data -|=== - -[discrete] -== `ResourceLink` - -Component that creates a link to a specific resource type with an icon badge. - -.Example -[source,tsx] ---- - <ResourceLink - kind="Pod" - name="testPod" - title={metadata.uid} - /> ---- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`kind` |(optional) the kind of resource, for example, Pod, Deployment, Namespace - -|`groupVersionKind` |(optional) object with group, version, and kind - -|`className` |(optional) class style for component - -|`displayName` |(optional) display name for component, overwrites the -resource name if set - -|`inline` |(optional) flag to create icon badge and name inline with -children - -|`linkTo` |(optional) flag to create a Link object - defaults to true - -|`name` |(optional) name of resource - -|`namespace` |(optional) specific namespace for the kind resource to -link to - -|`hideIcon` |(optional) flag to hide the icon badge - -|`title` |(optional) title for the link object (not displayed) - -|`dataTest` |(optional) identifier for testing - -|`onClick` |(optional) callback function for when component is clicked - -|`truncate` |(optional) flag to truncate the link if too long -|=== - -[discrete] -== `ResourceIcon` - -Component that creates an icon badge for a specific resource type. - -.Example -[source,tsx] ---- -<ResourceIcon kind="Pod"/> ---- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`kind` |(optional) the kind of resource, for example,
Pod, Deployment, or Namespace -|`groupVersionKind` |(optional) object with group, version, and kind -|`className` |(optional) class style for component -|=== - -[discrete] -== `useK8sModel` - -Hook that retrieves the k8s model for the provided K8sGroupVersionKind from redux. It returns an array with the first item as the k8s model and the second item as `inFlight` status. - -.Example -[source,ts] ---- -const Component: React.FC = () => { - const [model, inFlight] = useK8sModel({ group: 'app', version: 'v1', kind: 'Deployment' }); - return ... -} ---- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`groupVersionKind` |group, version, and kind of the k8s resource. `K8sGroupVersionKind` is preferred. Alternatively, you can pass a reference for group, version, kind, which is deprecated, that is, a group/version/kind (GVK) `K8sResourceKindReference`. -|=== - -[discrete] -== `useK8sModels` - -Hook that retrieves all current k8s models from redux. It returns an array with the first item as the list of k8s models and the second item as `inFlight` status. - -.Example -[source,ts] ---- -const Component: React.FC = () => { - const [models, inFlight] = useK8sModels(); - return ... -} ---- - -[discrete] -== `useK8sWatchResource` - -Hook that retrieves the k8s resource along with status for loaded and error. It returns an array with the first item as the resource(s), the second item as the loaded status, and the third item as the error state, if any. - -.Example -[source,ts] ---- -const Component: React.FC = () => { - const watchRes = { - ... - } - const [data, loaded, error] = useK8sWatchResource(watchRes) - return ... -} ---- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`initResource` |options needed to watch for the resource. -|=== - -[discrete] -== `useK8sWatchResources` - -Hook that retrieves the k8s resources along with their respective status for loaded and error. It returns a map where keys are as provided in initResources and each value has three properties: data, loaded, and error. - -.Example -[source,tsx] ---- -const Component: React.FC = () => { - const watchResources = { - 'deployment': {...}, - 'pod': {...} - ... - } - const {deployment, pod} = useK8sWatchResources(watchResources) - return ... -} ---- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`initResources` |Resources must be watched as key-value pairs, wherein the key is unique to the resource and the value is the options needed -to watch for the respective resource. -|=== - -[discrete] -== `consoleFetch` - -A custom wrapper around `fetch` that adds console specific headers and allows for retries and timeouts. It also validates the response status code and throws an appropriate error or logs out the user if required. It returns a promise that resolves to the response. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`url` |The URL to fetch -|`options` |The options to pass to fetch -|`timeout` |The timeout in milliseconds -|=== - -[discrete] -== `consoleFetchJSON` - -A custom wrapper around `fetch` that adds console specific headers and allows for retries and timeouts. It also validates the response status code and throws an appropriate error or logs out the user if required. It returns the response as a JSON object. Uses `consoleFetch` internally. It returns a promise that resolves to the response as a JSON object. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`url` |The URL to fetch - -|`method` |The HTTP method to use.
Defaults to `GET`. - -|`options` |The options to pass to fetch - -|`timeout` |The timeout in milliseconds - -|`cluster` |The name of the cluster to make the request to. Defaults to -the active cluster the user has selected -|=== - -[discrete] -== `consoleFetchText` - -A custom wrapper around `fetch` that adds console specific headers and allows for retries and timeouts. It also validates the response status code and throws an appropriate error or logs out the user if required. It returns the response as text. Uses `consoleFetch` internally. It returns a promise that resolves to the response as text. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`url` |The URL to fetch - -|`options` |The options to pass to fetch - -|`timeout` |The timeout in milliseconds - -|`cluster` |The name of the cluster to make the request to. Defaults to -the active cluster the user has selected -|=== - -[discrete] -== `getConsoleRequestHeaders` - -A function that creates impersonation and multicluster related headers for API requests using current redux state. It returns an object containing the appropriate impersonation and cluster request headers, based on redux state. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`targetCluster` |Override the current active cluster with the provided -targetCluster -|=== - -[discrete] -== `k8sGetResource` - -It fetches a resource from the cluster, based on the provided options. If the name is provided, it returns one resource, else it returns all the resources matching the model. It returns a promise that resolves to the response as a JSON object with a resource if the name is provided, else it returns all the resources matching the -model. In case of failure, the promise gets rejected with an HTTP error response. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`options` |Which are passed as key-value pairs in the map - -|`options.model` |k8s model - -|`options.name` |The name of the resource, if not provided then it will -look for all the resources matching the model. - -|`options.ns` | The namespace to look into, should not be specified -for cluster-scoped resources. - -|`options.path` |Appends as subpath if provided - -|`options.queryParams` |The query parameters to be included in the -URL. - -|`options.requestInit` |The fetch init object to use. This can have -request headers, method, redirect, etc. See link:{power-bi-url}[Interface RequestInit] for more. -|=== - -[discrete] -== `k8sCreateResource` - -It creates a resource in the cluster, based on the provided options. It returns a promise that resolves to the response of the resource created. In case of failure, the promise gets rejected with an HTTP error response. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`options` |Which are passed as key-value pairs in the map - -|`options.model` |k8s model - -|`options.data` |Payload for the resource to be created - -|`options.path` |Appends as subpath if provided - -|`options.queryParams` |The query parameters to be included in the -URL. -|=== - -[discrete] -== `k8sUpdateResource` - -It updates the entire resource in the cluster, based on the provided options. When a client needs to replace an existing resource entirely, they can use k8sUpdate. Alternatively, you can use k8sPatch to perform a partial update. It returns a promise that resolves to the response of the resource updated. In case of failure, the promise gets rejected with an HTTP error response.
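-The following is a minimal sketch of a full update. `PodModel` and `updatedPod` are placeholder names for a k8s model object and the full replacement payload; the option names map to the parameters listed in the table that follows:
-
-[source,ts]
----
-// PodModel and updatedPod are placeholders assumed to exist in your plugin code.
-k8sUpdateResource({
-  model: PodModel,
-  data: updatedPod,
-  ns: 'default', // omit for cluster-scoped resources
-  name: updatedPod.metadata.name,
-})
-  .then((updated) => console.log('Updated resource:', updated))
-  .catch((err) => console.error('Update failed:', err));
----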
- -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`options` |Which are passed as key-value pair in the map - -|`options.model` | k8s model - -|`options.data` |Payload for the k8s resource to be updated - -|`options.ns` |Namespace to look into, it should not be specified for -cluster-scoped resources. - -|`options.name` |Resource name to be updated. - -|`options.path` | Appends as subpath if provided - -|`options.queryParams` | The query parameters to be included in the -URL. -|=== - -[discrete] -== `k8sPatchResource` - -It patches any resource in the cluster, based on provided options. When a client needs to perform the partial update, they can use -k8sPatch. Alternatively can use k8sUpdate to replace an existing resource entirely. See link:https://datatracker.ietf.org/doc/html/rfc6902[Data Tracker] for more. It returns a promise that resolves to the response of the resource patched. In case of failure promise gets rejected with HTTP error response. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`options` |Which are passed as key-value pairs in the map. - -|`options.model` | k8s model - -|`options.resource` |The resource to be patched. - -|`options.data` |Only the data to be patched on existing resource -with the operation, path, and value. - -|`options.path` |Appends as subpath if provided. - -|`options.queryParams` | The query parameters to be included in the -URL. -|=== - -[discrete] -== `k8sDeleteResource` - -It deletes resources from the cluster, based on the provided model, resource. The garbage collection works based on `Foreground`|`Background` can be configured with propagationPolicy property in provided model or passed in json. It returns a promise that resolves to the response of kind Status. In case of failure promise gets rejected with HTTP error response. - -.Example -`kind: 'DeleteOptions', apiVersion: 'v1', propagationPolicy` - - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`options` |Which are passed as key-value pair in the map. - -|`options.model` | k8s model - -|`options.resource` | The resource to be deleted. - -|`options.path` |Appends as subpath if provided - -|`options.queryParams` |The query parameters to be included in the -URL. - -|`options.requestInit` |The fetch init object to use. This can have -request headers, method, redirect, etc. See link:{power-bi-url}[Interface RequestInit] for more. - - -|`options.json` |Can control garbage collection of resources -explicitly if provided else will default to model's "propagationPolicy". -|=== - -[discrete] -== `k8sListResource` - -Lists the resources as an array in the cluster, based on provided options. It returns a promise that resolves to the response. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`options` |Which are passed as key-value pairs in the map - -|`options.model` |k8s model - -|`options.queryParams` |The query parameters to be included in the -URL and can pass label selector's as well with key "labelSelector". - -|`options.requestInit` |The fetch init object to use. This can have -request headers, method, redirect, etc. See link:{power-bi-url}[Interface RequestInit] for more. -|=== - -[discrete] -== `k8sListResourceItems` - -Same interface as k8sListResource but returns the sub items. It returns the apiVersion for the model, i.e., `group/version`. - -[discrete] -== `getAPIVersionForModel` - -Provides apiVersion for a k8s model. 
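-A minimal sketch, assuming a Deployment model; the `DeploymentModel` name is a placeholder:
-
-[source,ts]
----
-// DeploymentModel is a placeholder for a k8s model with apiGroup 'apps' and apiVersion 'v1'.
-const apiVersion = getAPIVersionForModel(DeploymentModel); // 'apps/v1'
-// A model without an apiGroup, such as a core resource model, yields only the version, for example 'v1'.
----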
- -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`model` |k8s model -|=== - -[discrete] -== `getGroupVersionKindForResource` - -Provides a group, version, and kind for a resource. It returns the group, version, kind for the provided resource. If the resource does not have an API group, group "core" will be returned. If the resource has an invalid apiVersion, then it will throw an Error. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`resource` |k8s resource -|=== - -[discrete] -== `getGroupVersionKindForModel` - -Provides a group, version, and kind for a k8s model. This returns the group, version, kind for the provided model. If the model does not have an apiGroup, group "core" will be returned. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`model` |k8s model -|=== - -[discrete] -== `StatusPopupSection` - -Component that shows the status in a popup window. Helpful component for building `console.dashboards/overview/health/resource` extensions. - -.Example -[source,tsx] ----- - <StatusPopupSection - firstColumn={ - <> - <span>{title}</span> - <span className="text-secondary"> - My Example Item - </span> - </> - } - secondColumn='Status' - > ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`firstColumn` |values for first column of popup -|`secondColumn` |(optional) values for second column of popup -|`children` |(optional) children for the popup -|=== - -[discrete] -== `StatusPopupItem` - -Status element used in status popup; used in `StatusPopupSection`. - -.Example -[source,text] ----- -<StatusPopupSection - firstColumn='Example' - secondColumn='Status' -> - <StatusPopupItem icon={healthStateMapping[MCGMetrics.state]?.icon}> - Complete - </StatusPopupItem> - <StatusPopupItem icon={healthStateMapping[RGWMetrics.state]?.icon}> - Pending - </StatusPopupItem> -</StatusPopupSection> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`value` |(optional) text value to display -|`icon` |(optional) icon to display -|`children` |child elements -|=== - -[discrete] -== `Overview` - -Creates a wrapper component for a dashboard. - -.Example -[source,text] ----- - <Overview> - <OverviewGrid mainCards={mainCards} leftCards={leftCards} rightCards={rightCards} /> - </Overview> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`className` |(optional) style class for div -|`children` |(optional) elements of the dashboard -|=== - -[discrete] -== `OverviewGrid` - -Creates a grid of card elements for a dashboard; used within `Overview`. - -.Example -[source,text] ----- - <Overview> - <OverviewGrid mainCards={mainCards} leftCards={leftCards} rightCards={rightCards} /> - </Overview> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`mainCards` |cards for grid -|`leftCards` |(optional) cards for left side of grid -|`rightCards` |(optional) cards for right side of grid -|=== - -[discrete] -== `InventoryItem` - -Creates an inventory card item. 
- -.Example -[source,tsx] ----- - return ( - <InventoryItem> - <InventoryItemTitle>{title}</InventoryItemTitle> - <InventoryItemBody error={loadError}> - {loaded && <InventoryItemStatus count={workerNodes.length} icon={<MonitoringIcon />} />} - </InventoryItemBody> - </InventoryItem> - ) ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`children` |elements to render inside the item -|=== - -[discrete] -== `InventoryItemTitle` - -Creates a title for an inventory card item; used within `InventoryItem`. - -.Example -[source,tsx] ----- - return ( - <InventoryItem> - <InventoryItemTitle>{title}</InventoryItemTitle> - <InventoryItemBody error={loadError}> - {loaded && <InventoryItemStatus count={workerNodes.length} icon={<MonitoringIcon />} />} - </InventoryItemBody> - </InventoryItem> - ) ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`children` |elements to render inside the title -|=== - -[discrete] -== `InventoryItemBody` - -Creates the body of an inventory card; used within `InventoryCard` and can be used with `InventoryTitle`. - -.Example -[source,tsx] ----- - return ( - <InventoryItem> - <InventoryItemTitle>{title}</InventoryItemTitle> - <InventoryItemBody error={loadError}> - {loaded && <InventoryItemStatus count={workerNodes.length} icon={<MonitoringIcon />} />} - </InventoryItemBody> - </InventoryItem> - ) ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`children` |elements to render inside the Inventory Card or title -|`error` |elements of the div -|=== - -[discrete] -== `InventoryItemStatus` - -Creates a count and icon for an inventory card with optional link address; used within `InventoryItemBody` - -.Example -[source,tsx] ----- - return ( - <InventoryItem> - <InventoryItemTitle>{title}</InventoryItemTitle> - <InventoryItemBody error={loadError}> - {loaded && <InventoryItemStatus count={workerNodes.length} icon={<MonitoringIcon />} />} - </InventoryItemBody> - </InventoryItem> - ) ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`count` |count for display -|`icon` |icon for display -|`linkTo` |(optional) link address -|=== - -[discrete] -== `InventoryItemLoading` - -Creates a skeleton container for when an inventory card is loading; used with `InventoryItem` and related components - -.Example -[source,tsx] ----- -if (loadError) { - title = <Link to={workerNodesLink}>{t('Worker Nodes')}</Link>; -} else if (!loaded) { - title = <><InventoryItemLoading /><Link to={workerNodesLink}>{t('Worker Nodes')}</Link></>; -} -return ( - <InventoryItem> - <InventoryItemTitle>{title}</InventoryItemTitle> - </InventoryItem> -) ----- - -[discrete] -== `useFlag` - -Hook that returns the given feature flag from FLAGS redux state. It returns the boolean value of the requested feature flag or undefined. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`flag` |The feature flag to return -|=== - -[discrete] -== `YAMLEditor` - -A basic lazy loaded YAML editor with hover help and completion. - -.Example -[source,text] ----- -<React.Suspense fallback={<LoadingBox />}> - <YAMLEditor - value={code} - /> -</React.Suspense> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`value` |String representing the yaml code to render. - -|`options` |Monaco editor options. - -|`minHeight` |Minimum editor height in valid CSS height values. - -|`showShortcuts` |Boolean to show shortcuts on top of the editor. 
- -|`toolbarLinks` |Array of ReactNode rendered on the toolbar links -section on top of the editor. - -|`onChange` |Callback for on code change event. - -|`onSave` |Callback called when the command CTRL / CMD + S is triggered. - -|`ref` |React reference to `{ editor?: IStandaloneCodeEditor }`. Using -the `editor` property, you are able to access all methods to control -the editor. -|=== - -[discrete] -== `ResourceYAMLEditor` - -A lazy loaded YAML editor for Kubernetes resources with hover help and completion. The component uses the YAMLEditor and adds on top of it more functionality, like resource update handling, alerts, save, cancel, and reload buttons, accessibility, and more. Unless an `onSave` callback is provided, the resource update is automatically handled. It should be wrapped in a `React.Suspense` component. - -.Example -[source,text] ---- -<React.Suspense fallback={<LoadingBox />}> - <ResourceYAMLEditor - initialResource={resource} - header="Create resource" - onSave={(content) => updateResource(content)} - /> -</React.Suspense> ---- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`initialResource` |YAML/Object representing a resource to be shown by -the editor. This prop is used only during the initial render - -|`header` |Add a header on top of the YAML editor - -|`onSave` |Callback for the Save button. Passing it will override the -default update performed on the resource by the editor -|=== - -[discrete] -== `ResourceEventStream` - -A component to show events related to a particular resource. - -.Example -[source,tsx] ---- -const [resource, loaded, loadError] = useK8sWatchResource(clusterResource); -return <ResourceEventStream resource={resource} /> ---- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`resource` |An object whose related events should be shown. -|=== - -[discrete] -== `usePrometheusPoll` - -Sets up a poll to Prometheus for a single query. It returns a tuple containing the query response, a boolean flag indicating whether the response has completed, and any errors encountered during the request or post-processing of the request. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`\{PrometheusEndpoint} props.endpoint` |one of the -PrometheusEndpoint (label, query, range, rules, targets) - -|`\{string} [props.query]` |(optional) Prometheus query string. If -empty or undefined, polling is not started. - -|`\{number} [props.delay]` |(optional) polling delay interval (ms) - -|`\{number} [props.endTime]` |(optional) for QUERY_RANGE endpoint, end -of the query range - -|`\{number} [props.samples]` |(optional) for QUERY_RANGE endpoint - -|`\{number} [options.timespan]` | (optional) for QUERY_RANGE endpoint - -|`\{string} [options.namespace]` | (optional) a search param to append - -|`\{string} [options.timeout]` | (optional) a search param to append -|=== - -[discrete] -== `Timestamp` - -A component to render a timestamp. The timestamps are synchronized between individual instances of the Timestamp component. The provided timestamp is formatted according to the user locale. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`timestamp` |the timestamp to render. Format is expected to be ISO 8601 -(used by Kubernetes), epoch timestamp, or an instance of a Date. - -|`simple` |render simple version of the component omitting icon and -tooltip. - -|`omitSuffix` |formats the date omitting the suffix. - -|`className` |additional class name for the component.
-|=== - -[discrete] -== `useModal` - -A hook to launch modals. - -.Example -[source,tsx] ---- -const AppPage: React.FC = () => { - const [launchModal] = useModal(); - const onClick = () => launchModal(ModalComponent); - return ( - <Button onClick={onClick}>Launch a Modal</Button> - ) -} ---- - -[discrete] -== `ActionServiceProvider` - -Component that allows receiving contributions from other plugins for the `console.action/provider` extension type. - -.Example -[source,text] ---- - const context: ActionContext = { 'a-context-id': { dataFromDynamicPlugin } }; - - ... - - <ActionServiceProvider context={context}> - {({ actions, options, loaded }) => - loaded && ( - <ActionMenu actions={actions} options={options} variant={ActionMenuVariant.DROPDOWN} /> - ) - } - </ActionServiceProvider> ---- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`context` |Object with contextId and optional plugin data -|=== - -[discrete] -== `NamespaceBar` - -A component that renders a horizontal toolbar with a namespace dropdown menu in the leftmost position. Additional components can be passed in as children and will be rendered to the right of the namespace dropdown. This component is designed to be used at the top of the page. It should be used on pages where the user needs to be able to change the active namespace, such as on pages with k8s resources. - -.Example -[source,text] ---- - const logNamespaceChange = (namespace) => console.log(`New namespace: ${namespace}`); - - ... - - <NamespaceBar onNamespaceChange={logNamespaceChange}> - <NamespaceBarApplicationSelector /> - </NamespaceBar> - <Page> - - ... ---- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`onNamespaceChange` |(optional) A function that is executed when a -namespace option is selected. It accepts the new namespace in the form -of a string as its only argument. The active namespace is updated -automatically when an option is selected, but additional logic can be -applied via this function. When the namespace is changed, the namespace -parameter in the URL will be changed from the previous namespace to the -newly selected namespace. - -|`isDisabled` |(optional) A boolean flag that disables the namespace -dropdown if set to true. This option only applies to the namespace -dropdown and has no effect on child components. - -|`children` |(optional) Additional elements to be rendered inside the -toolbar to the right of the namespace dropdown. -|=== - -[discrete] -== `ErrorBoundaryFallbackPage` - -Creates a full page ErrorBoundaryFallbackPage component to display the "Oh no! Something went wrong." message along with the stack trace and other helpful debugging information. This is to be used in conjunction with an `ErrorBoundary` component. - -.Example -[source,tsx] ---- -//in ErrorBoundary component -if (this.state.hasError) { - return <ErrorBoundaryFallbackPage errorMessage={errorString} componentStack={componentStackString} - stack={stackTraceString} title={errorString}/>; -} - -return this.props.children; ---- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`errorMessage` |text description of the error message -|`componentStack` |component trace of the exception -|`stack` |stack trace of the exception -|`title` |title to render as the header of the error boundary page -|=== - -[discrete] -== `PerspectiveContext` - -Deprecated: Use the provided `usePerspectiveContext` instead. Creates the perspective context.
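-A minimal sketch of consuming the deprecated context directly; the `activePerspective` field name is an assumption based on the value shape described in the table that follows, and new code should prefer `usePerspectiveContext`:
-
-[source,tsx]
----
-const PerspectiveLabel: React.FC = () => {
-  // The context value is an object with the active perspective and a setter.
-  const { activePerspective } = React.useContext(PerspectiveContext);
-  return <span>Active perspective: {activePerspective}</span>;
-};
----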
- -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`PerspectiveContextType` |object with active perspective and setter -|=== - -[discrete] -== `useAccessReviewAllowed` - -Deprecated: Use `useAccessReview` from `@console/dynamic-plugin-sdk` instead. Hook that provides allowed status about user access to a given resource. It returns the `isAllowed` boolean value. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`resourceAttributes` |resource attributes for access review -|`impersonate` |impersonation details -|=== - -[discrete] -== `useSafetyFirst` - -Deprecated: This hook is not related to console functionality. Hook that ensures a safe asynchronnous setting of React state in case a given component could be unmounted. It returns an array with a pair of state value and its set function. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`initialState` |initial state value -|=== - -:!power-bi-url: \ No newline at end of file diff --git a/modules/dynamic-plugin-sdk-extensions.adoc b/modules/dynamic-plugin-sdk-extensions.adoc deleted file mode 100644 index 71cdc5861983..000000000000 --- a/modules/dynamic-plugin-sdk-extensions.adoc +++ /dev/null @@ -1,1475 +0,0 @@ -// Module is included in the following assemblies: -// -// * web_console/dynamic-plugin/dynamic-plugins-reference.adoc - -:_content-type: CONCEPT -[id="dynamic-plugin-sdk-extensions_{context}"] -= Dynamic plugin extension types - -[discrete] -== `console.action/filter` - -`ActionFilter` can be used to filter an action. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`contextId` |`string` |no |The context ID helps to narrow the scope of -contributed actions to a particular area of the application. Examples include `topology` and `helm`. - -|`filter` |`CodeRef<(scope: any, action: Action) => boolean>` |no |A -function that will filter actions based on some conditions. - -`scope`: The scope -in which actions should be provided for. A hook might be required if you want to -remove the `ModifyCount` action from a deployment with a horizontal pod -autoscaler (HPA). -|=== - -[discrete] -== `console.action/group` - -`ActionGroup` contributes an action group that can also be a submenu. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |ID used to identify the action section. - -|`label` |`string` |yes |The label to display in the UI. Required for -submenus. - -|`submenu` |`boolean` |yes |Whether this group should be displayed as -submenu. - -|`insertBefore` |`string` \| `string[]` |yes |Insert this item before the -item referenced here. For arrays, the first one found in order is used. - -|`insertAfter` |`string` \| `string[]` |yes |Insert this item after the -item referenced here. For arrays, the first one found in order is -used. The `insertBefore` value takes precedence. -|=== - -[discrete] -== `console.action/provider` - -`ActionProvider` contributes a hook that returns list of actions for specific context. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`contextId` |`string` |no |The context ID helps to narrow the scope of -contributed actions to a particular area of the application. Examples include `topology` and `helm`. - -|`provider` |`CodeRef<ExtensionHook<Action[], any>>` |no |A React hook -that returns actions for the given scope. If `contextId` = `resource`, then -the scope will always be a Kubernetes resource object. 
-|=== - -[discrete] -== `console.action/resource-provider` - -`ResourceActionProvider` contributes a hook that returns list of actions for specific resource model. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`ExtensionK8sKindVersionModel` |no |The model for which this -provider provides actions for. - -|`provider` |`CodeRef<ExtensionHook<Action[], any>>` |no |A react hook -which returns actions for the given resource model -|=== - -[discrete] -== `console.alert-action` - -This extension can be used to trigger a specific action when a specific Prometheus alert is observed by the Console based on its `rule.name` value. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`alert` |`string` |no | Alert name as defined by `alert.rule.name` property - -|`text` |`string` |no | - -|`action` |`CodeRef<(alert: any) => void>` |no | Function to perform side effect | -|=== - -[discrete] -== `console.catalog/item-filter` - -This extension can be used for plugins to contribute a handler that can filter specific catalog items. For example, the plugin can contribute a filter that filters helm charts from specific provider. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`catalogId` |`string` \| `string[]` |no |The unique identifier for the -catalog this provider contributes to. - -|`type` |`string` |no |Type ID for the catalog item type. - -|`filter` |`CodeRef<(item: CatalogItem) => boolean>` |no |Filters items -of a specific type. Value is a function that takes `CatalogItem[]` and -returns a subset based on the filter criteria. -|=== - -[discrete] -== `console.catalog/item-metadata` - -This extension can be used to contribute a provider that adds extra metadata to specific catalog items. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`catalogId` |`string` \| `string[]` |no |The unique identifier for the -catalog this provider contributes to. - -|`type` |`string` |no |Type ID for the catalog item type. - -|`provider` -|`CodeRef<ExtensionHook<CatalogItemMetadataProviderFunction, CatalogExtensionHookOptions>>` -|no |A hook which returns a function that will be used to provide metadata to catalog items of a specific type. -|=== - -[discrete] -== `console.catalog/item-provider` - -This extension allows plugins to contribute a provider for a catalog item type. For example, a Helm Plugin can add a provider that fetches all the Helm Charts. This extension can also be used by other plugins to add more items to a specific catalog item type. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`catalogId` |`string` \| `string[]` |no |The unique identifier for the -catalog this provider contributes to. - -|`type` |`string` |no |Type ID for the catalog item type. - -|`title` |`string` |no |Title for the catalog item provider - -|`provider` -|`CodeRef<ExtensionHook<CatalogItem<any>[], CatalogExtensionHookOptions>>` -|no |Fetch items and normalize it for the catalog. Value is a react -effect hook. - -|`priority` |`number` |yes |Priority for this provider. Defaults to `0`. -Higher priority providers may override catalog items provided by other -providers. -|=== - -[discrete] -== `console.catalog/item-type` - -This extension allows plugins to contribute a new type of catalog item. For example, a Helm plugin can define a new catalog item type as HelmCharts that it wants to contribute to the Developer Catalog. 
- -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`type` |`string` |no |Type for the catalog item. - -|`title` |`string` |no |Title for the catalog item. - -|`catalogDescription` |`string` \| `CodeRef<React.ReactNode>` |yes -|Description for the type specific catalog. - -|`typeDescription` |`string` |yes |Description for the catalog item -type. - -|`filters` |`CatalogItemAttribute[]` |yes |Custom filters specific to -the catalog item. - -|`groupings` |`CatalogItemAttribute[]` |yes |Custom groupings specific -to the catalog item. -|=== - -[discrete] -== `console.catalog/item-type-metadata` - -This extension allows plugins to contribute extra metadata like custom filters or groupings for any catalog item type. For example, a plugin can attach a custom filter for HelmCharts that can filter based on chart provider. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`type` |`string` |no |Type for the catalog item. - -|`filters` |`CatalogItemAttribute[]` |yes |Custom filters specific to -the catalog item. - -|`groupings` |`CatalogItemAttribute[]` |yes |Custom groupings specific -to the catalog item. -|=== - -[discrete] -== `console.cluster-overview/inventory-item` - -Adds a new inventory item into cluster overview page. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`component` |`CodeRef<React.ComponentType<{}>>` |no |The component to -be rendered. -|=== - -[discrete] -== `console.cluster-overview/multiline-utilization-item` - -Adds a new cluster overview multi-line utilization item. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`title` |`string` |no |The title of the utilization item. - -|`getUtilizationQueries` |`CodeRef<GetMultilineQueries>` |no |Prometheus -utilization query. - -|`humanize` |`CodeRef<Humanize>` |no |Convert Prometheus data to human-readable form. - -|`TopConsumerPopovers` -|`CodeRef<React.ComponentType<TopConsumerPopoverProps>[]>` |yes |Shows -Top consumer popover instead of plain value. -|=== - -[discrete] -== `console.cluster-overview/utilization-item` - -Adds a new cluster overview utilization item. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`title` |`string` |no |The title of the utilization item. - -|`getUtilizationQuery` |`CodeRef<GetQuery>` |no |Prometheus utilization -query. - -|`humanize` |`CodeRef<Humanize>` |no |Convert Prometheus data to human-readable form. - -|`getTotalQuery` |`CodeRef<GetQuery>` |yes |Prometheus total query. - -|`getRequestQuery` |`CodeRef<GetQuery>` |yes |Prometheus request query. - -|`getLimitQuery` |`CodeRef<GetQuery>` |yes |Prometheus limit query. - -|`TopConsumerPopover` -|`CodeRef<React.ComponentType<TopConsumerPopoverProps>>` |yes |Shows Top -consumer popover instead of plain value. -|=== - -[discrete] -== `console.context-provider` - -Adds a new React context provider to the web console application root. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`provider` |`CodeRef<Provider<T>>` |no |Context Provider component. -|`useValueHook` |`CodeRef<() => T>` |no |Hook for the Context value. -|=== - -[discrete] -== `console.dashboards/card` - -Adds a new dashboard card. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`tab` |`string` |no |The ID of the dashboard tab to which the card will -be added. 
- -|`position` |`'LEFT' \| 'RIGHT' \| 'MAIN'` |no |The grid position of the -card on the dashboard. - -|`component` |`CodeRef<React.ComponentType<{}>>` |no |Dashboard card -component. - -|`span` |`OverviewCardSpan` |yes |Card's vertical span in the column. -Ignored for small screens; defaults to `12`. -|=== - -[discrete] -== `console.dashboards/custom/overview/detail/item` - -Adds an item to the Details card of Overview Dashboard. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`title` |`string` |no | Details card title - -|`component` |`CodeRef<React.ComponentType<{}>>` |no | The value, rendered by the OverviewDetailItem component - -|`valueClassName` |`string` |yes | Value for a className - -|`isLoading` |`CodeRef<() => boolean>` |yes | Function returning the loading state of the component - -| `error` | `CodeRef<() => string>` | yes | Function returning errors to be displayed by the component -|=== - -[discrete] -== `console.dashboards/overview/activity/resource` - -Adds an activity to the Activity Card of Overview Dashboard where the triggering of activity is based on watching a Kubernetes resource. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`k8sResource` |`CodeRef<FirehoseResource & { isList: true; }>` |no |The -utilization item to be replaced. - -|`component` |`CodeRef<React.ComponentType<K8sActivityProps<T>>>` |no -|The action component. - -|`isActivity` |`CodeRef<(resource: T) => boolean>` |yes |Function which -determines if the given resource represents the action. If not defined, -every resource represents activity. - -|`getTimestamp` |`CodeRef<(resource: T) => Date>` |yes |Time stamp for -the given action, which will be used for ordering. -|=== - -[discrete] -== `console.dashboards/overview/health/operator` - -Adds a health subsystem to the status card of the *Overview* dashboard, where the source of status is a Kubernetes REST API. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`title` |`string` |no |Title of Operators section in the pop-up menu. - -|`resources` |`CodeRef<FirehoseResource[]>` |no |Kubernetes resources -which will be fetched and passed to `healthHandler`. - -|`getOperatorsWithStatuses` |`CodeRef<GetOperatorsWithStatuses<T>>` |yes -|Resolves status for the Operators. - -|`operatorRowLoader` -|`CodeRef<React.ComponentType<OperatorRowProps<T>>>` |yes |Loader for -pop-up row component. - -|`viewAllLink` |`string` |yes |Links to all resources page. If not -provided, then a list page of the first resource from resources prop is -used. -|=== - -[discrete] -== `console.dashboards/overview/health/prometheus` - -Adds a health subsystem to the status card of Overview dashboard where the source of status is Prometheus. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`title` |`string` |no |The display name of the subsystem. - -|`queries` |`string[]` |no |The Prometheus queries. - -|`healthHandler` |`CodeRef<PrometheusHealthHandler>` |no |Resolve the -subsystem's health. - -|`additionalResource` |`CodeRef<FirehoseResource>` |yes |Additional -resource which will be fetched and passed to `healthHandler`. - -|`popupComponent` -|`CodeRef<React.ComponentType<PrometheusHealthPopupProps>>` |yes |Loader -for pop-up menu content. If defined, a health item is represented as a -link, which opens a pop-up menu with the given content. - -|`popupTitle` |`string` |yes |The title of the popover. 
- -|`disallowedControlPlaneTopology` |`string[]` |yes |Control plane -topology for which the subsystem should be hidden. -|=== - -[discrete] -== `console.dashboards/overview/health/resource` - -Adds a health subsystem to the status card of Overview dashboard where the source of status is a Kubernetes Resource. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`title` |`string` |no |The display name of the subsystem. - -|`resources` |`CodeRef<WatchK8sResources<T>>` |no |Kubernetes resources -that will be fetched and passed to `healthHandler`. - -|`healthHandler` |`CodeRef<ResourceHealthHandler<T>>` |no |Resolve the -subsystem's health. - -|`popupComponent` |`CodeRef<WatchK8sResults<T>>` |yes |Loader for pop-up menu content. If defined, a health item is represented as a link, which -opens a pop-up menu with the given content. - -|`popupTitle` |`string` |yes |The title of the popover. -|=== - -[discrete] -== `console.dashboards/overview/health/url` - -Adds a health subsystem to the status card of Overview dashboard where the source of status is a Kubernetes REST API. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`title` |`string` |no |The display name of the subsystem. - -|`url` |`string` |no |The URL to fetch data from. It will be prefixed -with base Kubernetes URL. - -|`healthHandler`|`CodeRef<URLHealthHandler<T, K8sResourceCommon \| K8sResourceCommon[]>>`|no |Resolve the subsystem's health. - -|`additionalResource` |`CodeRef<FirehoseResource>` |yes |Additional -resource which will be fetched and passed to `healthHandler`. - -|`popupComponent`|`CodeRef<React.ComponentType<{ healthResult?: T; healthResultError?: any; k8sResult?: FirehoseResult<R>; }>>`|yes |Loader for popup content. If defined, a health item will be -represented as a link which opens popup with given content. - -|`popupTitle` |`string` |yes |The title of the popover. -|=== - -[discrete] -== `console.dashboards/overview/inventory/item` - -Adds a resource tile to the overview inventory card. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`CodeRef<T>` |no |The model for `resource` which will be -fetched. Used to get the model's `label` or `abbr`. - -|`mapper` |`CodeRef<StatusGroupMapper<T, R>>` |yes |Function which maps -various statuses to groups. - -|`additionalResources` |`CodeRef<WatchK8sResources<R>>` |yes |Additional -resources which will be fetched and passed to the `mapper` function. -|=== - -[discrete] -== `console.dashboards/overview/inventory/item/group` - -Adds an inventory status group. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |The ID of the status group. - -|`icon` -|`CodeRef<React.ReactElement<any, string` \| `React.JSXElementConstructor<any>>>` -|no |React component representing the status group icon. -|=== - -[discrete] -== `console.dashboards/overview/inventory/item/replacement` - -Replaces an overview inventory card. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`CodeRef<T>` |no |The model for `resource` which will be fetched. Used to get the model's `label` or `abbr`. - -|`mapper` |`CodeRef<StatusGroupMapper<T, R>>` |yes |Function which maps -various statuses to groups. - -|`additionalResources` |`CodeRef<WatchK8sResources<R>>` |yes |Additional -resources which will be fetched and passed to the `mapper` function. 
-|=== - -[discrete] -== `console.dashboards/overview/prometheus/activity/resource` - -Adds an activity to the Activity Card of Prometheus Overview Dashboard where the triggering of activity is based on watching a Kubernetes resource. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`queries` |`string[]` |no |Queries to watch. - -|`component` |`CodeRef<React.ComponentType<PrometheusActivityProps>>` -|no |The action component. - -|`isActivity` |`CodeRef<(results: PrometheusResponse[]) => boolean>` -|yes |Function which determines if the given resource represents the -action. If not defined, every resource represents activity. -|=== - -[discrete] -== `console.dashboards/project/overview/item` - -Adds a resource tile to the project overview inventory card. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`CodeRef<T>` |no |The model for `resource` which will be -fetched. Used to get the model's `label` or `abbr`. - -|`mapper` |`CodeRef<StatusGroupMapper<T, R>>` |yes |Function which maps -various statuses to groups. - -|`additionalResources` |`CodeRef<WatchK8sResources<R>>` |yes |Additional -resources which will be fetched and passed to the `mapper` function. -|=== - -[discrete] -== `console.dashboards/tab` - -Adds a new dashboard tab, placed after the *Overview* tab. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |A unique tab identifier, used as tab link `href` -and when adding cards to this tab. - -|`navSection` |`'home' \| 'storage'` |no |Navigation section to which the tab belongs to. - -|`title` |`string` |no |The title of the tab. -|=== - -[discrete] -== `console.file-upload` - -This extension can be used to provide a handler for the file drop action on specific file extensions. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`fileExtensions` |`string[]` |no |Supported file extensions. - -|`handler` |`CodeRef<FileUploadHandler>` |no |Function which handles the -file drop action. -|=== - -[discrete] -== `console.flag` - -Gives full control over the web console feature flags. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`handler` |`CodeRef<FeatureFlagHandler>` |no |Used to set or unset arbitrary feature flags. -|=== - -[discrete] -== `console.flag/hookProvider` - -Gives full control over the web console feature flags with hook handlers. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`handler` |`CodeRef<FeatureFlagHandler>` |no |Used to set or unset arbitrary feature flags. -|=== - -[discrete] -== `console.flag/model` - -Adds a new web console feature flag driven by the presence of a `CustomResourceDefinition` (CRD) object on the cluster. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`flag` |`string` |no |The name of the flag to set after the CRD is detected. - -|`model` |`ExtensionK8sModel` |no |The model which refers to a -CRD. -|=== - -[discrete] -== `console.global-config` - -This extension identifies a resource used to manage the configuration of the cluster. A link to the resource will be added to the *Administration* -> *Cluster Settings* -> *Configuration* page. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |Unique identifier for the cluster config resource -instance. - -|`name` |`string` |no |The name of the cluster config resource instance. 
- -|`model` |`ExtensionK8sModel` |no |The model which refers to a cluster -config resource. - -|`namespace` |`string` |no |The namespace of the cluster config resource -instance. -|=== - -[discrete] -== `console.model-metadata` - -Customize the display of models by overriding values retrieved and generated through API discovery. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`ExtensionK8sGroupModel` |no |The model to customize. May -specify only a group, or optional version and kind. - -|`badge` |`ModelBadge` |yes |Whether to consider this model reference as -Technology Preview or Developer Preview. - -|`color` |`string` |yes |The color to associate to this model. - -|`label` |`string` |yes |Override the label. Requires `kind` be -provided. - -|`labelPlural` |`string` |yes |Override the plural label. Requires -`kind` be provided. - -|`abbr` |`string` |yes |Customize the abbreviation. Defaults to all -uppercase characters in `kind`, up to 4 characters long. Requires that `kind` is provided. -|=== - -[discrete] -== `console.navigation/href` - -This extension can be used to contribute a navigation item that points to a specific link in the UI. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |A unique identifier for this item. - -|`name` |`string` |no |The name of this item. - -|`href` |`string` |no |The link `href` value. - -|`perspective` |`string` |yes |The perspective ID to which this item -belongs to. If not specified, contributes to the default perspective. - -|`section` |`string` |yes |Navigation section to which this item belongs -to. If not specified, render this item as a top level link. - -|`dataAttributes` |`{ [key: string]: string; }` |yes |Adds data -attributes to the DOM. - -|`startsWith` |`string[]` |yes |Mark this item as active when the URL -starts with one of these paths. - -|`insertBefore` |`string` \| `string[]` |yes |Insert this item before the -item referenced here. For arrays, the first one found in order is used. - -|`insertAfter` |`string` \| `string[]` |yes |Insert this item after the -item referenced here. For arrays, the first one found in order is used. -`insertBefore` takes precedence. - -|`namespaced` |`boolean` |yes |If `true`, adds `/ns/active-namespace` to the end. - -|`prefixNamespaced` |`boolean` |yes |If `true`, adds `/k8s/ns/active-namespace` to the beginning. -|=== - -[discrete] -== `console.navigation/resource-cluster` - -This extension can be used to contribute a navigation item that points to a cluster resource details page. The K8s model of that resource can be used to define the navigation item. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |A unique identifier for this item. - -|`model` |`ExtensionK8sModel` |no |The model for which this navigation item -links to. - -|`perspective` |`string` |yes |The perspective ID to which this item -belongs to. If not specified, contributes to the default perspective. - -|`section` |`string` |yes |Navigation section to which this item belongs -to. If not specified, render this item as a top-level link. - -|`dataAttributes` |`{ [key: string]: string; }` |yes |Adds data -attributes to the DOM. - -|`startsWith` |`string[]` |yes |Mark this item as active when the URL -starts with one of these paths. - -|`insertBefore` |`string` \| `string[]` |yes |Insert this item before the -item referenced here. For arrays, the first one found in order is used. 
- -|`insertAfter` |`string` \| `string[]` |yes |Insert this item after the -item referenced here. For arrays, the first one found in order is used. -`insertBefore` takes precedence. - -|`name` |`string` |yes |Overrides the default name. If not supplied the -name of the link will equal the plural value of the model. -|=== - -[discrete] -== `console.navigation/resource-ns` - -This extension can be used to contribute a navigation item that points to a namespaced resource details page. The K8s model of that resource can be used to define the navigation item. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |A unique identifier for this item. - -|`model` |`ExtensionK8sModel` |no |The model for which this navigation item -links to. - -|`perspective` |`string` |yes |The perspective ID to which this item -belongs to. If not specified, contributes to the default perspective. - -|`section` |`string` |yes |Navigation section to which this item belongs -to. If not specified, render this item as a top-level link. - -|`dataAttributes` |`{ [key: string]: string; }` |yes |Adds data -attributes to the DOM. - -|`startsWith` |`string[]` |yes |Mark this item as active when the URL -starts with one of these paths. - -|`insertBefore` |`string \| string[]` |yes |Insert this item before the -item referenced here. For arrays, the first one found in order is used. - -|`insertAfter` |`string` \| `string[]` |yes |Insert this item after the -item referenced here. For arrays, the first one found in order is used. -`insertBefore` takes precedence. - -|`name` |`string` |yes |Overrides the default name. If not supplied the -name of the link will equal the plural value of the model. -|=== - -[discrete] -== `console.navigation/section` - -This extension can be used to define a new section of navigation items in the navigation tab. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |A unique identifier for this item. - -|`perspective` |`string` |yes |The perspective ID to which this item -belongs to. If not specified, contributes to the default perspective. - -|`dataAttributes` |`{ [key: string]: string; }` |yes |Adds data -attributes to the DOM. - -|`insertBefore` |`string` \| `string[]` |yes |Insert this item before the -item referenced here. For arrays, the first one found in order is used. - -|`insertAfter` |`string` \| `string[]` |yes |Insert this item after the -item referenced here. For arrays, the first one found in order is used. -`insertBefore` takes precedence. - -|`name` |`string` |yes |Name of this section. If not supplied, only a -separator will be shown above the section. -|=== - -[discrete] -== `console.navigation/separator` - -This extension can be used to add a separator between navigation items in the navigation. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |A unique identifier for this item. - -|`perspective` |`string` |yes |The perspective ID to which this item -belongs to. If not specified, contributes to the default perspective. - -|`section` |`string` |yes |Navigation section to which this item belongs -to. If not specified, render this item as a top level link. - -|`dataAttributes` |`{ [key: string]: string; }` |yes |Adds data -attributes to the DOM. - -|`insertBefore` |`string` \| `string[]` |yes |Insert this item before the -item referenced here. For arrays, the first one found in order is used. 
- -|`insertAfter` |`string` \| `string[]` |yes |Insert this item after the -item referenced here. For arrays, the first one found in order is used. -`insertBefore` takes precedence. -|=== - -[discrete] -== `console.page/resource/details` - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`ExtensionK8sGroupKindModel` |no |The model for which this -resource page links to. - -|`component` -|`CodeRef<React.ComponentType<{ match: match<{}>; namespace: string; model: ExtensionK8sModel; }>>` -|no |The component to be rendered when the route matches. -|=== - -[discrete] -== `console.page/resource/list` - -Adds new resource list page to Console router. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`ExtensionK8sGroupKindModel` |no |The model for which this -resource page links to. - -|`component` -|`CodeRef<React.ComponentType<{ match: match<{}>; namespace: string; model: ExtensionK8sModel; }>>` -|no |The component to be rendered when the route matches. -|=== - -[discrete] -== `console.page/route` - -Adds a new page to the web console router. See link:https://v5.reactrouter.com/[React Router]. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`component` -|`CodeRef<React.ComponentType<RouteComponentProps<{}, StaticContext, any>>>` -|no |The component to be rendered when the route matches. - -|`path` |`string` \| `string[]` |no |Valid URL path or array of paths that -`path-to-regexp@^1.7.0` understands. - -|`perspective` |`string` |yes |The perspective to which this page -belongs to. If not specified, contributes to all perspectives. - -|`exact` |`boolean` |yes |When true, will only match if the path matches -the `location.pathname` exactly. -|=== - -[discrete] -== `console.page/route/standalone` - -Adds a new standalone page, rendered outside the common page layout, to the web console router. See link:https://v5.reactrouter.com/[React Router]. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`component` -|`CodeRef<React.ComponentType<RouteComponentProps<{}, StaticContext, any>>>` -|no |The component to be rendered when the route matches. - -|`path` |`string` \| `string[]` |no |Valid URL path or array of paths that -`path-to-regexp@^1.7.0` understands. - -|`exact` |`boolean` |yes |When true, will only match if the path matches -the `location.pathname` exactly. -|=== - -[discrete] -== `console.perspective` - -This extension contributes a new perspective to the console, which enables customization of the navigation menu. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |The perspective identifier. - -|`name` |`string` |no |The perspective display name. - -|`icon` |`CodeRef<LazyComponent>` |no |The perspective display icon. - -|`landingPageURL` -|`CodeRef<(flags: { [key: string]: boolean; }, isFirstVisit: boolean) => string>` -|no |The function to get perspective landing page URL. - -|`importRedirectURL` |`CodeRef<(namespace: string) => string>` |no |The -function to get redirect URL for import flow. - -|`default` |`boolean` |yes |Whether the perspective is the default. -There can only be one default. 
- -|`defaultPins` |`ExtensionK8sModel[]` |yes |Default pinned resources on -the nav - -|`usePerspectiveDetection` |`CodeRef<() => [boolean, boolean]>` |yes -|The hook to detect default perspective -|=== - -[discrete] -== `console.project-overview/inventory-item` - -Adds a new inventory item into the *Project Overview* page. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`component` |`CodeRef<React.ComponentType<{ projectName: string; }>>` -|no |The component to be rendered. -|=== - -[discrete] -== `console.project-overview/utilization-item` - -Adds a new project overview utilization item. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`title` |`string` |no |The title of the utilization item. - -|`getUtilizationQuery` |`CodeRef<GetProjectQuery>` |no |Prometheus -utilization query. - -|`humanize` |`CodeRef<Humanize>` |no |Convert Prometheus data to human-readable form. - -|`getTotalQuery` |`CodeRef<GetProjectQuery>` |yes |Prometheus total -query. - -|`getRequestQuery` |`CodeRef<GetProjectQuery>` |yes |Prometheus request -query. - -|`getLimitQuery` |`CodeRef<GetProjectQuery>` |yes |Prometheus limit -query. - -|`TopConsumerPopover` -|`CodeRef<React.ComponentType<TopConsumerPopoverProps>>` |yes |Shows the top consumer popover instead of plain value. -|=== - -[discrete] -== `console.pvc/alert` - -This extension can be used to contribute custom alerts on the PVC details page. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`alert` |`CodeRef<React.ComponentType<{ pvc: K8sResourceCommon; }>>` -|no |The alert component. -|=== - -[discrete] -== `console.pvc/create-prop` - -This extension can be used to specify additional properties that will be used when creating PVC resources on the PVC list page. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`label` |`string` |no |Label for the create prop action. -|`path` |`string` |no |Path for the create prop action. -|=== - -[discrete] -== `console.pvc/delete` - -This extension allows hooking into deleting PVC resources. It can provide an alert with additional information and custom PVC delete logic. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`predicate` |`CodeRef<(pvc: K8sResourceCommon) => boolean>` |no -|Predicate that tells whether to use the extension or not. - -|`onPVCKill` |`CodeRef<(pvc: K8sResourceCommon) => Promise<void>>` |no -|Method for the PVC delete operation. - -|`alert` |`CodeRef<React.ComponentType<{ pvc: K8sResourceCommon; }>>` -|no |Alert component to show additional information. -|=== - -[discrete] -== `console.pvc/status` - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`priority` |`number` |no |Priority for the status component. A larger value means higher priority. - -|`status` |`CodeRef<React.ComponentType<{ pvc: K8sResourceCommon; }>>` -|no |The status component. - -|`predicate` |`CodeRef<(pvc: K8sResourceCommon) => boolean>` |no -|Predicate that tells whether to render the status component or not. -|=== - -[discrete] -== `console.redux-reducer` - -Adds new reducer to Console Redux store which operates on `plugins.<scope>` substate. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`scope` |`string` |no |The key to represent the reducer-managed -substate within the Redux state object. 
- -|`reducer` |`CodeRef<Reducer<any, AnyAction>>` |no |The reducer -function, operating on the reducer-managed substate. -|=== - -[discrete] -== `console.resource/create` - -This extension allows plugins to provide a custom component (i.e., wizard or form) for specific resources, which will be rendered, when users try to create a new resource instance. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`ExtensionK8sModel` |no |The model for which this create -resource page will be rendered - -|`component` -|`CodeRef<React.ComponentType<CreateResourceComponentProps>>` |no |The -component to be rendered when the model matches -|=== - -[discrete] -== `console.storage-class/provisioner` - -Adds a new storage class provisioner as an option during storage class creation. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`CSI` |`ProvisionerDetails` |yes | Container Storage Interface provisioner type - -|`OTHERS` -|`ProvisionerDetails` -|yes -|Other provisioner type -|=== - -[discrete] -== `console.storage-provider` - -This extension can be used to contribute a new storage provider to select, when attaching storage and a provider specific component. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`name` |`string` |no | Displayed name of the provider. - -|`Component` -|`CodeRef<React.ComponentType<Partial<RouteComponentProps<{}, StaticContext, any>>>>` -|no | Provider specific component to render. | -|=== - -[discrete] -== `console.tab` - -Adds a tab to a horizontal nav matching the `contextId`. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`contextId` |`string` |no | Context ID assigned to the horizontal nav in which the tab will be injected. Possible values: `dev-console-observe` - - -|`name` |`string` |no | The display label of the tab - -|`href` |`string` |no | The `href` appended to the existing URL - -|`component` -|`CodeRef<React.ComponentType<PageComponentProps<K8sResourceCommon>>>` -|no |Tab content component. -|=== - -[discrete] -== `console.tab/horizontalNav` - -This extension can be used to add a tab on the resource details page. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`ExtensionK8sKindVersionModel` |no |The model for which this -provider show tab. - -|`page` |`{ name: string; href: string; }` |no |The page to be show in -horizontal tab. It takes tab name as name and href of the tab - -|`component` -|`CodeRef<React.ComponentType<PageComponentProps<K8sResourceCommon>>>` -|no |The component to be rendered when the route matches. -|=== - -[discrete] -== `console.telemetry/listener` - -This component can be used to register a listener function receiving telemetry events. These events include user identification, page navigation, and other application specific events. The listener may use this data for reporting and analytics purposes. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`listener` |`CodeRef<TelemetryEventListener>` |no |Listen for telemetry -events -|=== - -[discrete] -== `console.topology/adapter/build` - -`BuildAdapter` contributes an adapter to adapt element to data that can be used by the Build component. 
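-
-As a brief illustration, an extension of this type might be declared as an entry in the plugin's `console-extensions.json` file along the following lines. This is a minimal sketch: `topology` and `getBuildAdapter` stand for a hypothetical exposed module and exported function of the plugin, not part of the SDK itself.
-
-[source,json]
-----
-{
-  "type": "console.topology/adapter/build",
-  "properties": {
-    "adapt": { "$codeRef": "topology.getBuildAdapter" }
-  }
-}
-----
-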
- -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`adapt` -|`CodeRef<(element: GraphElement) => AdapterDataType<BuildConfigData> \| undefined>` -|no |Adapter to adapt element to data that can be used by Build component. -|=== - -[discrete] -== `console.topology/adapter/network` - -`NetworkAdapater` contributes an adapter to adapt element to data that can be used by the `Networking` component. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`adapt` -|`CodeRef<(element: GraphElement) => NetworkAdapterType \| undefined>` -|no |Adapter to adapt element to data that can be used by Networking component. -|=== - -[discrete] -== `console.topology/adapter/pod` - -`PodAdapter` contributes an adapter to adapt element to data that can be used by the `Pod` component. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`adapt` -|`CodeRef<(element: GraphElement) => AdapterDataType<PodsAdapterDataType> \| undefined>` -|no |Adapter to adapt element to data that can be used by Pod component. | -|=== - -[discrete] -== `console.topology/component/factory` - -Getter for a `ViewComponentFactory`. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`getFactory` |`CodeRef<ViewComponentFactory>` |no |Getter for a `ViewComponentFactory`. -|=== - -[discrete] -== `console.topology/create/connector` - -Getter for the create connector function. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`getCreateConnector` |`CodeRef<CreateConnectionGetter>` |no |Getter for -the create connector function. -|=== - -[discrete] -== `console.topology/data/factory` - -Topology Data Model Factory Extension - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |Unique ID for the factory. - -|`priority` |`number` |no |Priority for the factory - -|`resources` |`WatchK8sResourcesGeneric` |yes |Resources to be fetched -from `useK8sWatchResources` hook. - -|`workloadKeys` |`string[]` |yes |Keys in resources containing -workloads. - -|`getDataModel` |`CodeRef<TopologyDataModelGetter>` |yes |Getter for the -data model factory. - -|`isResourceDepicted` |`CodeRef<TopologyDataModelDepicted>` |yes |Getter -for function to determine if a resource is depicted by this model factory. - -|`getDataModelReconciler` |`CodeRef<TopologyDataModelReconciler>` |yes -|Getter for function to reconcile data model after all extensions' models have loaded. -|=== - -[discrete] -== `console.topology/decorator/provider` - -Topology Decorator Provider Extension - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |ID for topology decorator specific to the extension -|`priority` |`number` |no |Priority for topology decorator specific to the extension -|`quadrant` |`TopologyQuadrant` |no |Quadrant for topology decorator specific to the extension -|`decorator` |`CodeRef<TopologyDecoratorGetter>` |no |Decorator specific to the extension | -|=== - -[discrete] -== `console.topology/details/resource-alert` - -`DetailsResourceAlert` contributes an alert for specific topology context or graph element. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |The ID of this alert. Used to save state if the -alert should not be shown after dismissed. 
- -|`contentProvider` -|`CodeRef<(element: GraphElement) => DetailsResourceAlertContent \| null>` -|no |Hook to return the contents of the alert. -|=== - -[discrete] -== `console.topology/details/resource-link` - -`DetailsResourceLink` contributes a link for specific topology context or graph element. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`link`|`CodeRef<(element: GraphElement) => React.Component \| undefined>` |no -|Return the resource link if provided, otherwise undefined. Use the `ResourceIcon` and `ResourceLink` properties for styles. - -|`priority` |`number` |yes |A higher priority factory will get the first -chance to create the link. -|=== - -[discrete] -== `console.topology/details/tab` - -`DetailsTab` contributes a tab for the topology details panel. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |A unique identifier for this details tab. - -|`label` |`string` |no |The tab label to display in the UI. - -|`insertBefore` |`string` \| `string[]` |yes |Insert this item before the -item referenced here. For arrays, the first one found in order is used. - -|`insertAfter` |`string` \| `string[]` |yes |Insert this item after the -item referenced here. For arrays, the first one found in order is -used. The `insertBefore` value takes precedence. -|=== - -[discrete] -== `console.topology/details/tab-section` - -`DetailsTabSection` contributes a section for a specific tab in the topology details panel. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |A unique identifier for this details tab section. - -|`tab` |`string` |no |The parent tab ID that this section should -contribute to. - -|`provider` |`CodeRef<DetailsTabSectionExtensionHook>` |no |A hook that -returns a component, or if null or undefined, renders in the -topology sidebar. SDK component: `<Section title=\{}>...` padded area - -|`section`|`CodeRef<(element: GraphElement, renderNull?: () => null) => React.Component \| undefined>` -|no |Deprecated: Fallback if no provider is defined. renderNull is a -no-op already. - -|`insertBefore` |`string` \| `string[]` |yes |Insert this item before the -item referenced here. For arrays, the first one found in order is used. - -|`insertAfter` |`string` \| `string[]` |yes |Insert this item after the -item referenced here. For arrays, the first one found in order is -used. The `insertBefore` value takes precedence. 
-|=== - -[discrete] -== `console.topology/display/filters` - -Topology Display Filters Extension - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`getTopologyFilters` |`CodeRef<() => TopologyDisplayOption[]>` |no | Getter for topology filters specific to the extension -|`applyDisplayOptions` |`CodeRef<TopologyApplyDisplayOptions>` |no | Function to apply filters to the model -|=== - -[discrete] -== `console.topology/relationship/provider` - -Topology relationship provider connector extension - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`provides` |`CodeRef<RelationshipProviderProvides>` |no |Use to determine if a connection can be created between the source and target node -|`tooltip` |`string` |no |Tooltip to show when connector operation is hovering over the drop target, for example, "Create a Visual Connector" -|`create` |`CodeRef<RelationshipProviderCreate>` |no |Callback to execute when connector is drop over target node to create a connection -|`priority` |`number` |no |Priority for relationship, higher will be preferred in case of multiple -|=== - -[discrete] -== `console.user-preference/group` - -This extension can be used to add a group on the console user-preferences page. It will appear as a vertical tab option on the console user-preferences page. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |ID used to identify the user preference group. - -|`label` |`string` |no |The label of the user preference group - -|`insertBefore` |`string` |yes |ID of user preference group before which -this group should be placed - -|`insertAfter` |`string` |yes |ID of user preference group after which -this group should be placed -|=== - -[discrete] -== `console.user-preference/item` - -This extension can be used to add an item to the user preferences group on the console user preferences page. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |ID used to identify the user preference item and -referenced in insertAfter and insertBefore to define the item order - -|`label` |`string` |no |The label of the user preference - -|`description` |`string` |no |The description of the user preference - -|`field` |`UserPreferenceField` |no |The input field options used to -render the values to set the user preference - -|`groupId` |`string` |yes |IDs used to identify the user preference -groups the item would belong to - -|`insertBefore` |`string` |yes |ID of user preference item before which -this item should be placed - -|`insertAfter` |`string` |yes |ID of user preference item after which -this item should be placed -|=== - -[discrete] -== `console.yaml-template` - -YAML templates for editing resources via the yaml editor. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`ExtensionK8sModel` |no |Model associated with the template. - -|`template` |`CodeRef<string>` |no |The YAML template. - -|`name` |`string` |no |The name of the template. Use the name `default` -to mark this as the default template. -|=== - -[discrete] -== `dev-console.add/action` - -This extension allows plugins to contribute an add action item to the add page of developer perspective. For example, a Serverless plugin can add a new action item for adding serverless functions to the add page of developer console. 
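-
-For illustration, such an action might be declared as an entry in the plugin's `console-extensions.json` file as shown in the following minimal sketch. The `href` value and the `icons.functionIcon` code reference are hypothetical, plugin-specific examples.
-
-[source,json]
-----
-{
-  "type": "dev-console.add/action",
-  "properties": {
-    "id": "serverless-function",
-    "label": "Create Serverless function",
-    "description": "Create a Serverless function from a Git repository.",
-    "href": "/serverless-function/ns/:namespace",
-    "icon": { "$codeRef": "icons.functionIcon" }
-  }
-}
-----
-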
- -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |ID used to identify the action. - -|`label` |`string` |no |The label of the action. - -|`description` |`string` |no |The description of the action. - -|`href` |`string` |no |The `href` to navigate to. - -|`groupId` |`string` |yes |IDs used to identify the action groups the -action would belong to. - -|`icon` |`CodeRef<React.ReactNode>` |yes |The perspective display icon. - -|`accessReview` |`AccessReviewResourceAttributes[]` |yes |Optional -access review to control the visibility or enablement of the action. -|=== - -[discrete] -== `dev-console.add/action-group` - -This extension allows plugins to contibute a group in the add page of developer console. Groups can be referenced by actions, which will be grouped together in the add action page based on their extension definition. For example, a Serverless plugin can contribute a Serverless group and together with multiple add actions. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |ID used to identify the action group - -|`name` |`string` |no |The title of the action group - -|`insertBefore` |`string` |yes |ID of action group before which this -group should be placed - -|`insertAfter` |`string` |yes |ID of action group after which this group -should be placed -|=== - -[discrete] -== `dev-console.import/environment` - -This extension can be used to specify extra build environment variable fields under the builder image selector in the developer console git import form. When set, the fields will override environment variables of the same name in the build section. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`imageStreamName` |`string` |no |Name of the image stream to provide -custom environment variables for - -|`imageStreamTags` |`string[]` |no |List of supported image stream tags - -|`environments` |`ImageEnvironment[]` |no |List of environment variables -|=== - -[discrete] -== `console.dashboards/overview/detail/item` - -Deprecated. use `CustomOverviewDetailItem` type instead - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`component` |`CodeRef<React.ComponentType<{}>>` |no |The value, based -on the `DetailItem` component -|=== - -[discrete] -== `console.page/resource/tab` - -Deprecated. Use `console.tab/horizontalNav` instead. Adds a new resource tab page to Console router. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`ExtensionK8sGroupKindModel` |no |The model for which this -resource page links to. - -|`component` -|`CodeRef<React.ComponentType<RouteComponentProps<{}, StaticContext, any>>>` -|no |The component to be rendered when the route matches. - -|`name` |`string` |no |The name of the tab. - -|`href` |`string` |yes |The optional `href` for the tab link. If not -provided, the first `path` is used. - -|`exact` |`boolean` |yes |When true, will only match if the path matches -the `location.pathname` exactly. 
-|=== diff --git a/modules/dynamic-provisioning-about.adoc b/modules/dynamic-provisioning-about.adoc deleted file mode 100644 index dc6ac59dbbdf..000000000000 --- a/modules/dynamic-provisioning-about.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc -// * microshift_storage/dynamic-provisioning-microshift.adoc - -:_content-type: CONCEPT -[id="about_{context}"] -= About dynamic provisioning - -The `StorageClass` resource object describes and classifies storage that can -be requested, as well as provides a means for passing parameters for -dynamically provisioned storage on demand. `StorageClass` objects can also -serve as a management mechanism for controlling different levels of -storage and access to the storage. Cluster Administrators (`cluster-admin`) - or Storage Administrators (`storage-admin`) define and create the -`StorageClass` objects that users can request without needing any detailed -knowledge about the underlying storage volume sources. - -The {product-title} persistent volume framework enables this functionality -and allows administrators to provision a cluster with persistent storage. -The framework also gives users a way to request those resources without -having any knowledge of the underlying infrastructure. - -Many storage types are available for use as persistent volumes in -{product-title}. While all of them can be statically provisioned by an -administrator, some types of storage are created dynamically using the -built-in provider and plugin APIs. diff --git a/modules/dynamic-provisioning-annotations.adoc b/modules/dynamic-provisioning-annotations.adoc deleted file mode 100644 index 52479a617ed5..000000000000 --- a/modules/dynamic-provisioning-annotations.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc -// * microshift_storage/dynamic-provisioning-microshift.adoc - - -[id="storage-class-annotations_{context}"] -= Storage class annotations - -To set a storage class as the cluster-wide default, add -the following annotation to your storage class metadata: - -[source,yaml] ----- -storageclass.kubernetes.io/is-default-class: "true" ----- - -For example: - -[source,yaml] ----- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - annotations: - storageclass.kubernetes.io/is-default-class: "true" -... ----- - -This enables any persistent volume claim (PVC) that does not specify a -specific storage class to automatically be provisioned through the -default storage class. However, your cluster can have more than one storage class, but only one of them can be the default storage class. - -[NOTE] -==== -The beta annotation `storageclass.beta.kubernetes.io/is-default-class` is -still working; however, it will be removed in a future release. -==== - -To set a storage class description, add the following annotation -to your storage class metadata: - -[source,yaml] ----- -kubernetes.io/description: My Storage Class Description ----- - -For example: - -[source,yaml] ----- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - annotations: - kubernetes.io/description: My Storage Class Description -... 
----- diff --git a/modules/dynamic-provisioning-available-plugins.adoc b/modules/dynamic-provisioning-available-plugins.adoc deleted file mode 100644 index 9b6559429b36..000000000000 --- a/modules/dynamic-provisioning-available-plugins.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc - -[id="available-plug-ins_{context}"] -= Available dynamic provisioning plugins - -{product-title} provides the following provisioner plugins, which have -generic implementations for dynamic provisioning that use the cluster's -configured provider's API to create new storage resources: - - -[options="header",cols="1,1,1"] -|=== - -|Storage type -|Provisioner plugin name -|Notes - -ifndef::openshift-dedicated,openshift-rosa[] -|{rh-openstack-first} Cinder -|`kubernetes.io/cinder` -| - -|{rh-openstack} Manila Container Storage Interface (CSI) -|`manila.csi.openstack.org` -|Once installed, the OpenStack Manila CSI Driver Operator and ManilaDriver automatically create the required storage classes for all available Manila share types needed for dynamic provisioning. -endif::openshift-dedicated,openshift-rosa[] - -|AWS Elastic Block Store (EBS) -|`kubernetes.io/aws-ebs` -|For dynamic provisioning when using multiple clusters in different zones, -tag each node with `Key=kubernetes.io/cluster/<cluster_name>,Value=<cluster_id>` -where `<cluster_name>` and `<cluster_id>` are unique per cluster. - -ifndef::openshift-dedicated,openshift-rosa[] -|Azure Disk -|`kubernetes.io/azure-disk` -| - -|Azure File -|`kubernetes.io/azure-file` -|The `persistent-volume-binder` service account requires permissions to create -and get secrets to store the Azure storage account and keys. -endif::openshift-dedicated,openshift-rosa[] - -ifndef::openshift-rosa[] -|GCE Persistent Disk (gcePD) -|`kubernetes.io/gce-pd` -|In multi-zone configurations, it is advisable to run one {product-title} -cluster per GCE project to avoid PVs from being created in zones where -no node in the current cluster exists. - -|{ibmpowerProductName} Virtual Server Block -|`powervs.csi.ibm.com` -|After installation, the IBM Power Virtual Server Block CSI Driver Operator and IBM Power Virtual Server Block CSI Driver automatically create the required storage classes for dynamic provisioning. -endif::openshift-rosa[] - -//|GlusterFS -//|`kubernetes.io/glusterfs` -//| - -//|Ceph RBD -//|`kubernetes.io/rbd` -//| - -//|Trident from NetApp -//|`netapp.io/trident` -//|Storage orchestrator for NetApp ONTAP, SolidFire, and E-Series storage. - -ifndef::openshift-dedicated,openshift-rosa[] -|link:https://www.vmware.com/support/vsphere.html[VMware vSphere] -|`kubernetes.io/vsphere-volume` -| -endif::openshift-dedicated,openshift-rosa[] - -//|HPE Nimble Storage -//|`hpe.com/nimble` -//|Dynamic provisioning of HPE Nimble Storage resources using the -//HPE Nimble Kube Storage Controller. - -|=== - -[IMPORTANT] -==== -Any chosen provisioner plugin also requires configuration for the relevant -cloud, host, or third-party provider as per the relevant documentation. 
-==== diff --git a/modules/dynamic-provisioning-aws-definition.adoc b/modules/dynamic-provisioning-aws-definition.adoc deleted file mode 100644 index 025245513c64..000000000000 --- a/modules/dynamic-provisioning-aws-definition.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc - -[id="aws-definition_{context}"] -= AWS Elastic Block Store (EBS) object definition - -.aws-ebs-storageclass.yaml -[source,yaml] ----- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: <storage-class-name> <1> -provisioner: kubernetes.io/aws-ebs -parameters: - type: io1 <2> - iopsPerGB: "10" <3> - encrypted: "true" <4> - kmsKeyId: keyvalue <5> - fsType: ext4 <6> ----- -<1> (required) Name of the storage class. The persistent volume claim uses this storage class for provisioning the associated persistent volumes. -<2> (required) Select from `io1`, `gp3`, `sc1`, `st1`. The default is `gp3`. -See the -link:http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html[AWS documentation] -for valid Amazon Resource Name (ARN) values. -<3> Optional: Only for *io1* volumes. I/O operations per second per GiB. -The AWS volume plugin multiplies this with the size of the requested -volume to compute IOPS of the volume. The value cap is 20,000 IOPS, which -is the maximum supported by AWS. See the -link:http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html[AWS documentation] -for further details. -<4> Optional: Denotes whether to encrypt the EBS volume. Valid values -are `true` or `false`. -<5> Optional: The full ARN of the key to use when encrypting the volume. -If none is supplied, but `encypted` is set to `true`, then AWS generates a -key. See the -link:http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html[AWS documentation] -for a valid ARN value. -<6> Optional: File system that is created on dynamically provisioned -volumes. This value is copied to the `fsType` field of dynamically -provisioned persistent volumes and the file system is created when the -volume is mounted for the first time. The default value is `ext4`. diff --git a/modules/dynamic-provisioning-azure-disk-definition.adoc b/modules/dynamic-provisioning-azure-disk-definition.adoc deleted file mode 100644 index 6b1fa8306bc4..000000000000 --- a/modules/dynamic-provisioning-azure-disk-definition.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc - -[id="azure-disk-definition_{context}"] -= Azure Disk object definition - -.azure-advanced-disk-storageclass.yaml -[source,yaml] ----- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: <storage-class-name> <1> -provisioner: kubernetes.io/azure-disk -volumeBindingMode: WaitForFirstConsumer <2> -allowVolumeExpansion: true -parameters: - kind: Managed <3> - storageaccounttype: Premium_LRS <4> -reclaimPolicy: Delete ----- -<1> Name of the storage class. The persistent volume claim uses this storage class for provisioning the associated persistent volumes. -<2> Using `WaitForFirstConsumer` is strongly recommended. This provisions the volume while allowing enough storage to schedule the pod on a free worker node from an available zone. -<3> Possible values are `Shared` (default), `Managed`, and `Dedicated`. 
-+ -[IMPORTANT] -==== -Red Hat only supports the use of `kind: Managed` in the storage class. - -With `Shared` and `Dedicated`, Azure creates unmanaged disks, while {product-title} creates a managed disk for machine OS (root) disks. But because Azure Disk does not allow the use of both managed and unmanaged disks on a node, unmanaged disks created with `Shared` or `Dedicated` cannot be attached to {product-title} nodes. -==== - -<4> Azure storage account SKU tier. Default is empty. Note that Premium VMs can attach both `Standard_LRS` and `Premium_LRS` disks, Standard VMs can only attach `Standard_LRS` disks, Managed VMs can only attach managed disks, and unmanaged VMs can only attach unmanaged disks. -+ -.. If `kind` is set to `Shared`, Azure creates all unmanaged disks in a few shared storage accounts in the same resource group as the cluster. -.. If `kind` is set to `Managed`, Azure creates new managed disks. -.. If `kind` is set to `Dedicated` and a `storageAccount` is specified, Azure uses the specified storage account for the new unmanaged disk in the same resource group as the cluster. For this to work: - * The specified storage account must be in the same region. - * Azure Cloud Provider must have write access to the storage account. -.. If `kind` is set to `Dedicated` and a `storageAccount` is not specified, Azure creates a new dedicated storage account for the new unmanaged disk in the same resource group as the cluster. diff --git a/modules/dynamic-provisioning-azure-file-considerations.adoc b/modules/dynamic-provisioning-azure-file-considerations.adoc deleted file mode 100644 index 5a89d1d66e99..000000000000 --- a/modules/dynamic-provisioning-azure-file-considerations.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent-storage-azure-file.adoc -// * post_installation_configuration/storage-configuration.adoc - -[id="azure-file-considerations_{context}"] -= Considerations when using Azure File - -The following file system features are not supported by the default Azure File storage class: - -* Symlinks -* Hard links -* Extended attributes -* Sparse files -* Named pipes - -Additionally, the owner user identifier (UID) of the Azure File mounted directory is different from the process UID of the container. The `uid` mount option can be specified in the `StorageClass` object to define -a specific user identifier to use for the mounted directory. - -The following `StorageClass` object demonstrates modifying the user and group identifier, along with enabling symlinks for the mounted directory. - -[source,yaml] ----- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: azure-file -mountOptions: - - uid=1500 <1> - - gid=1500 <2> - - mfsymlinks <3> -provisioner: kubernetes.io/azure-file -parameters: - location: eastus - skuName: Standard_LRS -reclaimPolicy: Delete -volumeBindingMode: Immediate ----- -<1> Specifies the user identifier to use for the mounted directory. -<2> Specifies the group identifier to use for the mounted directory. -<3> Enables symlinks. 
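-
-A persistent volume claim can then request storage from this class in the usual way. The following minimal sketch assumes the `azure-file` storage class shown above; the claim name and requested size are arbitrary examples.
-
-[source,yaml]
-----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: azure-file-claim
-spec:
-  accessModes:
-    - ReadWriteMany <1>
-  storageClassName: azure-file <2>
-  resources:
-    requests:
-      storage: 5Gi
-----
-<1> Azure File shares can be mounted by multiple nodes simultaneously.
-<2> Must match the name of the `StorageClass` object that defines the mount options.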
diff --git a/modules/dynamic-provisioning-azure-file-definition.adoc b/modules/dynamic-provisioning-azure-file-definition.adoc deleted file mode 100644 index 62301a8f7ce0..000000000000 --- a/modules/dynamic-provisioning-azure-file-definition.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc - - -:_content-type: PROCEDURE -[id="azure-file-definition_{context}"] -= Azure File object definition - -The Azure File storage class uses secrets to store the Azure storage account name -and the storage account key that are required to create an Azure Files share. These -permissions are created as part of the following procedure. - -.Procedure - -. Define a `ClusterRole` object that allows access to create and view secrets: -+ -[source,yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: -# name: system:azure-cloud-provider - name: <persistent-volume-binder-role> <1> -rules: -- apiGroups: [''] - resources: ['secrets'] - verbs: ['get','create'] ----- -<1> The name of the cluster role to view and create secrets. - -. Add the cluster role to the service account: -+ -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user <persistent-volume-binder-role> system:serviceaccount:kube-system:persistent-volume-binder ----- - -. Create the Azure File `StorageClass` object: -+ -[source,yaml] ----- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: <azure-file> <1> -provisioner: kubernetes.io/azure-file -parameters: - location: eastus <2> - skuName: Standard_LRS <3> - storageAccount: <storage-account> <4> -reclaimPolicy: Delete -volumeBindingMode: Immediate ----- -<1> Name of the storage class. The persistent volume claim uses this storage class for provisioning the associated persistent volumes. -<2> Location of the Azure storage account, such as `eastus`. Default is empty, meaning that a new Azure storage account will be created in the {product-title} cluster's location. -<3> SKU tier of the Azure storage account, such as `Standard_LRS`. Default is empty, meaning that a new Azure storage account will be created with the `Standard_LRS` SKU. -<4> Name of the Azure storage account. If a storage account is provided, then -`skuName` and `location` are ignored. If no storage account is provided, then -the storage class searches for any storage account that is associated with the -resource group for any accounts that match the defined `skuName` and `location`. diff --git a/modules/dynamic-provisioning-ceph-rbd-definition.adoc b/modules/dynamic-provisioning-ceph-rbd-definition.adoc deleted file mode 100644 index 6dbaed5ef5af..000000000000 --- a/modules/dynamic-provisioning-ceph-rbd-definition.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc - -[id="ceph-rbd-definition_{context}"] -= Ceph RBD object definition - -.ceph-storageclass.yaml -[source,yaml] ----- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: cephfs -provisioner: kubernetes.io/rbd -parameters: - monitors: 10.16.153.105:6789 <1> - adminId: admin <2> - adminSecretName: ceph-secret <3> - adminSecretNamespace: kube-system <4> - pool: kube <5> - userId: kube <6> - userSecretName: ceph-secret-user <7> - fsType: ext4 <8> - imageFormat: "2" <9> ----- -<1> (required) A comma-delimited list of Ceph monitors. 
-<2> Optional: Ceph client ID that is capable of creating images in the -pool. Default is `admin`. -<3> (required) Secret Name for `adminId`. The provided secret must have -type `kubernetes.io/rbd`. -<4> Optional: The namespace for `adminSecret`. Default is `default`. -<5> Optional: Ceph RBD pool. Default is `rbd`. -<6> Optional: Ceph client ID that is used to map the Ceph RBD image. -Default is the same as `adminId`. -<7> (required) The name of Ceph Secret for `userId` to map Ceph RBD image. -It must exist in the same namespace as PVCs. -<8> Optional: File system that is created on dynamically provisioned -volumes. This value is copied to the `fsType` field of dynamically -provisioned persistent volumes and the file system is created when the -volume is mounted for the first time. The default value is `ext4`. -<9> Optional: Ceph RBD image format. The default value is `2`. diff --git a/modules/dynamic-provisioning-change-default-class.adoc b/modules/dynamic-provisioning-change-default-class.adoc deleted file mode 100644 index 4692a88acf0e..000000000000 --- a/modules/dynamic-provisioning-change-default-class.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc -// * microshift_storage/dynamic-provisioning-microshift.adoc - - -[id="change-default-storage-class_{context}"] -= Changing the default storage class - -Use the following procedure to change the default storage class. - -For example, if you have two defined storage classes, `gp3` and `standard`, and you want to change the default storage class from `gp3` to `standard`. - -.Prerequisites - -* Access to the cluster with cluster-admin privileges. - -.Procedure - -To change the default storage class: - -. List the storage classes: -+ -[source,terminal] ----- -$ oc get storageclass ----- -+ -.Example output -[source,terminal] ----- -NAME TYPE -gp3 (default) kubernetes.io/aws-ebs <1> -standard kubernetes.io/aws-ebs ----- -<1> `(default)` indicates the default storage class. - -. Make the desired storage class the default. -+ -For the desired storage class, set the `storageclass.kubernetes.io/is-default-class` annotation to `true` by running the following command: -+ -[source,terminal] ----- -$ oc patch storageclass standard -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}' ----- -+ -[NOTE] -==== -You can have multiple default storage classes for a short time. However, you should ensure that only one default storage class exists eventually. - -With multiple default storage classes present, any persistent volume claim (PVC) requesting the default storage class (`pvc.spec.storageClassName`=nil) gets the most recently created default storage class, regardless of the default status of that storage class, and the administrator receives an alert in the alerts dashboard that there are multiple default storage classes, `MultipleDefaultStorageClasses`. - -// add xref to multi/no default SC module -==== - -. Remove the default storage class setting from the old default storage class. -+ -For the old default storage class, change the value of the `storageclass.kubernetes.io/is-default-class` annotation to `false` by running the following command: -+ -[source,terminal] ----- -$ oc patch storageclass gp3 -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}' ----- - -. 
Verify the changes: -+ -[source,terminal] ----- -$ oc get storageclass ----- -+ -.Example output -[source,terminal] ----- -NAME TYPE -gp3 kubernetes.io/aws-ebs -standard (default) kubernetes.io/aws-ebs ----- diff --git a/modules/dynamic-provisioning-cinder-definition.adoc b/modules/dynamic-provisioning-cinder-definition.adoc deleted file mode 100644 index 1a8e8698bc19..000000000000 --- a/modules/dynamic-provisioning-cinder-definition.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc - -[id="openstack-cinder-storage-class_{context}"] -= {rh-openstack} Cinder object definition - -.cinder-storageclass.yaml -[source,yaml] ----- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: <storage-class-name> <1> -provisioner: kubernetes.io/cinder -parameters: - type: fast <2> - availability: nova <3> - fsType: ext4 <4> ----- -<1> Name of the storage class. The persistent volume claim uses this storage class for provisioning the associated persistent volumes. -<2> Volume type created in Cinder. Default is empty. -<3> Availability Zone. If not specified, volumes are generally -round-robined across all active zones where the {product-title} cluster -has a node. -<4> File system that is created on dynamically provisioned volumes. This -value is copied to the `fsType` field of dynamically provisioned -persistent volumes and the file system is created when the volume is -mounted for the first time. The default value is `ext4`. diff --git a/modules/dynamic-provisioning-defining-storage-class.adoc b/modules/dynamic-provisioning-defining-storage-class.adoc deleted file mode 100644 index 38f61a83146e..000000000000 --- a/modules/dynamic-provisioning-defining-storage-class.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc -// * microshift_storage/dynamic-provisioning-microshift.adoc - - -[id="defining-storage-classes_{context}"] -= Defining a storage class - -`StorageClass` objects are currently a globally scoped object and must be -created by `cluster-admin` or `storage-admin` users. - -ifndef::microshift,openshift-rosa[] -[IMPORTANT] -==== -The Cluster Storage Operator might install a default storage class depending -on the platform in use. This storage class is owned and controlled by the -Operator. It cannot be deleted or modified beyond defining annotations -and labels. If different behavior is desired, you must define a custom -storage class. -==== -endif::microshift,openshift-rosa[] -ifdef::openshift-rosa[] -[IMPORTANT] -==== -The Cluster Storage Operator installs a default storage class. This storage class is owned and controlled by the Operator. It cannot be deleted or modified beyond defining annotations and labels. If different behavior is desired, you must define a custom storage class. -==== -endif::openshift-rosa[] - -The following sections describe the basic definition for a -`StorageClass` object and specific examples for each of the supported plugin types. 
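-
-For example, if the reclaim behavior of the default storage class is not desired, an additional custom class can be defined. The following minimal sketch assumes an AWS cluster using the in-tree EBS provisioner shown elsewhere in this document; the class name and parameters are arbitrary examples.
-
-[source,yaml]
-----
-kind: StorageClass
-apiVersion: storage.k8s.io/v1
-metadata:
-  name: custom-retain <1>
-provisioner: kubernetes.io/aws-ebs
-parameters:
-  type: gp3
-reclaimPolicy: Retain <2>
-allowVolumeExpansion: true
-----
-<1> Name of the custom storage class.
-<2> Retains the underlying volume after the associated claim is deleted, instead of the more common `Delete` policy.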
diff --git a/modules/dynamic-provisioning-gce-definition.adoc b/modules/dynamic-provisioning-gce-definition.adoc deleted file mode 100644 index 1dbe8c21a7cd..000000000000 --- a/modules/dynamic-provisioning-gce-definition.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc - -[id="gce-persistentdisk-storage-class_{context}"] -= GCE PersistentDisk (gcePD) object definition - -.gce-pd-storageclass.yaml -[source,yaml] ----- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: <storage-class-name> <1> -provisioner: kubernetes.io/gce-pd -parameters: - type: pd-standard <2> - replication-type: none -volumeBindingMode: WaitForFirstConsumer -allowVolumeExpansion: true -reclaimPolicy: Delete ----- -<1> Name of the storage class. The persistent volume claim uses this storage class for provisioning the associated persistent volumes. -<2> Select either `pd-standard` or `pd-ssd`. The default is `pd-standard`. diff --git a/modules/dynamic-provisioning-gluster-definition.adoc b/modules/dynamic-provisioning-gluster-definition.adoc deleted file mode 100644 index 51934a58e2d3..000000000000 --- a/modules/dynamic-provisioning-gluster-definition.adoc +++ /dev/null @@ -1,96 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc - -[id="gluster-definition_{context}"] -= GlusterFS object definition - -.glusterfs-storageclass.yaml -[source,yaml] ----- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: slow -provisioner: kubernetes.io/glusterfs -parameters: <1> - resturl: http://127.0.0.1:8081 <2> - restuser: admin <3> - secretName: heketi-secret <4> - secretNamespace: default <5> - gidMin: "40000" <6> - gidMax: "50000" <7> - volumeoptions: group metadata-cache, nl-cache on <8> - volumetype: replicate:3 <9> ----- -<1> Listed are mandatory and a few optional parameters. Refer to -link:https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/3.10/html-single/operations_guide/#sect_file_reg_storageclass[Registering a Storage Class] for additional parameters. -<2> link:https://github.com/heketi/heketi[heketi] (volume management REST -service for Gluster) URL that provisions GlusterFS volumes on demand. The -general format should be `{http/https}://{IPaddress}:{Port}`. This is a -mandatory parameter for the GlusterFS dynamic provisioner. If the heketi -service is exposed as a routable service in the {product-title}, it will -have a resolvable fully qualified domain name (FQDN) and heketi service URL. -<3> heketi user who has access to create volumes. This is typically `admin`. -<4> Identification of a Secret that contains a user password to use when -talking to heketi. An empty password will be used -when both `secretNamespace` and `secretName` are omitted. -The provided secret must be of type `"kubernetes.io/glusterfs"`. -<5> The namespace of mentioned `secretName`. An empty password will be used -when both `secretNamespace` and `secretName` are omitted. The provided -Secret must be of type `"kubernetes.io/glusterfs"`. -<6> Optional. The minimum value of the GID range for volumes of this -StorageClass. -<7> Optional. The maximum value of the GID range for volumes of this -StorageClass. -<8> Optional. Options for newly created volumes. It allows for -performance tuning. 
See -link:https://docs.gluster.org/en/v3/Administrator%20Guide/Managing%20Volumes/#tuning-volume-options[Tuning Volume Options] -for more GlusterFS volume options. -<9> Optional. The -link:https://docs.gluster.org/en/v3/Quick-Start-Guide/Architecture/[type of volume] -to use. - -[NOTE] -==== -When the `gidMin` and `gidMax` values are not specified, their defaults are -2000 and 2147483647 respectively. Each dynamically provisioned volume -will be given a GID in this range (`gidMin-gidMax`). This GID is released -from the pool when the respective volume is deleted. The GID pool is -per StorageClass. -If two or more storage classes have GID ranges that overlap there may be -duplicate GIDs dispatched by the provisioner. -==== - -When heketi authentication is used, a Secret containing the admin key must -also exist. - -[source,terminal] ----- -$ oc create secret generic heketi-secret --from-literal=key=<password> -n <namespace> --type=kubernetes.io/glusterfs ----- - -This results in the following configuration: - -.heketi-secret.yaml -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: heketi-secret - namespace: namespace - ... -data: - key: cGFzc3dvcmQ= <1> -type: kubernetes.io/glusterfs ----- -<1> base64 encoded password - -[NOTE] -==== -When the PVs are dynamically provisioned, the GlusterFS plugin -automatically creates an Endpoints and a headless Service named -`gluster-dynamic-<claimname>`. When the PVC is deleted, these dynamic -resources are deleted automatically. -==== diff --git a/modules/dynamic-provisioning-manila-csi-definition.adoc b/modules/dynamic-provisioning-manila-csi-definition.adoc deleted file mode 100644 index 844bcda0f8de..000000000000 --- a/modules/dynamic-provisioning-manila-csi-definition.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc - -[id="openstack-manila-csi-definition_{context}"] -= {rh-openstack} Manila Container Storage Interface (CSI) object definition - -Once installed, the OpenStack Manila CSI Driver Operator and ManilaDriver automatically create the required storage classes for all available Manila share types needed for dynamic provisioning. diff --git a/modules/dynamic-provisioning-storage-class-definition.adoc b/modules/dynamic-provisioning-storage-class-definition.adoc deleted file mode 100644 index 436a78ed6962..000000000000 --- a/modules/dynamic-provisioning-storage-class-definition.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc -// * microshift_storage/dynamic-provisioning-microshift.adoc - - -[id="basic-storage-class-definition_{context}"] -= Basic StorageClass object definition - -The following resource shows the parameters and default values that you -use to configure a storage class. This example uses the AWS -ElasticBlockStore (EBS) object definition. - - -.Sample `StorageClass` definition -[source,yaml] ----- -kind: StorageClass <1> -apiVersion: storage.k8s.io/v1 <2> -metadata: - name: <storage-class-name> <3> - annotations: <4> - storageclass.kubernetes.io/is-default-class: 'true' - ... -provisioner: kubernetes.io/aws-ebs <5> -parameters: <6> - type: gp3 -... ----- -<1> (required) The API object type. -<2> (required) The current apiVersion. -<3> (required) The name of the storage class. -<4> (optional) Annotations for the storage class. 
-<5> (required) The type of provisioner associated with this storage class. -<6> (optional) The parameters required for the specific provisioner; these -will vary from plugin to plugin. diff --git a/modules/dynamic-provisioning-vsphere-definition.adoc b/modules/dynamic-provisioning-vsphere-definition.adoc deleted file mode 100644 index ee169124a417..000000000000 --- a/modules/dynamic-provisioning-vsphere-definition.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following definitions: -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc - - -[id="vsphere-definition_{context}"] -= VMware vSphere object definition - -.vsphere-storageclass.yaml -[source,yaml] ----- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: <storage-class-name> <1> -provisioner: kubernetes.io/vsphere-volume <2> -parameters: - diskformat: thin <3> ----- -<1> Name of the storage class. The persistent volume claim uses this storage class for provisioning the associated persistent volumes. -<2> For more information about using VMware vSphere with {product-title}, -see the -link:https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/index.html[VMware vSphere documentation]. -<3> `diskformat`: `thin`, `zeroedthick` and `eagerzeroedthick` are all -valid disk formats. See vSphere docs for additional details regarding the -disk format types. The default value is `thin`. diff --git a/modules/eco-about-node-maintenance-standalone.adoc b/modules/eco-about-node-maintenance-standalone.adoc deleted file mode 100644 index 7482ac3101ca..000000000000 --- a/modules/eco-about-node-maintenance-standalone.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: CONCEPT -[id="eco-about-node-maintenance-operator_{context}"] -= About the Node Maintenance Operator - -The Node Maintenance Operator watches for new or deleted `NodeMaintenance` CRs. When a new `NodeMaintenance` CR is detected, no new workloads are scheduled and the node is cordoned off from the rest of the cluster. All pods that can be evicted are evicted from the node. When a `NodeMaintenance` CR is deleted, the node that is referenced in the CR is made available for new workloads. - -[NOTE] -==== -Using a `NodeMaintenance` CR for node maintenance tasks achieves the same results as the `oc adm cordon` and `oc adm drain` commands using standard {product-title} CR processing. -==== diff --git a/modules/eco-checking_status_of_node_maintenance_cr_tasks.adoc b/modules/eco-checking_status_of_node_maintenance_cr_tasks.adoc deleted file mode 100644 index 6a6df77203ea..000000000000 --- a/modules/eco-checking_status_of_node_maintenance_cr_tasks.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -//nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: PROCEDURE -[id="eco-checking_status_of_node_maintenance_cr_tasks_{context}"] -= Checking status of current NodeMaintenance CR tasks - -You can check the status of current `NodeMaintenance` CR tasks. - -.Prerequisites - -* Install the {product-title} CLI `oc`. -* Log in as a user with `cluster-admin` privileges.
- -.Procedure - -* Check the status of current node maintenance tasks, for example the `NodeMaintenance` CR or `nm` object, by running the following command: -+ -[source,terminal] ----- -$ oc get nm -o yaml ----- -+ -.Example output -+ -[source,yaml] ----- -apiVersion: v1 -items: -- apiVersion: nodemaintenance.medik8s.io/v1beta1 - kind: NodeMaintenance - metadata: -... - spec: - nodeName: node-1.example.com - reason: Node maintenance - status: - drainProgress: 100 <1> - evictionPods: 3 <2> - lastError: "Last failure message" <3> - lastUpdate: "2022-06-23T11:43:18Z" <4> - phase: Succeeded - totalpods: 5 <5> -... ----- -<1> The percentage completion of draining the node. -<2> The number of pods scheduled for eviction. -<3> The latest eviction error, if any. -<4> The last time the status was updated. -<5> The total number of pods before the node entered maintenance mode. diff --git a/modules/eco-configuring-control-plane-machine-health-check-with-self-node-remediation.adoc b/modules/eco-configuring-control-plane-machine-health-check-with-self-node-remediation.adoc deleted file mode 100644 index 6bf269cf942b..000000000000 --- a/modules/eco-configuring-control-plane-machine-health-check-with-self-node-remediation.adoc +++ /dev/null @@ -1,88 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/eco-poison-pill-operator.adoc - -:_content-type: PROCEDURE -[id="configuring-control-plane-machine-health-check-with-self-node-remediation-operator_{context}"] -= Configuring control-plane machine health checks to use the Self Node Remediation Operator - -Use the following procedure to configure the control-plane machine health checks to use the Self Node Remediation Operator as a remediation provider. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a `SelfNodeRemediationTemplate` CR: - -.. Define the `SelfNodeRemediationTemplate` CR: -+ -[source,yaml] ----- -apiVersion: self-node-remediation.medik8s.io/v1alpha1 -kind: SelfNodeRemediationTemplate -metadata: - namespace: openshift-machine-api - name: selfnoderemediationtemplate-sample -spec: - template: - spec: - remediationStrategy: ResourceDeletion <1> ----- -<1> Specifies the remediation strategy. The default strategy is `ResourceDeletion`. - -.. To create the `SelfNodeRemediationTemplate` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f <snrt-name>.yaml ----- - -. Create or update the `MachineHealthCheck` CR to point to the `SelfNodeRemediationTemplate` CR: - -.. Define or update the `MachineHealthCheck` CR: -+ -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineHealthCheck -metadata: - name: machine-health-check - namespace: openshift-machine-api -spec: - selector: - matchLabels: - machine.openshift.io/cluster-api-machine-role: "control-plane" - machine.openshift.io/cluster-api-machine-type: "control-plane" - unhealthyConditions: - - type: "Ready" - timeout: "300s" - status: "False" - - type: "Ready" - timeout: "300s" - status: "Unknown" - maxUnhealthy: "40%" - nodeStartupTimeout: "10m" - remediationTemplate: <1> - kind: SelfNodeRemediationTemplate - apiVersion: self-node-remediation.medik8s.io/v1alpha1 - name: selfnoderemediationtemplate-sample ----- -<1> Specifies the details for the remediation template. - -+ -.. To create a `MachineHealthCheck` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f <mhc-name>.yaml ----- - -.. 
To update a `MachineHealthCheck` CR, run the following command: -+ -[source,terminal] ----- -$ oc apply -f <mhc-name>.yaml ----- diff --git a/modules/eco-configuring-machine-health-check-with-self-node-remediation.adoc b/modules/eco-configuring-machine-health-check-with-self-node-remediation.adoc deleted file mode 100644 index 95cab1ac7228..000000000000 --- a/modules/eco-configuring-machine-health-check-with-self-node-remediation.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// *nodes/nodes/eco-poison-pill-operator.adoc - -:_content-type: PROCEDURE -[id="configuring-machine-health-check-with-self-node-remediation-operator_{context}"] -= Configuring machine health checks to use the Self Node Remediation Operator - -Use the following procedure to configure the worker or control-plane machine health checks to use the Self Node Remediation Operator as a remediation provider. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a `SelfNodeRemediationTemplate` CR: - -.. Define the `SelfNodeRemediationTemplate` CR: -+ -[source,yaml] ----- -apiVersion: self-node-remediation.medik8s.io/v1alpha1 -kind: SelfNodeRemediationTemplate -metadata: - namespace: openshift-machine-api - name: selfnoderemediationtemplate-sample -spec: - template: - spec: - remediationStrategy: ResourceDeletion <1> ----- -<1> Specifies the remediation strategy. The default strategy is `ResourceDeletion`. - -.. To create the `SelfNodeRemediationTemplate` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f <snrt-name>.yaml ----- - -. Create or update the `MachineHealthCheck` CR to point to the `SelfNodeRemediationTemplate` CR: - -.. Define or update the `MachineHealthCheck` CR: -+ -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineHealthCheck -metadata: - name: machine-health-check - namespace: openshift-machine-api -spec: - selector: - matchLabels: <1> - machine.openshift.io/cluster-api-machine-role: "worker" - machine.openshift.io/cluster-api-machine-type: "worker" - unhealthyConditions: - - type: "Ready" - timeout: "300s" - status: "False" - - type: "Ready" - timeout: "300s" - status: "Unknown" - maxUnhealthy: "40%" - nodeStartupTimeout: "10m" - remediationTemplate: <2> - kind: SelfNodeRemediationTemplate - apiVersion: self-node-remediation.medik8s.io/v1alpha1 - name: selfnoderemediationtemplate-sample ----- -<1> Selects whether the machine health check is for `worker` or `control-plane` nodes. The label can also be user-defined. -<2> Specifies the details for the remediation template. - -+ -.. To create a `MachineHealthCheck` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f <mhc-name>.yaml ----- - -.. 
To update a `MachineHealthCheck` CR, run the following command: -+ -[source,terminal] ----- -$ oc apply -f <mhc-name>.yaml ----- diff --git a/modules/eco-maintaining-bare-metal-nodes.adoc b/modules/eco-maintaining-bare-metal-nodes.adoc deleted file mode 100644 index 0f778eaa1888..000000000000 --- a/modules/eco-maintaining-bare-metal-nodes.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: CONCEPT -[id="eco-maintaining-bare-metal-nodes_{context}"] -= Maintaining bare-metal nodes - -When you deploy {product-title} on bare-metal infrastructure, you must take additional considerations into account compared to deploying on cloud infrastructure. Unlike in cloud environments, where the cluster nodes are considered ephemeral, reprovisioning a bare-metal node requires significantly more time and effort for maintenance tasks. - -When a bare-metal node fails due to a kernel error or a NIC card hardware failure, workloads on the failed node need to be restarted on another node in the cluster while the problem node is repaired or replaced. Node maintenance mode allows cluster administrators to gracefully turn-off nodes, move workloads to other parts of the cluster, and ensure that workloads do not get interrupted. Detailed progress and node status details are provided during maintenance. - diff --git a/modules/eco-node-health-check-operator-about.adoc b/modules/eco-node-health-check-operator-about.adoc deleted file mode 100644 index 04a60196e055..000000000000 --- a/modules/eco-node-health-check-operator-about.adoc +++ /dev/null @@ -1,96 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/eco-node-health-check-operator.adoc - -:_content-type: CONCEPT -[id="about-node-health-check-operator_{context}"] -= About the Node Health Check Operator - -The Node Health Check Operator detects the health of the nodes in a cluster. The `NodeHealthCheck` controller creates the `NodeHealthCheck` custom resource (CR), which defines a set of criteria and thresholds to determine the health of a node. - -The Node Health Check Operator also installs the Self Node Remediation Operator as a default remediation provider. - -When the Node Health Check Operator detects an unhealthy node, it creates a remediation CR that triggers the remediation provider. For example, the controller creates the `SelfNodeRemediation` CR, which triggers the Self Node Remediation Operator to remediate the unhealthy node. - -The `NodeHealthCheck` CR resembles the following YAML file: - -[source,yaml] ----- -apiVersion: remediation.medik8s.io/v1alpha1 -kind: NodeHealthCheck -metadata: - name: nodehealthcheck-sample -spec: - minHealthy: 51% <1> - pauseRequests: <2> - - <pause-test-cluster> - remediationTemplate: <3> - apiVersion: self-node-remediation.medik8s.io/v1alpha1 - name: self-node-remediation-resource-deletion-template - namespace: openshift-operators - kind: SelfNodeRemediationTemplate - selector: <4> - matchExpressions: - - key: node-role.kubernetes.io/worker - operator: Exists - unhealthyConditions: <5> - - type: Ready - status: "False" - duration: 300s <6> - - type: Ready - status: Unknown - duration: 300s <6> ----- - -<1> Specifies the amount of healthy nodes(in percentage or number) required for a remediation provider to concurrently remediate nodes in the targeted pool. If the number of healthy nodes equals to or exceeds the limit set by `minHealthy`, remediation occurs. The default value is 51%. 
-<2> Prevents any new remediation from starting, while allowing any ongoing remediations to persist. The default value is empty. However, you can enter an array of strings that identify the cause of pausing the remediation. For example, `pause-test-cluster`. -+ -[NOTE] -==== -During the upgrade process, nodes in the cluster might become temporarily unavailable and get identified as unhealthy. In the case of worker nodes, when the Operator detects that the cluster is upgrading, it stops remediating new unhealthy nodes to prevent such nodes from rebooting. -==== -<3> Specifies a remediation template from the remediation provider. For example, from the Self Node Remediation Operator. -<4> Specifies a `selector` that matches labels or expressions that you want to check. The default value is empty, which selects all nodes. -<5> Specifies a list of the conditions that determine whether a node is considered unhealthy. -<6> Specifies the timeout duration for a node condition. If a condition is met for the duration of the timeout, the node will be remediated. Long timeouts can result in long periods of downtime for a workload on an unhealthy node. - -[id="understanding-nhc-operator-workflow_{context}"] -== Understanding the Node Health Check Operator workflow - -When a node is identified as unhealthy, the Node Health Check Operator checks how many other nodes are unhealthy. If the number of healthy nodes exceeds the amount that is specified in the `minHealthy` field of the `NodeHealthCheck` CR, the controller creates a remediation CR from the details that are provided in the external remediation template by the remediation provider. After remediation, the kubelet updates the node's health status. - -When the node turns healthy, the controller deletes the external remediation template. - -[id="how-nhc-prevent-conflict-with-mhc_{context}"] -== About how node health checks prevent conflicts with machine health checks - -When both, node health checks and machine health checks are deployed, the node health check avoids conflict with the machine health check. - -[NOTE] -==== -{product-title} deploys `machine-api-termination-handler` as the default `MachineHealthCheck` resource. -==== - -The following list summarizes the system behavior when node health checks and machine health checks are deployed: - -* If only the default machine health check exists, the node health check continues to identify unhealthy nodes. However, the node health check ignores unhealthy nodes in a Terminating state. The default machine health check handles the unhealthy nodes with a Terminating state. -+ -.Example log message -[source,terminal] ----- -INFO MHCChecker ignoring unhealthy Node, it is terminating and will be handled by MHC {"NodeName": "node-1.example.com"} ----- - -* If the default machine health check is modified (for example, the `unhealthyConditions` is `Ready`), or if additional machine health checks are created, the node health check is disabled. -+ -.Example log message ----- -INFO controllers.NodeHealthCheck disabling NHC in order to avoid conflict with custom MHCs configured in the cluster {"NodeHealthCheck": "/nhc-worker-default"} ----- - -* When, again, only the default machine health check exists, the node health check is re-enabled. 
-+ -.Example log message ----- -INFO controllers.NodeHealthCheck re-enabling NHC, no conflicting MHC configured in the cluster {"NodeHealthCheck": "/nhc-worker-default"} ----- diff --git a/modules/eco-node-health-check-operator-control-plane-fencing.adoc b/modules/eco-node-health-check-operator-control-plane-fencing.adoc deleted file mode 100644 index b8276e8d11b8..000000000000 --- a/modules/eco-node-health-check-operator-control-plane-fencing.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/eco-node-health-check-operator.adoc - -:_content-type: CONCEPT -[id="control-plane-fencing-node-health-check-operator_{context}"] -= Control plane fencing - -In earlier releases, you could enable Self Node Remediation and Node Health Check on worker nodes. In the event of node failure, you can now also follow remediation strategies on control plane nodes. - -Do not use the same `NodeHealthCheck` CR for worker nodes and control plane nodes. Grouping worker nodes and control plane nodes together can result in incorrect evaluation of the minimum healthy node count, and cause unexpected or missing remediations. This is because of the way the Node Health Check Operator handles control plane nodes. You should group the control plane nodes in their own group and the worker nodes in their own group. If required, you can also create multiple groups of worker nodes. - -Considerations for remediation strategies: - -* Avoid Node Health Check configurations that involve multiple configurations overlapping the same nodes because they can result in unexpected behavior. This suggestion applies to both worker and control plane nodes. -* The Node Health Check Operator implements a hardcoded limitation of remediating a maximum of one control plane node at a time. Multiple control plane nodes should not be remediated at the same time. diff --git a/modules/eco-node-health-check-operator-creating-node-health-check.adoc b/modules/eco-node-health-check-operator-creating-node-health-check.adoc deleted file mode 100644 index 0e4c36d42d36..000000000000 --- a/modules/eco-node-health-check-operator-creating-node-health-check.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * ../nodes/nodes/eco-node-health-check-operator.adoc - -:_content-type: PROCEDURE -[id="eco-node-health-check-operator-creating-node-health-check_{context}"] -= Creating a node health check -Using the web console, you can create a node health check to identify unhealthy nodes and specify the remediation type and strategy to fix them. - -.Procedure - -. From the *Administrator* perspective of the {product-title} web console, click *Compute* -> *NodeHealthChecks* -> *CreateNodeHealthCheck*. -. Specify whether to configure the node health check using the *Form view* or the *YAML view*. -. Enter a *Name* for the node health check. The name must consist of lower case, alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. -. Specify the *Remediator* type, and *Self node remediation* or *Other*. The Self node remediation option is part of the Self Node Remediation Operator that is installed with the Node Health Check Operator. Selecting *Other* requires an *API version*, *Kind*, *Name*, and *Namespace* to be entered, which then points to the remediation template resource of a remediator. -. Make a *Nodes* selection by specifying the labels of the nodes you want to remediate. The selection matches labels that you want to check. 
If more than one label is specified, the nodes must contain each label. The default value is empty, which selects both worker and control-plane nodes. -+ -[NOTE] -==== -When creating a node health check with the Self Node Remediation Operator, you must select either `node-role.kubernetes.io/worker` or `node-role.kubernetes.io/control-plane` as the value. -==== -+ -. Specify the minimum number of healthy nodes, using either a percentage or a number, required for a *NodeHealthCheck* to remediate nodes in the targeted pool. If the number of healthy nodes equals to or exceeds the limit set by *Min healthy*, remediation occurs. The default value is 51%. -. Specify a list of *Unhealthy conditions* that if a node meets determines whether the node is considered unhealthy, and requires remediation. You can specify the *Type*, *Status* and *Duration*. You can also create your own custom type. -. Click *Create* to create the node health check. - -.Verification - -* Navigate to the *Compute* -> *NodeHealthCheck* page and verify that the corresponding node health check is listed, and their status displayed. Once created, node health checks can be paused, modified, and deleted. diff --git a/modules/eco-node-health-check-operator-installation-cli.adoc b/modules/eco-node-health-check-operator-installation-cli.adoc deleted file mode 100644 index bca1ea8e8f6c..000000000000 --- a/modules/eco-node-health-check-operator-installation-cli.adoc +++ /dev/null @@ -1,114 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/node-health-check-operator-installation.adoc - -:_content-type: PROCEDURE -[id="installing-node-health-check-operator-using-cli_{context}"] -= Installing the Node Health Check Operator by using the CLI -You can use the OpenShift CLI (`oc`) to install the Node Health Check Operator. - -To install the Operator in your own namespace, follow the steps in the procedure. - -To install the Operator in the `openshift-operators` namespace, skip to step 3 of the procedure because the steps to create a new `Namespace` custom resource (CR) and an `OperatorGroup` CR are not required. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a `Namespace` custom resource (CR) for the Node Health Check Operator: -.. Define the `Namespace` CR and save the YAML file, for example, `node-health-check-namespace.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: node-health-check ----- -.. To create the `Namespace` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f node-health-check-namespace.yaml ----- - -. Create an `OperatorGroup` CR: -.. Define the `OperatorGroup` CR and save the YAML file, for example, `node-health-check-operator-group.yaml`: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: node-health-check-operator - namespace: node-health-check ----- -.. To create the `OperatorGroup` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f node-health-check-operator-group.yaml ----- - -. Create a `Subscription` CR: -.. 
Define the `Subscription` CR and save the YAML file, for example, `node-health-check-subscription.yaml`: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: node-health-check-operator - namespace: node-health-check <1> -spec: - channel: stable <2> - installPlanApproval: Manual <3> - name: node-healthcheck-operator - source: redhat-operators - sourceNamespace: openshift-marketplace - package: node-healthcheck-operator ----- -<1> Specify the `Namespace` where you want to install the Node Health Check Operator. To install the Node Health Check Operator in the `openshift-operators` namespace, specify `openshift-operators` in the `Subscription` CR. -<2> Specify the channel name for your subscription. To upgrade to the latest version of the Node Health Check Operator, you must manually change the channel name for your subscription from `candidate` to `stable`. -<3> Set the approval strategy to Manual in case your specified version is superseded by a later version in the catalog. This plan prevents an automatic upgrade to a later version and requires manual approval before the starting CSV can complete the installation. - -.. To create the `Subscription` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f node-health-check-subscription.yaml ----- - -.Verification - -. Verify that the installation succeeded by inspecting the CSV resource: -+ -[source,terminal] ----- -$ oc get csv -n openshift-operators ----- -+ -.Example output - -[source,terminal] ----- -NAME DISPLAY VERSION REPLACES PHASE -node-healthcheck-operator.v0.2.0. Node Health Check Operator 0.2.0 Succeeded ----- -. Verify that the Node Health Check Operator is up and running: -+ -[source,terminal] ----- -$ oc get deploy -n openshift-operators ----- -+ -.Example output - -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -node-health-check-operator-controller-manager 1/1 1 1 10d ----- diff --git a/modules/eco-node-health-check-operator-installation-web-console.adoc b/modules/eco-node-health-check-operator-installation-web-console.adoc deleted file mode 100644 index 76c8924f9e90..000000000000 --- a/modules/eco-node-health-check-operator-installation-web-console.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/node-health-check-operator-installation.adoc - -:_content-type: PROCEDURE -[id="installing-node-health-check-operator-using-web-console_{context}"] -= Installing the Node Health Check Operator by using the web console - -You can use the {product-title} web console to install the Node Health Check Operator. - -.Prerequisites - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. -. Search for the Node Health Check Operator, then click *Install*. -. Keep the default selection of *Installation mode* and *namespace* to ensure that the Operator will be installed to the `openshift-operators` namespace. -. Ensure that the *Console plug-in* is set to `Enable`. -. Click *Install*. - -.Verification - -To confirm that the installation is successful: - -. Navigate to the *Operators* -> *Installed Operators* page. -. Check that the Operator is installed in the `openshift-operators` namespace and that its status is `Succeeded`. - -If the Operator is not installed successfully: - -. Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. -. 
Navigate to the *Workloads* -> *Pods* page and check the logs in any pods in the `openshift-operators` project that are reporting issues. diff --git a/modules/eco-node-maintenance-operator-installation-cli.adoc b/modules/eco-node-maintenance-operator-installation-cli.adoc deleted file mode 100644 index c17565495b4d..000000000000 --- a/modules/eco-node-maintenance-operator-installation-cli.adoc +++ /dev/null @@ -1,120 +0,0 @@ -// Module included in the following assemblies: -// -// nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: PROCEDURE -[id="installing-maintenance-operator-using-cli_{context}"] -= Installing the Node Maintenance Operator by using the CLI -You can use the OpenShift CLI (`oc`) to install the Node Maintenance Operator. - -You can install the Node Maintenance Operator in your own namespace or in the `openshift-operators` namespace. - -To install the Operator in your own namespace, follow the steps in the procedure. - -To install the Operator in the `openshift-operators` namespace, skip to step 3 of the procedure because the steps to create a new `Namespace` custom resource (CR) and an `OperatorGroup` CR are not required. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a `Namespace` CR for the Node Maintenance Operator: -.. Define the `Namespace` CR and save the YAML file, for example, `node-maintenance-namespace.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: nmo-test ----- -.. To create the `Namespace` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f node-maintenance-namespace.yaml ----- - -. Create an `OperatorGroup` CR: -.. Define the `OperatorGroup` CR and save the YAML file, for example, `node-maintenance-operator-group.yaml`: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: node-maintenance-operator - namespace: nmo-test ----- -.. To create the `OperatorGroup` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f node-maintenance-operator-group.yaml ----- - -. Create a `Subscription` CR: -.. Define the `Subscription` CR and save the YAML file, for example, `node-maintenance-subscription.yaml`: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: node-maintenance-operator - namespace: nmo-test <1> -spec: - channel: stable - installPlanApproval: Automatic - name: node-maintenance-operator - source: redhat-operators - sourceNamespace: openshift-marketplace - startingCSV: node-maintenance-operator.v{product-version}.0 ----- -+ -<1> Specify the `Namespace` where you want to install the Node Maintenance Operator. -+ -[IMPORTANT] -==== -To install the Node Maintenance Operator in the `openshift-operators` namespace, specify `openshift-operators` in the `Subscription` CR. -==== - -.. To create the `Subscription` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f node-maintenance-subscription.yaml ----- - -.Verification - -. Verify that the installation succeeded by inspecting the CSV resource: -+ -[source,terminal] ----- -$ oc get csv -n openshift-operators ----- -+ -.Example output - -[source,terminal,subs="attributes+"] ----- -NAME DISPLAY VERSION REPLACES PHASE -node-maintenance-operator.v{product-version} Node Maintenance Operator {product-version} Succeeded ----- -.
Verify that the Node Maintenance Operator is running: -+ -[source,terminal] ----- -$ oc get deploy -n openshift-operators ----- -+ -.Example output - -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -node-maintenance-operator-controller-manager 1/1 1 1 10d ----- diff --git a/modules/eco-node-maintenance-operator-installation-web-console.adoc b/modules/eco-node-maintenance-operator-installation-web-console.adoc deleted file mode 100644 index 319561f3bbca..000000000000 --- a/modules/eco-node-maintenance-operator-installation-web-console.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: PROCEDURE -[id="installing-node-maintenance-operator-using-web-console_{context}"] -= Installing the Node Maintenance Operator by using the web console - -You can use the {product-title} web console to install the Node Maintenance Operator. - -.Prerequisites - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. -. Search for the Node Maintenance Operator, then click *Install*. -. Keep the default selection of *Installation mode* and *namespace* to ensure that the Operator will be installed to the `openshift-operators` namespace. -. Click *Install*. - -.Verification - -To confirm that the installation is successful: - -. Navigate to the *Operators* -> *Installed Operators* page. -. Check that the Operator is installed in the `openshift-operators` namespace and that its status is `Succeeded`. - -If the Operator is not installed successfully: - -. Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. -. Navigate to the *Operators* -> *Installed Operators* -> *Node Maintenance Operator* -> *Details* page, and inspect the `Conditions` section for errors before pod creation. -. Navigate to the *Workloads* -> *Pods* page, search for the `Node Maintenance Operator` pod in the installed namespace, and check the logs in the `Logs` tab. diff --git a/modules/eco-resuming-node-maintenance-actions-web-console.adoc b/modules/eco-resuming-node-maintenance-actions-web-console.adoc deleted file mode 100644 index 85004f7c989c..000000000000 --- a/modules/eco-resuming-node-maintenance-actions-web-console.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -//nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: PROCEDURE -[id="eco-resuming-node-maintenance-actions-web-console_{context}"] -= Resuming a bare-metal node from maintenance mode -Resume a bare-metal node from maintenance mode using the Options menu {kebab} found on each node in the *Compute* -> *Nodes* list, or using the *Actions* control of the *Node Details* screen. - -.Procedure - -. From the *Administrator* perspective of the web console, click *Compute* -> *Nodes*. -. You can resume the node from this screen, which makes it easier to perform actions on multiple nodes, or from the *Node Details* screen, where you can view comprehensive details of the selected node: -** Click the Options menu {kebab} at the end of the node and select -*Stop Maintenance*. -** Click the node name to open the *Node Details* screen and click -*Actions* -> *Stop Maintenance*. -. Click *Stop Maintenance* in the confirmation window. - -The node becomes schedulable. 
If it had virtual machine instances that were running on the node prior to maintenance, then they will not automatically migrate back to this node. - -.Verification - -* Navigate to the *Compute* -> *Nodes* page and verify that the corresponding node has a status of `Ready`. diff --git a/modules/eco-resuming-node-maintenance-cr-cli.adoc b/modules/eco-resuming-node-maintenance-cr-cli.adoc deleted file mode 100644 index 0dbbed583f85..000000000000 --- a/modules/eco-resuming-node-maintenance-cr-cli.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -//nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: PROCEDURE -[id="eco-resuming-node-from-maintenance-mode-with-cr_{context}"] -= Resuming a node from maintenance mode by using the CLI - -You can resume a node from maintenance mode that was initiated with a `NodeMaintenance` CR by deleting the `NodeMaintenance` CR. - -.Prerequisites - -* Install the {product-title} CLI `oc`. -* Log in to the cluster as a user with `cluster-admin` privileges. - -.Procedure - -* When your node maintenance task is complete, delete the active `NodeMaintenance` CR: -+ -[source,terminal] ----- -$ oc delete -f nodemaintenance-cr.yaml ----- -+ -.Example output -+ -[source,terminal] ----- -nodemaintenance.nodemaintenance.medik8s.io "maintenance-example" deleted ----- - -.Verification - -. Check the progress of the maintenance task by running the following command: -+ -[source,terminal] ----- -$ oc describe node <node-name> ----- -+ -where `<node-name>` is the name of your node; for example, `node-1.example.com` - -. Check the example output: -+ -[source,terminal] ----- -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal NodeSchedulable 2m kubelet Node node-1.example.com status is now: NodeSchedulable ----- diff --git a/modules/eco-resuming-node-maintenance-cr-web-console.adoc b/modules/eco-resuming-node-maintenance-cr-web-console.adoc deleted file mode 100644 index 87703af9ebc6..000000000000 --- a/modules/eco-resuming-node-maintenance-cr-web-console.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -//nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: PROCEDURE -[id="eco-resuming-node-maintenance-web-console_{context}"] -= Resuming a node from maintenance mode by using the web console - -To resume a node from maintenance mode, you can delete a `NodeMaintenance` custom resource (CR) by using the web console. - -.Prerequisites - -* Log in as a user with `cluster-admin` privileges. -* Install the Node Maintenance Operator from the *OperatorHub*. - -.Procedure - -. From the *Administrator* perspective in the web console, navigate to *Operators* → *Installed Operators*. - -. Select the Node Maintenance Operator from the list of Operators. - -. In the *Node Maintenance* tab, select the `NodeMaintenance` CR that you want to delete. - -. Click the Options menu {kebab} at the end of the node and select *Delete NodeMaintenance*. - -.Verification - -. In the {product-title} console, click *Compute → Nodes*. - -. Inspect the `Status` column of the node for which you deleted the `NodeMaintenance` CR and verify that its status is `Ready`. 
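For reference, the `NodeMaintenance` CR that the preceding procedures create and delete resembles the following minimal sketch. It is assembled from the fields shown in the earlier status example; the CR name and node name are illustrative values only:

.Example `NodeMaintenance` CR
[source,yaml]
----
apiVersion: nodemaintenance.medik8s.io/v1beta1
kind: NodeMaintenance
metadata:
  name: maintenance-example # illustrative name, matching the earlier CLI example output
spec:
  nodeName: node-1.example.com # node to place into maintenance mode
  reason: "Node maintenance" # free-form text that records why the node is in maintenance
----

Applying a CR like this cordons and drains the node, and deleting it, as described in the preceding procedures, makes the node schedulable again.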
diff --git a/modules/eco-self-node-remediation-about-watchdog.adoc b/modules/eco-self-node-remediation-about-watchdog.adoc deleted file mode 100644 index 0b7a1af0cc56..000000000000 --- a/modules/eco-self-node-remediation-about-watchdog.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/eco-self-node-remediation-operator.adoc - -:_content-type: CONCEPT -[id="about-watchdog-devices_{context}"] -= About watchdog devices - -Watchdog devices can be any of the following: - -* Independently powered hardware devices -* Hardware devices that share power with the hosts they control -* Virtual devices implemented in software, or `softdog` - -Hardware watchdog and `softdog` devices have electronic or software timers, respectively. These watchdog devices are used to ensure that the machine enters a safe state when an error condition is detected. The cluster is required to repeatedly reset the watchdog timer to prove that it is in a healthy state. This timer might elapse due to fault conditions, such as deadlocks, CPU starvation, and loss of network or disk access. If the timer expires, the watchdog device assumes that a fault has occurred and the device triggers a forced reset of the node. - -Hardware watchdog devices are more reliable than `softdog` devices. - -[id="understanding-pp-watchdog_{context}"] -== Understanding Self Node Remediation Operator behavior with watchdog devices - -The Self Node Remediation Operator determines the remediation strategy based on the watchdog devices that are present. - -If a hardware watchdog device is configured and available, the Operator uses it for remediation. If a hardware watchdog device is not configured, the Operator enables and uses a `softdog` device for remediation. - -If neither watchdog devices are supported, either by the system or by the configuration, the Operator remediates nodes by using software reboot. \ No newline at end of file diff --git a/modules/eco-self-node-remediation-operator-about.adoc b/modules/eco-self-node-remediation-operator-about.adoc deleted file mode 100644 index cf4ffa94d28a..000000000000 --- a/modules/eco-self-node-remediation-operator-about.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/eco-self-node-remediation-operator.adoc - -:_content-type: CONCEPT -[id="about-self-node-remediation-operator_{context}"] -= About the Self Node Remediation Operator - -The Self Node Remediation Operator runs on the cluster nodes and reboots nodes that are identified as unhealthy. The Operator uses the `MachineHealthCheck` or `NodeHealthCheck` controller to detect the health of a node in the cluster. When a node is identified as unhealthy, the `MachineHealthCheck` or the `NodeHealthCheck` resource creates the `SelfNodeRemediation` custom resource (CR), which triggers the Self Node Remediation Operator. - -The `SelfNodeRemediation` CR resembles the following YAML file: - -[source,yaml] ----- -apiVersion: self-node-remediation.medik8s.io/v1alpha1 -kind: SelfNodeRemediation -metadata: - name: selfnoderemediation-sample - namespace: openshift-operators -spec: -status: - lastError: <last_error_message> <1> ----- - -<1> Displays the last error that occurred during remediation. When remediation succeeds or if no errors occur, the field is left empty. - -The Self Node Remediation Operator minimizes downtime for stateful applications and restores compute capacity if transient failures occur. 
You can use this Operator regardless of the management interface, such as IPMI or an API to provision a node, and regardless of the cluster installation type, such as installer-provisioned infrastructure or user-provisioned infrastructure. diff --git a/modules/eco-self-node-remediation-operator-configuring.adoc b/modules/eco-self-node-remediation-operator-configuring.adoc deleted file mode 100644 index 2345a3bdd55b..000000000000 --- a/modules/eco-self-node-remediation-operator-configuring.adoc +++ /dev/null @@ -1,95 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/eco-self-node-remediation-operator.adoc - -:_content-type: CONCEPT -[id="configuring-self-node-remediation-operator_{context}"] -= Configuring the Self Node Remediation Operator - -The Self Node Remediation Operator creates the `SelfNodeRemediationConfig` CR and the `SelfNodeRemediationTemplate` Custom Resource Definition (CRD). - -[id="understanding-self-node-remediation-operator-config_{context}"] -== Understanding the Self Node Remediation Operator configuration - -The Self Node Remediation Operator creates the `SelfNodeRemediationConfig` CR with the name `self-node-remediation-config`. The CR is created in the namespace of the Self Node Remediation Operator. - -A change in the `SelfNodeRemediationConfig` CR re-creates the Self Node Remediation daemon set. - -The `SelfNodeRemediationConfig` CR resembles the following YAML file: - -[source,yaml] ----- -apiVersion: self-node-remediation.medik8s.io/v1alpha1 -kind: SelfNodeRemediationConfig -metadata: - name: self-node-remediation-config - namespace: openshift-operators -spec: - safeTimeToAssumeNodeRebootedSeconds: 180 <1> - watchdogFilePath: /dev/watchdog <2> - isSoftwareRebootEnabled: true <3> - apiServerTimeout: 15s <4> - apiCheckInterval: 5s <5> - maxApiErrorThreshold: 3 <6> - peerApiServerTimeout: 5s <7> - peerDialTimeout: 5s <8> - peerRequestTimeout: 5s <9> - peerUpdateInterval: 15m <10> ----- - -<1> Specify the timeout duration for the surviving peer, after which the Operator can assume that an unhealthy node has been rebooted. The Operator automatically calculates the lower limit for this value. However, if different nodes have different watchdog timeouts, you must change this value to a higher value. -<2> Specify the file path of the watchdog device in the nodes. If you enter an incorrect path to the watchdog device, the Self Node Remediation Operator automatically detects the softdog device path. -+ -If a watchdog device is unavailable, the `SelfNodeRemediationConfig` CR uses a software reboot. -<3> Specify if you want to enable software reboot of the unhealthy nodes. By default, the value of `isSoftwareRebootEnabled` is set to `true`. To disable the software reboot, set the parameter value to `false`. -<4> Specify the timeout duration to check connectivity with each API server. When this duration elapses, the Operator starts remediation. The timeout duration must be greater than or equal to 10 milliseconds. -<5> Specify the frequency to check connectivity with each API server. The timeout duration must be greater than or equal to 1 second. -<6> Specify a threshold value. After reaching this threshold, the node starts contacting its peers. The threshold value must be greater than or equal to 1 second. -<7> Specify the duration of the timeout for the peer to connect the API server. The timeout duration must be greater than or equal to 10 milliseconds. -<8> Specify the duration of the timeout for establishing connection with the peer. 
The timeout duration must be greater than or equal to 10 milliseconds. -<9> Specify the duration of the timeout to get a response from the peer. The timeout duration must be greater than or equal to 10 milliseconds. -<10> Specify the frequency to update peer information, such as IP address. The timeout duration must be greater than or equal to 10 seconds. - -[NOTE] -==== -You can edit the `self-node-remediation-config` CR that is created by the Self Node Remediation Operator. However, when you try to create a new CR for the Self Node Remediation Operator, the following message is displayed in the logs: - -[source,text] ----- -controllers.SelfNodeRemediationConfig -ignoring selfnoderemediationconfig CRs that are not named 'self-node-remediation-config' -or not in the namespace of the operator: -'openshift-operators' {"selfnoderemediationconfig": -"openshift-operators/selfnoderemediationconfig-copy"} ----- -==== - -[id="understanding-self-node-remediation-remediation-template-config_{context}"] -== Understanding the Self Node Remediation Template configuration - -The Self Node Remediation Operator also creates the `SelfNodeRemediationTemplate` Custom Resource Definition (CRD). This CRD defines the remediation strategy for the nodes. The following remediation strategies are available: - -`ResourceDeletion`:: This remediation strategy removes the pods and associated volume attachments on the node rather than the node object. This strategy helps to recover workloads faster. `ResourceDeletion` is the default remediation strategy. - -`NodeDeletion`:: This remediation strategy is deprecated and will be removed in a future release. In the current release, the `ResourceDeletion` strategy is used even if the `NodeDeletion` strategy is selected. - -The Self Node Remediation Operator creates the `SelfNodeRemediationTemplate` CR for the strategy `self-node-remediation-resource-deletion-template`, which the `ResourceDeletion` remediation strategy uses. - -The `SelfNodeRemediationTemplate` CR resembles the following YAML file: - -[source,yaml] ----- -apiVersion: self-node-remediation.medik8s.io/v1alpha1 -kind: SelfNodeRemediationTemplate -metadata: - creationTimestamp: "2022-03-02T08:02:40Z" - name: self-node-remediation-<remediation_object>-deletion-template <1> - namespace: openshift-operators -spec: - template: - spec: - remediationStrategy: <remediation_strategy> <2> ----- -<1> Specifies the type of remediation template based on the remediation strategy. Replace `<remediation_object>` with either `resource` or `node`; for example, `self-node-remediation-resource-deletion-template`. -//<2> Specifies the remediation strategy. The remediation strategy can either be `ResourceDeletion` or `NodeDeletion`. -<2> Specifies the remediation strategy. The remediation strategy is `ResourceDeletion`. diff --git a/modules/eco-self-node-remediation-operator-control-plane-fencing.adoc b/modules/eco-self-node-remediation-operator-control-plane-fencing.adoc deleted file mode 100644 index d34ae660e348..000000000000 --- a/modules/eco-self-node-remediation-operator-control-plane-fencing.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/eco-node-health-check-operator.adoc - -:_content-type: CONCEPT -[id="control-plane-fencing-self-node-remediation-operator_{context}"] -= Control plane fencing - -In earlier releases, you could enable Self Node Remediation and Node Health Check on worker nodes. 
In the event of node failure, you can now also follow remediation strategies on control plane nodes. - -Self Node Remediation occurs in two primary scenarios. - -* API Server Connectivity -** In this scenario, the control plane node to be remediated is not isolated. It can be directly connected to the API Server, or it can be indirectly connected to the API Server through worker nodes or control-plane nodes, that are directly connected to the API Server. -** When there is API Server Connectivity, the control plane node is remediated only if the Node Health Check Operator has created a `SelfNodeRemediation` custom resource (CR) for the node. - -* No API Server Connectivity -** In this scenario, the control plane node to be remediated is isolated from the API Server. The node cannot connect directly or indirectly to the API Server. -** When there is no API Server Connectivity, the control plane node will be remediated as outlined with these steps: - - -*** Check the status of the control plane node with the majority of the peer worker nodes. If the majority of the peer worker nodes cannot be reached, the node will be analyzed further. -**** Self-diagnose the status of the control plane node -***** If self diagnostics passed, no action will be taken. -***** If self diagnostics failed, the node will be fenced and remediated. -***** The self diagnostics currently supported are checking the `kubelet` service status, and checking endpoint availability using `opt in` configuration. -*** If the node did not manage to communicate to most of its worker peers, check the connectivity of the control plane node with other control plane nodes. If the node can communicate with any other control plane peer, no action will be taken. Otherwise, the node will be fenced and remediated. diff --git a/modules/eco-self-node-remediation-operator-installation-cli.adoc b/modules/eco-self-node-remediation-operator-installation-cli.adoc deleted file mode 100644 index e68ea8230e84..000000000000 --- a/modules/eco-self-node-remediation-operator-installation-cli.adoc +++ /dev/null @@ -1,147 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/eco-self-node-remediation-operator.adoc - -:_content-type: PROCEDURE -[id="installing-self-node-remediation-operator-using-cli_{context}"] -= Installing the Self Node Remediation Operator by using the CLI - -You can use the OpenShift CLI (`oc`) to install the Self Node Remediation Operator. - -You can install the Self Node Remediation Operator in your own namespace or in the `openshift-operators` namespace. - -To install the Operator in your own namespace, follow the steps in the procedure. - -To install the Operator in the `openshift-operators` namespace, skip to step 3 of the procedure because the steps to create a new `Namespace` custom resource (CR) and an `OperatorGroup` CR are not required. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a `Namespace` custom resource (CR) for the Self Node Remediation Operator: -.. Define the `Namespace` CR and save the YAML file, for example, `self-node-remediation-namespace.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: self-node-remediation ----- -.. To create the `Namespace` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f self-node-remediation-namespace.yaml ----- - -. Create an `OperatorGroup` CR: -.. 
Define the `OperatorGroup` CR and save the YAML file, for example, `self-node-remediation-operator-group.yaml`: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: self-node-remediation-operator - namespace: self-node-remediation ----- -.. To create the `OperatorGroup` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f self-node-remediation-operator-group.yaml ----- - -. Create a `Subscription` CR: -.. Define the `Subscription` CR and save the YAML file, for example, `self-node-remediation-subscription.yaml`: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: self-node-remediation-operator - namespace: self-node-remediation <1> -spec: - channel: stable - installPlanApproval: Manual <2> - name: self-node-remediation-operator - source: redhat-operators - sourceNamespace: openshift-marketplace - package: self-node-remediation ----- -<1> Specify the `Namespace` where you want to install the Self Node Remediation Operator. To install the Self Node Remediation Operator in the `openshift-operators` namespace, specify `openshift-operators` in the `Subscription` CR. -<2> Set the approval strategy to Manual in case your specified version is superseded by a later version in the catalog. This plan prevents an automatic upgrade to a later version and requires manual approval before the starting CSV can complete the installation. - -.. To create the `Subscription` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f self-node-remediation-subscription.yaml ----- - -.Verification - -. Verify that the installation succeeded by inspecting the CSV resource: -+ -[source,terminal] ----- -$ oc get csv -n self-node-remediation ----- -+ -.Example output -[source,terminal] ----- -NAME DISPLAY VERSION REPLACES PHASE -self-node-remediation.v.0.4.0 Self Node Remediation Operator v.0.4.0 Succeeded ----- - -. Verify that the Self Node Remediation Operator is up and running: -+ -[source,terminal] ----- -$ oc get deploy -n self-node-remediation ----- -+ -.Example output -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -self-node-remediation-controller-manager 1/1 1 1 28h ----- - -. Verify that the Self Node Remediation Operator created the `SelfNodeRemediationConfig` CR: -+ -[source,terminal] ----- -$ oc get selfnoderemediationconfig -n self-node-remediation ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -self-node-remediation-config 28h ----- -. Verify that each self node remediation pod is scheduled and running on each worker node: -+ -[source,terminal] ----- -$ oc get daemonset -n self-node-remediation ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE -self-node-remediation-ds 3 3 3 3 3 <none> 28h ----- -+ -[NOTE] -==== -This command is unsupported for the control plane nodes. 
-==== \ No newline at end of file diff --git a/modules/eco-self-node-remediation-operator-installation-web-console.adoc b/modules/eco-self-node-remediation-operator-installation-web-console.adoc deleted file mode 100644 index 1f44cdb63811..000000000000 --- a/modules/eco-self-node-remediation-operator-installation-web-console.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// *nodes/nodes/eco-self-node-remediation-operator.adoc - -:_content-type: PROCEDURE -[id="installing-self-node-remediation-operator-using-web-console_{context}"] -= Installing the Self Node Remediation Operator by using the web console - -You can use the {product-title} web console to install the Self Node Remediation Operator. - -[NOTE] -==== -The Node Health Check Operator also installs the Self Node Remediation Operator as a default remediation provider. -==== - -.Prerequisites - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. -. Search for the Self Node Remediation Operator from the list of available Operators, and then click *Install*. -. Keep the default selection of *Installation mode* and *namespace* to ensure that the Operator is installed to the `openshift-operators` namespace. -. Click *Install*. - -.Verification - -To confirm that the installation is successful: - -. Navigate to the *Operators* -> *Installed Operators* page. -. Check that the Operator is installed in the `openshift-operators` namespace and its status is `Succeeded`. - -If the Operator is not installed successfully: - -. Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. -. Navigate to the *Workloads* -> *Pods* page and check the logs in any pods in the `self-node-remediation-controller-manager` project that are reporting issues. diff --git a/modules/eco-self-node-remediation-operator-troubleshooting.adoc b/modules/eco-self-node-remediation-operator-troubleshooting.adoc deleted file mode 100644 index 26935f3bcbf0..000000000000 --- a/modules/eco-self-node-remediation-operator-troubleshooting.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/eco-self-node-remediation-operator.adoc - -:_content-type: REFERENCE -[id="troubleshooting-self-node-remediation-operator_{context}"] -= Troubleshooting the Self Node Remediation Operator - -[id="general-troubleshooting-self-node-remediation-operator_{context}"] -== General troubleshooting - -Issue:: -You want to troubleshoot issues with the Self Node Remediation Operator. - -Resolution:: -Check the Operator logs. - -[id="checking-daemon-set_{context}"] -== Checking the daemon set -Issue:: The Self Node Remediation Operator is installed but the daemon set is not available. - -Resolution:: Check the Operator logs for errors or warnings. - -[id="unsuccessful_remediation{context}"] -== Unsuccessful remediation -Issue:: An unhealthy node was not remediated. - -Resolution:: Verify that the `SelfNodeRemediation` CR was created by running the following command: -+ -[source,terminal] ----- -$ oc get snr -A ----- -+ -If the `MachineHealthCheck` controller did not create the `SelfNodeRemediation` CR when the node turned unhealthy, check the logs of the `MachineHealthCheck` controller. Additionally, ensure that the `MachineHealthCheck` CR includes the required specification to use the remediation template. 
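+
As a minimal illustration only, the remediation template reference in a `MachineHealthCheck` CR might look like the following sketch. The selector labels, the unhealthy condition timeout, the `apiVersion` of the template reference, and the template name are assumptions for this example and must match the `SelfNodeRemediationTemplate` CR that exists in your cluster:
+
[source,yaml]
----
apiVersion: machine.openshift.io/v1beta1
kind: MachineHealthCheck
metadata:
  name: worker-health-check
  namespace: openshift-machine-api
spec:
  selector:
    matchLabels:
      machine.openshift.io/cluster-api-machine-role: worker  # example selector; adjust to the machines you want covered
  unhealthyConditions:
  - type: Ready
    status: "False"
    timeout: 300s
  remediationTemplate:  # delegates remediation to the Self Node Remediation Operator
    apiVersion: self-node-remediation.medik8s.io/v1alpha1  # assumed API group; verify against your installed CRDs
    kind: SelfNodeRemediationTemplate
    name: <self-node-remediation-template>
----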
-+ -If the `SelfNodeRemediation` CR was created, ensure that its name matches the unhealthy node or the machine object. - -[id="daemon-set-exists_{context}"] -== Daemon set and other Self Node Remediation Operator resources exist even after uninstalling the Operator -Issue:: The Self Node Remediation Operator resources, such as the daemon set, configuration CR, and the remediation template CR, exist even after after uninstalling the Operator. - -Resolution:: To remove the Self Node Remediation Operator resources, delete the resources by running the following commands for each resource type: -+ -[source,terminal] ----- -$ oc delete ds <self-node-remediation-ds> -n <namespace> ----- -+ -[source,terminal] ----- -$ oc delete snrc <self-node-remediation-config> -n <namespace> ----- -+ -[source,terminal] ----- -$ oc delete snrt <self-node-remediation-template> -n <namespace> ----- \ No newline at end of file diff --git a/modules/eco-setting-node-maintenance-actions-web-console.adoc b/modules/eco-setting-node-maintenance-actions-web-console.adoc deleted file mode 100644 index 40caf350883c..000000000000 --- a/modules/eco-setting-node-maintenance-actions-web-console.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -//nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: PROCEDURE -[id="eco-setting-node-maintenance-actions-web-console_{context}"] -= Setting a bare-metal node to maintenance mode -Set a bare-metal node to maintenance mode using the Options menu {kebab} found on each node in the *Compute* -> *Nodes* list, or using the *Actions* control of the *Node Details* screen. - -.Procedure - -. From the *Administrator* perspective of the web console, click *Compute* -> *Nodes*. -. You can set the node to maintenance from this screen, which makes it easier to perform actions on multiple nodes, or from the *Node Details* screen, where you can view comprehensive details of the selected node: -** Click the Options menu {kebab} at the end of the node and select *Start Maintenance*. -** Click the node name to open the *Node Details* screen and click -*Actions* -> *Start Maintenance*. -. Click *Start Maintenance* in the confirmation window. - -The node is no longer schedulable. If it had virtual machines with the `LiveMigration` eviction strategy, then it will live migrate them. All other pods and virtual machines on the node are deleted and recreated on another node. - -.Verification - -* Navigate to the *Compute* -> *Nodes* page and verify that the corresponding node has a status of `Under maintenance`. diff --git a/modules/eco-setting-node-maintenance-cr-cli.adoc b/modules/eco-setting-node-maintenance-cr-cli.adoc deleted file mode 100644 index 31ef35f46993..000000000000 --- a/modules/eco-setting-node-maintenance-cr-cli.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -//nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: PROCEDURE -[id="eco-setting-node-maintenance-cr-cli_{context}"] -= Setting a node to maintenance mode by using the CLI - -You can put a node into maintenance mode with a `NodeMaintenance` custom resource (CR). When you apply a `NodeMaintenance` CR, all allowed pods are evicted and the node is rendered unschedulable. Evicted pods are queued to be moved to another node in the cluster. - -.Prerequisites - -* Install the {product-title} CLI `oc`. -* Log in to the cluster as a user with `cluster-admin` privileges. - -.Procedure - -. 
Create the following `NodeMaintenance` CR, and save the file as `nodemaintenance-cr.yaml`: -+ -[source,yaml] ----- -apiVersion: nodemaintenance.medik8s.io/v1beta1 -kind: NodeMaintenance -metadata: - name: nodemaintenance-cr <1> -spec: - nodeName: node-1.example.com <2> - reason: "NIC replacement" <3> ----- -<1> The name of the node maintenance CR. -<2> The name of the node to be put into maintenance mode. -<3> A plain text description of the reason for maintenance. -+ -. Apply the node maintenance CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f nodemaintenance-cr.yaml ----- - -.Verification - -. Check the progress of the maintenance task by running the following command: -+ -[source,terminal] ----- -$ oc describe node <node-name> ----- -+ -where `<node-name>` is the name of your node; for example, `node-1.example.com` - -. Check the example output: -+ -[source,terminal] ----- -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal NodeNotSchedulable 61m kubelet Node node-1.example.com status is now: NodeNotSchedulable ----- diff --git a/modules/eco-setting-node-maintenance-cr-web-console.adoc b/modules/eco-setting-node-maintenance-cr-web-console.adoc deleted file mode 100644 index 77b6371406ef..000000000000 --- a/modules/eco-setting-node-maintenance-cr-web-console.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -//nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: PROCEDURE -[id="eco-setting-node-maintenance-web-console_{context}"] -= Setting a node to maintenance mode by using the web console - -To set a node to maintenance mode, you can create a `NodeMaintenance` custom resource (CR) by using the web console. - -.Prerequisites - -* Log in as a user with `cluster-admin` privileges. -* Install the Node Maintenance Operator from the *OperatorHub*. - -.Procedure - -. From the *Administrator* perspective in the web console, navigate to *Operators* → *Installed Operators*. - -. Select the Node Maintenance Operator from the list of Operators. - -. In the *Node Maintenance* tab, click *Create NodeMaintenance*. - -. In the *Create NodeMaintenance* page, select the *Form view* or the *YAML view* to configure the `NodeMaintenance` CR. - -. To apply the `NodeMaintenance` CR that you have configured, click *Create*. - -.Verification - -In the *Node Maintenance* tab, inspect the `Status` column and verify that its status is `Succeeded`. diff --git a/modules/edge-machine-pools-aws-local-zones.adoc b/modules/edge-machine-pools-aws-local-zones.adoc deleted file mode 100644 index 302d29b2b059..000000000000 --- a/modules/edge-machine-pools-aws-local-zones.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// * installing/installing_aws/installing-aws-localzone.adoc - -:_content-type: CONCEPT -[id="edge-machine-pools-aws-local-zones_{context}"] -= Edge compute pools and AWS Local Zones - -Edge worker nodes are tainted worker nodes that run in AWS Local Zones locations. - -When deploying a cluster that uses Local Zones: - -* Amazon EC2 instances in the Local Zones are more expensive than Amazon EC2 instances in the Availability Zones. -* Latency between applications and end users is lower in Local Zones, and it may vary by location. There is a latency impact for some workloads if, for example, routers are mixed between Local Zones and Availability Zones. 
-* The cluster-network Maximum Transmission Unit (MTU) is adjusted automatically to the lower restricted by AWS when Local Zone subnets are detected on the `install-config.yaml`, according to the network plugin. For example, the adjusted values are 1200 for OVN-Kubernetes and 1250 for OpenShift SDN. If additional features are enabled, manual MTU adjustment can be necessary. - -[IMPORTANT] -==== -Generally, the Maximum Transmission Unit (MTU) between an Amazon EC2 instance in a Local Zone and an Amazon EC2 instance in the Region is 1300. For more information, see link:https://docs.aws.amazon.com/local-zones/latest/ug/how-local-zones-work.html[How Local Zones work] in the AWS documentation. -The cluster network MTU must be always less than the EC2 MTU to account for the overhead. The specific overhead is determined by the network plugin, for example: - -- OVN-Kubernetes: `100 bytes` -- OpenShift SDN: `50 bytes` - -The network plugin can provide additional features, like IPsec, that also must be decreased the MTU. For additional information, see the documentation. -==== diff --git a/modules/enable-aws-access.adoc b/modules/enable-aws-access.adoc deleted file mode 100644 index 82c2bf046caf..000000000000 --- a/modules/enable-aws-access.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/osd_private_connections/aws-private-connections.adoc - -:_content-type: CONCEPT -[id="enable-aws-access"] -= Understanding AWS cloud infrastructure access - -[NOTE] -==== -AWS cloud infrastructure access does not apply to the Customer Cloud Subscription (CCS) infrastructure type that is chosen when you create a cluster because CCS clusters are deployed onto your account. -==== - - -{AWS} infrastructure access permits link:https://access.redhat.com/node/3610411[Customer Portal Organization Administrators] and cluster owners to enable AWS Identity and Access Management (IAM) users to have federated access to the AWS Management Console for their {product-title} cluster. AWS access can be granted for customer AWS users, and private cluster access can be implemented to suit the needs of your {product-title} environment. - -. Get started with configuring AWS infrastructure access for your {product-title} cluster. By creating an AWS user and account and providing that user with access to the {product-title} AWS account. - -. After you have access to the {product-title} AWS account, use one or more of the following methods to establish a private connection to your cluster: - -- Configuring AWS VPC peering: Enable VPC peering to route network traffic between two private IP addresses. - -- Configuring AWS VPN: Establish a Virtual Private Network to securely connect your private network to your Amazon Virtual Private Cloud. - -- Configuring AWS Direct Connect: Configure AWS Direct Connect to establish a dedicated network connection between your private network and an AWS Direct Connect location. - -// TODO: Was this supposed to be an xref that got yanked? Looks a little odd as is. I'd yank this and add it as an xref in an additional resources or next steps section in the assembly. -After configuring your cloud infrastructure access, learn more about Configuring a private cluster. 
diff --git a/modules/enable-private-cluster-existing.adoc b/modules/enable-private-cluster-existing.adoc deleted file mode 100644 index 7b4e3fdfe902..000000000000 --- a/modules/enable-private-cluster-existing.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/osd_private_connections/private-cluster.adoc - -:_content-type: PROCEDURE -[id="enable-private-cluster-existing_{context}"] -= Enabling an existing cluster to be private - - -After a cluster has been created, you can later enable the cluster to be private. - -.Prerequisites - -* The following private connections must be configured to allow private access: -** VPC Peering -** Cloud VPN -** DirectConnect (AWS only) -** TransitGateway (AWS only) -** Cloud Interconnect (GCP only) - -.Procedure - -. Log in to {cluster-manager-url}. - -. Select the public cluster you would like to make private. - -. On the *Networking* tab, select *Make API private* under *Control Plane API endpoint*. -+ - -[WARNING] -==== -When set to *Private*, you cannot access your cluster unless you have configured the private connections in your cloud provider as outlined in the prerequisites. -==== - -. Click *Change settings*. -+ -[NOTE] -==== -Transitioning your cluster between private and public can take several minutes to complete. -==== diff --git a/modules/enable-private-cluster-new.adoc b/modules/enable-private-cluster-new.adoc deleted file mode 100644 index d65f85c3d6bd..000000000000 --- a/modules/enable-private-cluster-new.adoc +++ /dev/null @@ -1,40 +0,0 @@ - -// Module included in the following assemblies: -// -// * osd_cluster_admin/osd_private_connections/private-cluster.adoc - -:_content-type: PROCEDURE -[id="enable-private-cluster-new_{context}"] -= Enabling a private cluster during cluster creation - - -You can enable private cluster settings when creating a new cluster. - -.Prerequisites - -* The following private connections must be configured to allow private access: -** VPC Peering -** Cloud VPN -** DirectConnect (AWS only) -** TransitGateway (AWS only) -** Cloud Interconnect (GCP only) - - -.Procedure - -. Log in to {cluster-manager-url}. -. Click *Create cluster* -> *{product-title}* -> *Create cluster*. -. Configure your cluster details. -. When selecting your preferred network configuration, select *Advanced*. -. Select *Private*. -+ -[WARNING] -==== -When set to *Private*, you cannot access your cluster unless you have configured the private connections in your cloud provider as outlined in the prerequisites. -==== - -. Click *Create cluster*. The cluster creation process begins and takes about 30-40 minutes to complete. - -.Verification - -* The *Installing cluster* heading, under the *Overview* tab, indicates that the cluster is installing and you can view the installation logs from this heading. The *Status* indicator under the *Details* heading indicates when your cluster is *Ready* for use. diff --git a/modules/enable-public-cluster.adoc b/modules/enable-public-cluster.adoc deleted file mode 100644 index 04e7954a735e..000000000000 --- a/modules/enable-public-cluster.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/osd_private_connections/private-cluster.adoc - -:_content-type: PROCEDURE -[id="enable-public-cluster_{context}"] -= Enabling an existing private cluster to be public -// TODO: These wordings of "enabling the cluster "to be public/private" could probably be improved. 
At the very least, these two modules should probably use "Configuring" instead of "Enabling", as it is worded now. - -After a private cluster has been created, you can later enable the cluster to be public. - -.Procedure - -. Log in to {cluster-manager-url}. - -. Select the private cluster you would like to make public. - -. On the *Networking* tab, deselect *Make API private* under *Control Plane API endpoint*. - -. Click *Change settings*. -+ -[NOTE] -==== -Transitioning your cluster between private and public can take several minutes to complete. -==== diff --git a/modules/enabling-additional-enabled-capabilities.adoc b/modules/enabling-additional-enabled-capabilities.adoc deleted file mode 100644 index 3f20ee5b09a0..000000000000 --- a/modules/enabling-additional-enabled-capabilities.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// *post_installation_configuration/cluster-capabilities.adoc - -[id="setting_additional_enabled_capabilities_{context}"] -= Enabling the cluster capabilities by setting additional enabled capabilities - -As a cluster administrator, you can enable the cluster capabilities by setting `additionalEnabledCapabilities`. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. View the additional enabled capabilities by running the following command: -+ -[source,terminal] ----- -$ oc get clusterversion version -o jsonpath='{.spec.capabilities.additionalEnabledCapabilities}{"\n"}' ----- - -+ -.Example output -[source,terminal] ----- -["openshift-samples"] ----- - -. To set the `additionalEnabledCapabilities`, run the following command: -+ -[source,terminal] ----- -$ oc patch clusterversion/version --type merge -p '{"spec":{"capabilities":{"additionalEnabledCapabilities":["openshift-samples", "marketplace"]}}}' ----- - -[IMPORTANT] -==== -It is not possible to disable a capability which is already enabled in a cluster. The cluster version Operator (CVO) continues to reconcile the capability which is already enabled in the cluster. -==== - - -If you try to disable a capability, then CVO shows the divergent spec: -[source,terminal] ----- -$ oc get clusterversion version -o jsonpath='{.status.conditions[?(@.type=="ImplicitlyEnabledCapabilities")]}{"\n"}' ----- - -.Example output -[source,terminal] ----- -{"lastTransitionTime":"2022-07-22T03:14:35Z","message":"The following capabilities could not be disabled: openshift-samples","reason":"CapabilitiesImplicitlyEnabled","status":"True","type":"ImplicitlyEnabledCapabilities"} ----- - -[NOTE] -==== -During the cluster upgrades, it is possible that a given capability could be implicitly enabled. If a resource was already running on the cluster before the upgrade, then any capabilities that is part of the resource will be enabled. For example, during a cluster upgrade, a resource that is already running on the cluster has been changed to be part of the `marketplace` capability by the system. Even if a cluster administrator does not explicitly enabled the `marketplace` capability, it is implicitly enabled by the system. 
-==== diff --git a/modules/enabling-baseline-capability-set.adoc b/modules/enabling-baseline-capability-set.adoc deleted file mode 100644 index b3a84816525c..000000000000 --- a/modules/enabling-baseline-capability-set.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// *post_installation_configuration/cluster-capabilities.adoc - -[id="setting_baseline_capability_set_{context}"] -= Enabling the cluster capabilities by setting baseline capability set - -As a cluster administrator, you can enable the capabilities by setting `baselineCapabilitySet`. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -* To set the `baselineCapabilitySet`, run the following command: -+ -[source,terminal] ----- -$ oc patch clusterversion version --type merge -p '{"spec":{"capabilities":{"baselineCapabilitySet":"vCurrent"}}}' <1> ----- -+ -<1> For `baselineCapabilitySet` you can specify `vCurrent`, `v{product-version}`, or `None`. - -include::snippets/capabilities-table.adoc[] diff --git a/modules/enabling-encapsulation.adoc b/modules/enabling-encapsulation.adoc deleted file mode 100644 index faaf4cdfe183..000000000000 --- a/modules/enabling-encapsulation.adoc +++ /dev/null @@ -1,162 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/optimization/optimizing-cpu-usage.adoc - -:_content-type: PROCEDURE -[id="enabling-encapsulation_{context}"] -= Configuring mount namespace encapsulation - -You can configure mount namespace encapsulation so that a cluster runs with less resource overhead. - -[NOTE] -==== -Mount namespace encapsulation is a Technology Preview feature and it is disabled by default. To use it, you must enable the feature manually. -==== - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -* You have logged in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a file called `mount_namespace_config.yaml` with the following YAML: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: master - name: 99-kubens-master -spec: - config: - ignition: - version: 3.2.0 - systemd: - units: - - enabled: true - name: kubens.service ---- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker - name: 99-kubens-worker -spec: - config: - ignition: - version: 3.2.0 - systemd: - units: - - enabled: true - name: kubens.service ----- - -. Apply the mount namespace `MachineConfig` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f mount_namespace_config.yaml ----- -+ -.Example output -[source,terminal] ----- -machineconfig.machineconfiguration.openshift.io/99-kubens-master created -machineconfig.machineconfiguration.openshift.io/99-kubens-worker created ----- - -. The `MachineConfig` CR can take up to 30 minutes to finish being applied in the cluster. You can check the status of the `MachineConfig` CR by running the following command: -+ -[source,terminal] ----- -$ oc get mcp ----- -+ -.Example output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-03d4bc4befb0f4ed3566a2c8f7636751 False True False 3 0 0 0 45m -worker rendered-worker-10577f6ab0117ed1825f8af2ac687ddf False True False 3 1 1 ----- - -. 
Wait for the `MachineConfig` CR to be applied successfully across all control plane and worker nodes after running the following command: -+ -[source,terminal] ----- -$ oc wait --for=condition=Updated mcp --all --timeout=30m ----- -+ -.Example output -[source,terminal] ----- -machineconfigpool.machineconfiguration.openshift.io/master condition met -machineconfigpool.machineconfiguration.openshift.io/worker condition met ----- - -.Verification - -To verify encapsulation for a cluster host, run the following commands: - -. Open a debug shell to the cluster host: -+ -[source,terminal] ----- -$ oc debug node/<node_name> ----- - -. Open a `chroot` session: -+ -[source,terminal] ----- -sh-4.4# chroot /host ----- - -. Check the systemd mount namespace: -+ -[source,terminal] ----- -sh-4.4# readlink /proc/1/ns/mnt ----- -+ -.Example output -[source,terminal] ----- -mnt:[4026531953] ----- - -. Check kubelet mount namespace: -+ -[source,terminal] ----- -sh-4.4# readlink /proc/$(pgrep kubelet)/ns/mnt ----- -+ -.Example output -[source,terminal] ----- -mnt:[4026531840] ----- - -. Check the CRI-O mount namespace: -+ -[source,terminal] ----- -sh-4.4# readlink /proc/$(pgrep crio)/ns/mnt ----- -+ -.Example output -[source,terminal] ----- -mnt:[4026531840] ----- - -These commands return the mount namespaces associated with systemd, kubelet, and the container runtime. In {product-title}, the container runtime is CRI-O. - -Encapsulation is in effect if systemd is in a different mount namespace to kubelet and CRI-O as in the above example. -Encapsulation is not in effect if all three processes are in the same mount namespace. diff --git a/modules/enabling-etcd-encryption.adoc b/modules/enabling-etcd-encryption.adoc deleted file mode 100644 index a639a82f6066..000000000000 --- a/modules/enabling-etcd-encryption.adoc +++ /dev/null @@ -1,108 +0,0 @@ -// Module included in the following assemblies: -// -// * security/encrypting-etcd.adoc -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: PROCEDURE -[id="enabling-etcd-encryption_{context}"] -= Enabling etcd encryption - -You can enable etcd encryption to encrypt sensitive resources in your cluster. - -[WARNING] -==== -Do not back up etcd resources until the initial encryption process is completed. If the encryption process is not completed, the backup might be only partially encrypted. - -After you enable etcd encryption, several changes can occur: - -* The etcd encryption might affect the memory consumption of a few resources. -* You might notice a transient affect on backup performance because the leader must serve the backup. -* A disk I/O can affect the node that receives the backup state. -==== - -You can encrypt the etcd database in either AES-GCM or AES-CBC encryption. - -[NOTE] -==== -To migrate your etcd database from one encryption type to the other, you can modify the API server's `spec.encryption.type` field. Migration of the etcd data to the new encryption type occurs automatically. -==== - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Modify the `APIServer` object: -+ -[source,terminal] ----- -$ oc edit apiserver ----- - -. Set the `spec.encryption.type` field to `aesgcm` or `aescbc`: -+ -[source,yaml] ----- -spec: - encryption: - type: aesgcm <1> ----- -<1> Set to `aesgcm` for AES-GCM encryption or `aescbc` for AES-CBC encryption. - -. Save the file to apply the changes. -+ -The encryption process starts. 
It can take 20 minutes or longer for this process to complete, depending on the size of the etcd database. - -. Verify that etcd encryption was successful. - -.. Review the `Encrypted` status condition for the OpenShift API server to verify that its resources were successfully encrypted: -+ -[source,terminal] ----- -$ oc get openshiftapiserver -o=jsonpath='{range .items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -The output shows `EncryptionCompleted` upon successful encryption: -+ -[source,terminal] ----- -EncryptionCompleted -All resources encrypted: routes.route.openshift.io ----- -+ -If the output shows `EncryptionInProgress`, encryption is still in progress. Wait a few minutes and try again. - -.. Review the `Encrypted` status condition for the Kubernetes API server to verify that its resources were successfully encrypted: -+ -[source,terminal] ----- -$ oc get kubeapiserver -o=jsonpath='{range .items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -The output shows `EncryptionCompleted` upon successful encryption: -+ -[source,terminal] ----- -EncryptionCompleted -All resources encrypted: secrets, configmaps ----- -+ -If the output shows `EncryptionInProgress`, encryption is still in progress. Wait a few minutes and try again. - -.. Review the `Encrypted` status condition for the OpenShift OAuth API server to verify that its resources were successfully encrypted: -+ -[source,terminal] ----- -$ oc get authentication.operator.openshift.io -o=jsonpath='{range .items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -The output shows `EncryptionCompleted` upon successful encryption: -+ -[source,terminal] ----- -EncryptionCompleted -All resources encrypted: oauthaccesstokens.oauth.openshift.io, oauthauthorizetokens.oauth.openshift.io ----- -+ -If the output shows `EncryptionInProgress`, encryption is still in progress. Wait a few minutes and try again. diff --git a/modules/enabling-insights-advisor-recommendations.adoc b/modules/enabling-insights-advisor-recommendations.adoc deleted file mode 100644 index f11d3363ba36..000000000000 --- a/modules/enabling-insights-advisor-recommendations.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc -// * sd_support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc - -:_content-type: PROCEDURE -[id="enabling-insights-advisor-recommendations_{context}"] -= Enabling a previously disabled Insights Advisor recommendation - -When a recommendation is disabled for all clusters, you will no longer see the recommendation in Insights Advisor. You can change this behavior. - -.Prerequisites - -* Remote health reporting is enabled, which is the default. -* Your cluster is registered on {cluster-manager-url}. -* You are logged in to {cluster-manager-url}. - -.Procedure - -. Navigate to *Advisor* -> *Recommendations* on {cluster-manager-url}. -. Filter the recommendations by *Status* -> *Disabled*. -. Locate the recommendation to enable. -. Click the *Options* menu {kebab}, and then click *Enable recommendation*. 
diff --git a/modules/enabling-internal-api-server-vsphere.adoc b/modules/enabling-internal-api-server-vsphere.adoc deleted file mode 100644 index db927aaf9c7d..000000000000 --- a/modules/enabling-internal-api-server-vsphere.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * windows_containers/creating_windows_machinesets/creating-windows-machineset-vsphere.adoc - -:_content-type: PROCEDURE -[id="enabling-internal-api-server-vsphere_{context}"] -= Enabling communication with the internal API server for the WMCO on vSphere - -The Windows Machine Config Operator (WMCO) downloads the Ignition config files from the internal API server endpoint. You must enable communication with the internal API server so that your Windows virtual machine (VM) can download the Ignition config files, and the kubelet on the configured VM can only communicate with the internal API server. - -.Prerequisites - -* You have installed a cluster on vSphere. - -.Procedure - -* Add a new DNS entry for `api-int.<cluster_name>.<base_domain>` that points to the external API server URL `api.<cluster_name>.<base_domain>`. This can be a CNAME or an additional A record. - -[NOTE] -==== -The external API endpoint was already created as part of the initial cluster installation on vSphere. -==== diff --git a/modules/enabling-multi-cluster-console.adoc b/modules/enabling-multi-cluster-console.adoc deleted file mode 100644 index e8eb5c7bb5d4..000000000000 --- a/modules/enabling-multi-cluster-console.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * assemblies/web-console.adoc - -:_content-type: PROCEDURE -[id="enable-multi-cluster-console_{context}"] -= Enabling multicluster in the web console - -:FeatureName: Multicluster console -include::snippets/technology-preview.adoc[leveloffset=+1] -// - -.Prerequisites -* Your cluster must be using the latest version of {product-title}. -* You must have link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.5/html/install/index[Red Hat Advanced Cluster Management (ACM) for Kubernetes 2.5] or the link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.5/html/multicluster_engine/index[multiculster engine (MCE) Operator] installed. -* You must have administrator privileges. - -[WARNING] -==== -Do not set this feature gate on production clusters. You will not be able to upgrade your cluster after applying the feature gate, and it cannot be undone. -==== - -.Procedure - -. Log in to the {product-title} web console using your credentials. - -. Enable ACM in the administrator perspective by navigating from *Administration* -> *Cluster Settings* -> *Configuration* -> *Console* `console.operator.openshift.io` -> *Console Plugins* and click *Enable* for `acm`. - -. A pop-up window will appear notifying you that updating the enablement of this console plugin will prompt for the console to be refreshed once it has been updated. Select `Enable` and click *Save*. - -. Repeat the previous two steps for the `mce` console plugin immediately after enabling `acm`. - -. A pop-up window that states that a web console update is available will appear a few moments after you enable. Click *Refresh the web console* in the pop-up window to update. -+ -[NOTE] -==== -You might see the pop-up window to refresh the web console twice if the second redeployment has not occurred by the time you click *Refresh the web console*. 
-==== - -** *local-cluster* and *All Clusters* is now visible above the perspectives in the navigation section. - -. Enable the feature gate by navigating from *Administration* -> *Cluster Settings* -> *Configuration* -> *FeatureGate*, and edit the YAML template as follows: -+ -[source,yaml] - ----- -spec: - featureSet: TechPreviewNoUpgrade ----- - -. Click *Save* to enable the multicluster console for all clusters. -+ -[IMPORTANT] -==== -After you save, this feature is enabled and cannot be undone. -==== diff --git a/modules/enabling-plug-in-browser.adoc b/modules/enabling-plug-in-browser.adoc deleted file mode 100644 index a58ddc2c0c1e..000000000000 --- a/modules/enabling-plug-in-browser.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/dynamic-plug-ins.adoc - -:_content-type: PROCEDURE -[id="enable-plug-in-browser_{context}"] -= Enable dynamic plugins in the web console -Cluster administrators can enable plugins in the web console browser. Dynamic plugins are disabled by default. In order to enable, a cluster administrator will need to enable them in the `console-operator` configuration. - -.Procedure - -. In the *Administration* -> *Cluster Settings* page of the web console, click the *Configuration* tab. - -. Click the `Console` `operator.openshift.io` configuration resource. - -. From there, click the *Console plugins* tab to view the dynamic plugins running. - -. In the `Status` column, click `Enable console plugin` in the pop-over menu, which will launch the `Console plugin enablement` modal. - -. Click `Enable` and `Save`. - -.Verification - -* Refresh the browser to view the enabled plugin. diff --git a/modules/ephemeral-storage-additional-details-about-volumeattributes-on-shared-resource-pod-volumes.adoc b/modules/ephemeral-storage-additional-details-about-volumeattributes-on-shared-resource-pod-volumes.adoc deleted file mode 100644 index 071d522d529a..000000000000 --- a/modules/ephemeral-storage-additional-details-about-volumeattributes-on-shared-resource-pod-volumes.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: REFERENCE - -[id="ephemeral-storage-additional-details-about-volumeattributes-on-shared-resource-pod-volumes_{context}"] -= Additional details about VolumeAttributes on shared resource pod volumes - -[role="_abstract"] -The following attributes affect shared resource pod volumes in various ways: - -* The `refreshResource` attribute in the `volumeAttributes` properties. -* The `refreshResources` attribute in the Shared Resource CSI Driver configuration. -* The `sharedSecret` and `sharedConfigMap` attributes in the `volumeAttributes` properties. - -== The `refreshResource` attribute - -The Shared Resource CSI Driver honors the `refreshResource` attribute in `volumeAttributes` properties of the volume. This attribute controls whether updates to the contents of the underlying `Secret` or `ConfigMap` object are copied to the volume *after* the volume is initially provisioned as part of pod startup. The default value of `refreshResource` is `true`, which means that the contents are updated. - -[IMPORTANT] -==== -If the Shared Resource CSI Driver configuration has disabled the refreshing of both the shared `SharedSecret` and `SharedConfigMap` custom resource (CR) instances, then the `refreshResource` attribute in the `volumeAttribute` properties has no effect. The intent of this attribute is to disable refresh for specific volume mounts when refresh is generally allowed. 
-==== - -== The `refreshResources` attribute - -You can use a global switch to enable or disable refreshing of shared resources. This switch is the `refreshResources` attribute in the `csi-driver-shared-resource-config` config map for the Shared Resource CSI Driver, which you can find in the `openshift-cluster-csi-drivers` namespace. If you set this `refreshResources` attribute to `false`, none of the `Secret` or `ConfigMap` object-related content stored in the volume is updated after the initial provisioning of the volume. - -[IMPORTANT] -==== -Using this Shared Resource CSI Driver configuration to disable refreshing affects all the cluster's volume mounts that use the Shared Resource CSI Driver, regardless of the `refreshResource` attribute in the `volumeAttributes` properties of any of those volumes. -==== - -== Validation of volumeAttributes before provisioning a shared resource volume for a pod - -In the `volumeAttributes` of a single volume, you must set either a `sharedSecret` or a `sharedConfigMap` attribute to the value of a `SharedSecret` or a `SharedConfigMap` CS instance. Otherwise, when the volume is provisioned during pod startup, a validation checks the `volumeAttributes` of that volume and returns an error to the kubelet under the following conditions: - -* Both `sharedSecret` and `sharedConfigMap` attributes have specified values. -* Neither `sharedSecret` nor `sharedConfigMap` attributes have specified values. -* The value of the `sharedSecret` or `sharedConfigMap` attribute does not correspond to the name of a `SharedSecret` or `SharedConfigMap` CR instance on the cluster. diff --git a/modules/ephemeral-storage-additional-support-limitations-for-shared-resource-csi-driver.adoc b/modules/ephemeral-storage-additional-support-limitations-for-shared-resource-csi-driver.adoc deleted file mode 100644 index a205bece172a..000000000000 --- a/modules/ephemeral-storage-additional-support-limitations-for-shared-resource-csi-driver.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: REFERENCE - -[id="ephemeral-storage-additional-support-limitations-for-shared-resource-csi-driver_{context}"] -= Additional support limitations for the Shared Resource CSI Driver - -[role="_abstract"] -The Shared Resource CSI Driver has the following noteworthy limitations: - -* The driver is subject to the limitations of Container Storage Interface (CSI) inline ephemeral volumes. -* The value of the `readOnly` field must be `true`. On `Pod` creation, a validating admission webhook rejects the pod creation if `readOnly` is `false`. If for some reason the validating admission webhook cannot be contacted, on volume provisioning during pod startup, the driver returns an error to the kubelet. Requiring `readOnly` is `true` is in keeping with proposed best practices for the upstream Kubernetes CSI Driver to apply SELinux labels to associated volumes. -* The driver ignores the `FSType` field because it only supports `tmpfs` volumes. -* The driver ignores the `NodePublishSecretRef` field. Instead, it uses `SubjectAccessReviews` with the `use` verb to evaluate whether a pod can obtain a volume that contains `SharedSecret` or `SharedConfigMap` custom resource (CR) instances. -* You cannot create `SharedSecret` or `SharedConfigMap` custom resource (CR) instances whose names start with `openshift`. 
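To change the global `refreshResources` switch described earlier, an administrator can patch the driver's config map. The following command is a sketch that assumes `refreshResources` is stored as a key under the config map's `data` section; verify the actual key layout in your cluster before applying it:

[source,terminal]
----
$ oc patch configmap/csi-driver-shared-resource-config \
    -n openshift-cluster-csi-drivers \
    --type merge \
    -p '{"data":{"refreshResources":"false"}}'
----

Remember that while refreshing is disabled globally, the per-volume `refreshResource` attribute has no effect, so re-enable the global switch before relying on per-mount opt-outs.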
diff --git a/modules/ephemeral-storage-csi-inline-overview-admin-plugin.adoc b/modules/ephemeral-storage-csi-inline-overview-admin-plugin.adoc deleted file mode 100644 index 6ddf45dbcb9d..000000000000 --- a/modules/ephemeral-storage-csi-inline-overview-admin-plugin.adoc +++ /dev/null @@ -1,117 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/container_storage_interface/ephemeral-storage-csi-inline.adoc - -:_content-type: CONCEPT -[id="ephemeral-storage-csi-overview-admin-plugin_{context}"] -= CSI Volume Admission plugin - -The Container Storage Interface (CSI) Volume Admission plugin allows you to restrict the use of an individual CSI driver capable of provisioning CSI ephemeral volumes on pod admission. Administrators can add a `csi-ephemeral-volume-profile` label, and this label is then inspected by the Admission plugin and used in enforcement, warning, and audit decisions. - -[id="overview-admission-plugin"] -== Overview - -To use the CSI Volume Admission plugin, administrators add the `security.openshift.io/csi-ephemeral-volume-profile` label to a `CSIDriver` object, which declares the CSI driver’s effective pod security profile when it is used to provide CSI ephemeral volumes, as shown in the following example: - -[source, yaml] ----- -kind: CSIDriver -metadata: - name: csi.mydriver.company.org - labels: - security.openshift.io/csi-ephemeral-volume-profile: restricted <1> ----- -<1> CSI driver object YAML file with the `csi-ephemeral-volume-profile` label set to "restricted" - -This “effective profile” communicates that a pod can use the CSI driver to mount CSI ephemeral volumes when the pod’s namespace is governed by a pod security standard. - -The CSI Volume Admission plugin inspects pod volumes when pods are created; existing pods that use CSI volumes are not affected. If a pod uses a container storage interface (CSI) volume, the plugin looks up the `CSIDriver` object and inspects the `csi-ephemeral-volume-profile` label, and then use the label’s value in its enforcement, warning, and audit decisions. - -[id="security-profile-enforcement"] -== Pod security profile enforcement - -When a CSI driver has the `csi-ephemeral-volume-profile` label, pods using the CSI driver to mount CSI ephemeral volumes must run in a namespace that enforces a pod security standard of equal or greater permission. If the namespace enforces a more restrictive standard, the CSI Volume Admission plugin denies admission. The following table describes the enforcement behavior for different pod security profiles for given label values. - -.Pod security profile enforcement -[cols=",^v,^v,^v,^v width="100%",options="header"] -|=== -|Pod security profile|Driver label: restricted| Driver label: baseline | Driver label: privileged - -|Restricted -|Allowed -|Denied -|Denied - -|Baseline -|Allowed -|Allowed -|Denied - -|Privileged -|Allowed -|Allowed -|Allowed -|=== - -[id="security-profile-warning"] -== Pod security profile warning -The CSI Volume Admission plugin can warn you if the CSI driver’s effective profile is more permissive than the pod security warning profile for the pod namespace. The following table shows when a warning occurs for different pod security profiles for given label values. 
- -.Pod security profile warning -[cols=",^v,^v,^v,^v width="100%",options="header"] -|=== -|Pod security profile|Driver label: restricted| Driver label: baseline | Driver label: privileged - -|Restricted -|No warning -|Warning -|Warning - -|Baseline -|No warning -|No warning -|Warning - -|Privileged -|No warning -|No warning -|No warning -|=== - -[id="security-profile-audit"] -== Pod security profile audit -The CSI Volume Admission plugin can apply audit annotations to the pod if the CSI driver’s effective profile is more permissive than the pod security audit profile for the pod namespace. The following table shows the audit annotation applied for different pod security profiles for given label values. - -.Pod security profile audit -[cols=",^v,^v,^v,^v width="100%",options="header"] -|=== -|Pod security profile|Driver label: restricted| Driver label: baseline | Driver label: privileged - -|Restricted -|No audit -|Audit -|Audit - -|Baseline -|No audit -|No audit -|Audit - -|Privileged -|No audit -|No audit -|No audit -|=== - -[id="admission-plugin-default-behavior"] -== Default behavior for the CSI Volume Admission plugin - -If the referenced CSI driver for a CSI ephemeral volume does not have the `csi-ephemeral-volume-profile` label, the CSI Volume Admission plugin considers the driver to have the privileged profile for enforcement, warning, and audit behaviors. Likewise, if the pod’s namespace does not have the pod security admission label set, the Admission plugin assumes the restricted profile is allowed for enforcement, warning, and audit decisions. Therefore, if no labels are set, CSI ephemeral volumes using that CSI driver are only usable in privileged namespaces by default. - -The CSI drivers that ship with {product-title} and support ephemeral volumes have a reasonable default set for the `csi-ephemeral-volume-profile` label: - -* Shared Resource CSI driver: restricted - -* Azure File CSI driver: privileged - -An admin can change the default value of the label if desired. \ No newline at end of file diff --git a/modules/ephemeral-storage-csi-inline-overview.adoc b/modules/ephemeral-storage-csi-inline-overview.adoc deleted file mode 100644 index cb97b8bdaf70..000000000000 --- a/modules/ephemeral-storage-csi-inline-overview.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/container_storage_interface/ephemeral-storage-csi-inline.adoc - -:_content-type: CONCEPT -[id="ephemeral-storage-csi-inline-overview_{context}"] -= Overview of CSI inline ephemeral volumes - -Traditionally, volumes that are backed by Container Storage Interface (CSI) drivers can only be used with a `PersistentVolume` and `PersistentVolumeClaim` object combination. - -This feature allows you to specify CSI volumes directly in the `Pod` specification, rather than in a `PersistentVolume` object. Inline volumes are ephemeral and do not persist across pod restarts. - -== Support limitations - -By default, {product-title} supports CSI inline ephemeral volumes with these limitations: - -* Support is only available for CSI drivers. In-tree and FlexVolumes are not supported. -* The Shared Resource CSI Driver supports using inline ephemeral volumes only to access `Secrets` or `ConfigMaps` across multiple namespaces as a Technology Preview feature. -* Community or storage vendors provide other CSI drivers that support these volumes. Follow the installation instructions provided by the CSI driver provider. 
- -CSI drivers might not have implemented the inline volume functionality, including `Ephemeral` capacity. For details, see the CSI driver documentation. - -:FeatureName: Shared Resource CSI Driver -include::snippets/technology-preview.adoc[leveloffset=+0] diff --git a/modules/ephemeral-storage-csi-inline-pod.adoc b/modules/ephemeral-storage-csi-inline-pod.adoc deleted file mode 100644 index 67f8c93e9cb2..000000000000 --- a/modules/ephemeral-storage-csi-inline-pod.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/container_storage_interface/ephemeral-storage-csi-inline-pod-scheduling.adoc - -:_content-type: PROCEDURE -[id="ephemeral-storage-csi-inline-pod_{context}"] -= Embedding a CSI inline ephemeral volume in the pod specification - -You can embed a CSI inline ephemeral volume in the `Pod` specification in {product-title}. At runtime, nested inline volumes follow the ephemeral lifecycle of their associated pods so that the CSI driver handles all phases of volume operations as pods are created and destroyed. - -.Procedure - -. Create the `Pod` object definition and save it to a file. - -. Embed the CSI inline ephemeral volume in the file. -+ -.my-csi-app.yaml -[source,yaml] ----- -kind: Pod -apiVersion: v1 -metadata: - name: my-csi-app -spec: - containers: - - name: my-frontend - image: busybox - volumeMounts: - - mountPath: "/data" - name: my-csi-inline-vol - command: [ "sleep", "1000000" ] - volumes: <1> - - name: my-csi-inline-vol - csi: - driver: inline.storage.kubernetes.io - volumeAttributes: - foo: bar ----- -<1> The name of the volume that is used by pods. - -. Create the object definition file that you saved in the previous step. -+ -[source,terminal] ----- -$ oc create -f my-csi-app.yaml ----- diff --git a/modules/ephemeral-storage-integration-between-shared-resources-insights-operator-and-openshift-builds.adoc b/modules/ephemeral-storage-integration-between-shared-resources-insights-operator-and-openshift-builds.adoc deleted file mode 100644 index 62691d8cd385..000000000000 --- a/modules/ephemeral-storage-integration-between-shared-resources-insights-operator-and-openshift-builds.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_content-type: REFERENCE - -[id="ephemeral-storage-integration-between-shared-resources-insights-operator-and-openshift-builds_{context}"] -= Integration between shared resources, Insights Operator, and {product-title} Builds - -[role="_abstract"] -Integration between shared resources, Insights Operator, and {product-title} Builds makes using Red Hat subscriptions (RHEL entitlements) easier in {product-title} Builds. - -Previously, in {product-title} 4.9.x and earlier, you manually imported your credentials and copied them to each project or namespace where you were running builds. - -Now, in {product-title} 4.10 and later, {product-title} Builds can use Red Hat subscriptions (RHEL entitlements) by referencing shared resources and the simple content access feature provided by Insights Operator: - -* The simple content access feature imports your subscription credentials to a well-known `Secret` object. See the links in the following "Additional resources" section. -* The cluster administrator creates a `SharedSecret` custom resource (CR) instance around that `Secret` object and grants permission to particular projects or namespaces. In particular, the cluster administrator gives the `builder` service account permission to use that `SharedSecret` CR instance. 
-* Builds that run within those projects or namespaces can mount a CSI Volume that references the `SharedSecret` CR instance and its entitled RHEL content. diff --git a/modules/ephemeral-storage-sharing-configmaps-across-namespaces.adoc b/modules/ephemeral-storage-sharing-configmaps-across-namespaces.adoc deleted file mode 100644 index 9da4cf670d59..000000000000 --- a/modules/ephemeral-storage-sharing-configmaps-across-namespaces.adoc +++ /dev/null @@ -1,36 +0,0 @@ -:_content-type: PROCEDURE - -[id="ephemeral-storage-sharing-configmaps-across-namespaces_{context}"] -= Sharing a config map across namespaces - -[role="_abstract"] -To share a config map across namespaces in a cluster, you create a `SharedConfigMap` custom resource (CR) instance for that config map. - -.Prerequisites - -You must have permission to perform the following actions: - -* Create instances of the `sharedconfigmaps.sharedresource.openshift.io` custom resource definition (CRD) at a cluster-scoped level. -* Manage roles and role bindings across the namespaces in the cluster to control which users can get, list, and watch those instances. -* Manage roles and role bindings across the namespaces in the cluster to control which service accounts in pods that mount your Container Storage Interface (CSI) volume can use those instances. -* Access the namespaces that contain the Secrets you want to share. - -.Procedure - -. Create a `SharedConfigMap` CR instance for the config map that you want to share across namespaces in the cluster: -+ -[source,terminal] ----- -$ oc apply -f - <<EOF -apiVersion: sharedresource.openshift.io/v1alpha1 -kind: SharedConfigMap -metadata: - name: my-share -spec: - configMapRef: - name: <name of configmap> - namespace: <namespace of configmap> -EOF ----- - -.Next steps diff --git a/modules/ephemeral-storage-sharing-secrets-across-namespaces.adoc b/modules/ephemeral-storage-sharing-secrets-across-namespaces.adoc deleted file mode 100644 index fd80147e7c5c..000000000000 --- a/modules/ephemeral-storage-sharing-secrets-across-namespaces.adoc +++ /dev/null @@ -1,34 +0,0 @@ -:_content-type: PROCEDURE - -[id="ephemeral-storage-sharing-secrets-across-namespaces_{context}"] -= Sharing secrets across namespaces - -[role="_abstract"] -To share a secret across namespaces in a cluster, you create a `SharedSecret` custom resource (CR) instance for the `Secret` object that you want to share. - -.Prerequisites - -You must have permission to perform the following actions: - -* Create instances of the `sharedsecrets.sharedresource.openshift.io` custom resource definition (CRD) at a cluster-scoped level. -* Manage roles and role bindings across the namespaces in the cluster to control which users can get, list, and watch those instances. -* Manage roles and role bindings to control whether the service account specified by a pod can mount a Container Storage Interface (CSI) volume that references the `SharedSecret` CR instance you want to use. -* Access the namespaces that contain the Secrets you want to share. 
- -.Procedure - -* Create a `SharedSecret` CR instance for the `Secret` object you want to share across namespaces in the cluster: -+ -[source,terminal] ----- -$ oc apply -f - <<EOF -apiVersion: sharedresource.openshift.io/v1alpha1 -kind: SharedSecret -metadata: - name: my-share -spec: - secretRef: - name: <name of secret> - namespace: <namespace of secret> -EOF ----- diff --git a/modules/ephemeral-storage-using-a-sharedconfigmap-object-in-a-pod.adoc b/modules/ephemeral-storage-using-a-sharedconfigmap-object-in-a-pod.adoc deleted file mode 100644 index 552ea3fc2c29..000000000000 --- a/modules/ephemeral-storage-using-a-sharedconfigmap-object-in-a-pod.adoc +++ /dev/null @@ -1,82 +0,0 @@ -:_content-type: PROCEDURE - -[id="ephemeral-storage-using-a-sharedconfigmap-object-in-a-pod_{context}"] -= Using a SharedConfigMap instance in a pod - -[role="_abstract"] -To access a `SharedConfigMap` custom resource (CR) instance from a pod, you grant a given service account RBAC permissions to use that `SharedConfigMap` CR instance. - -.Prerequisites - -* You have created a `SharedConfigMap` CR instance for the config map that you want to share across namespaces in the cluster. -* You must have permission to perform the following actions: -** Discover which `SharedConfigMap` CR instances are available by entering the `oc get sharedconfigmaps` command and getting a non-empty list back. -** Determine if the service account your pod specifies is allowed to use the given `SharedSecret` CR instance. That is, you can run `oc adm policy who-can use <identifier of specific SharedSecret>` to see if the service account in your namespace is listed. -** Determine if the service account your pod specifies is allowed to use `csi` volumes, or if you, as the requesting user who created the pod directly, are allowed to use `csi` volumes. See "Understanding and managing pod security admission" for details. - -[NOTE] -==== -If neither of the last two prerequisites in this list are met, create, or ask someone to create, the necessary role-based access control (RBAC) so that you can discover `SharedConfigMap` CR instances and enable service accounts to use `SharedConfigMap` CR instances. -==== - -.Procedure - -. Grant a given service account RBAC permissions to use the `SharedConfigMap` CR instance in its pod by using `oc apply` with YAML content. -+ -[NOTE] -==== -Currently, `kubectl` and `oc` have hard-coded special case logic restricting the `use` verb to roles centered around pod security. Therefore, you cannot use `oc create role ...` to create the role needed for consuming a `SharedConfigMap` CR instance. -==== -+ -[source,terminal] ----- -$ oc apply -f - <<EOF -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: shared-resource-my-share - namespace: my-namespace -rules: - - apiGroups: - - sharedresource.openshift.io - resources: - - sharedconfigmaps - resourceNames: - - my-share - verbs: - - use -EOF ----- - -. Create the `RoleBinding` associated with the role by using the `oc` command: -+ -[source,terminal] ----- -oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:builder ----- - -. Access the `SharedConfigMap` CR instance from a pod: -+ -[source,terminal] ----- -$ oc apply -f - <<EOF -kind: Pod -apiVersion: v1 -metadata: - name: my-app - namespace: my-namespace -spec: - serviceAccountName: default - -# containers omitted …. 
Follow standard use of ‘volumeMounts’ for referencing your shared resource volume - - volumes: - - name: my-csi-volume - csi: - readOnly: true - driver: csi.sharedresource.openshift.io - volumeAttributes: - sharedConfigMap: my-share - -EOF ----- diff --git a/modules/ephemeral-storage-using-a-sharedsecrets-resource-in-a-pod.adoc b/modules/ephemeral-storage-using-a-sharedsecrets-resource-in-a-pod.adoc deleted file mode 100644 index 8ffe930a3526..000000000000 --- a/modules/ephemeral-storage-using-a-sharedsecrets-resource-in-a-pod.adoc +++ /dev/null @@ -1,82 +0,0 @@ -:_content-type: PROCEDURE - -[id="ephemeral-storage-using-a-sharedsecrets-resource-in-a-pod_{context}"] -= Using a SharedSecret instance in a pod - -[role="_abstract"] -To access a `SharedSecret` custom resource (CR) instance from a pod, you grant a given service account RBAC permissions to use that `SharedSecret` CR instance. - -.Prerequisites - -* You have created a `SharedSecret` CR instance for the secret you want to share across namespaces in the cluster. -* You must have permission to perform the following actions -** Discover which `SharedSecret` CR instances are available by entering the `oc get sharedsecrets` command and getting a non-empty list back. -** Determine if the service account your pod specifies is allowed to use the given `SharedSecret` CR instance. That is, you can run `oc adm policy who-can use <identifier of specific SharedSecret>` to see if the service account in your namespace is listed. -** Determine if the service account your pod specifies is allowed to use `csi` volumes, or if you, as the requesting user who created the pod directly, are allowed to use `csi` volumes. See "Understanding and managing pod security admission" for details. - -[NOTE] -==== -If neither of the last two prerequisites in this list are met, create, or ask someone to create, the necessary role-based access control (RBAC) so that you can discover `SharedSecret` CR instances and enable service accounts to use `SharedSecret` CR instances. -==== - -.Procedure - -. Grant a given service account RBAC permissions to use the `SharedSecret` CR instance in its pod by using `oc apply` with YAML content: -+ -[NOTE] -==== -Currently, `kubectl` and `oc` have hard-coded special case logic restricting the `use` verb to roles centered around pod security. Therefore, you cannot use `oc create role ...` to create the role needed for consuming `SharedSecret` CR instances. -==== -+ -[source,terminal] ----- -$ oc apply -f - <<EOF -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: shared-resource-my-share - namespace: my-namespace -rules: - - apiGroups: - - sharedresource.openshift.io - resources: - - sharedsecrets - resourceNames: - - my-share - verbs: - - use -EOF ----- - -. Create the `RoleBinding` associated with the role by using the `oc` command: -+ -[source,terminal] ----- -$ oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:builder ----- - -. Access the `SharedSecret` CR instance from a pod: -+ -[source,terminal] ----- -$ oc apply -f - <<EOF -kind: Pod -apiVersion: v1 -metadata: - name: my-app - namespace: my-namespace -spec: - serviceAccountName: default - -# containers omitted …. 
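# A hypothetical containers section is sketched below for illustration only;
# the container name, image, and mount path are placeholders, not product defaults.
  containers:
    - name: my-app-container
      image: quay.io/example/my-app:latest
      volumeMounts:
        - name: my-csi-volume
          mountPath: /etc/my-shared-secret
          readOnly: true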
Follow standard use of ‘volumeMounts’ for referencing your shared resource volume - - volumes: - - name: my-csi-volume - csi: - readOnly: true - driver: csi.sharedresource.openshift.io - volumeAttributes: - sharedSecret: my-share - -EOF ----- diff --git a/modules/etcd-defrag.adoc b/modules/etcd-defrag.adoc deleted file mode 100644 index 0611d436cce1..000000000000 --- a/modules/etcd-defrag.adoc +++ /dev/null @@ -1,203 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/cluster-tasks.adoc -// * scalability_and_performance/recommended-performance-scale-practices/recommended-etcd-practices.adoc - -:_content-type: PROCEDURE -[id="etcd-defrag_{context}"] -= Defragmenting etcd data - -For large and dense clusters, etcd can suffer from poor performance if the keyspace grows too large and exceeds the space quota. Periodically maintain and defragment etcd to free up space in the data store. Monitor Prometheus for etcd metrics and defragment it when required; otherwise, etcd can raise a cluster-wide alarm that puts the cluster into a maintenance mode that accepts only key reads and deletes. - -Monitor these key metrics: - -* `etcd_server_quota_backend_bytes`, which is the current quota limit -* `etcd_mvcc_db_total_size_in_use_in_bytes`, which indicates the actual database usage after a history compaction -* `etcd_mvcc_db_total_size_in_bytes`, which shows the database size, including free space waiting for defragmentation - -Defragment etcd data to reclaim disk space after events that cause disk fragmentation, such as etcd history compaction. - -History compaction is performed automatically every five minutes and leaves gaps in the back-end database. This fragmented space is available for use by etcd, but is not available to the host file system. You must defragment etcd to make this space available to the host file system. - -Defragmentation occurs automatically, but you can also trigger it manually. - -[NOTE] -==== -Automatic defragmentation is good for most cases, because the etcd operator uses cluster information to determine the most efficient operation for the user. -==== - -[id="automatic-defrag-etcd-data_{context}"] -== Automatic defragmentation - -The etcd Operator automatically defragments disks. No manual intervention is needed. - -Verify that the defragmentation process is successful by viewing one of these logs: - -* etcd logs -* cluster-etcd-operator pod -* operator status error log - -[WARNING] -==== -Automatic defragmentation can cause leader election failure in various OpenShift core components, such as the Kubernetes controller manager, which triggers a restart of the failing component. The restart is harmless and either triggers failover to the next running instance or the component resumes work again after the restart. -==== - -.Example log output for successful defragmentation -[source,terminal] -[subs="+quotes"] ----- -etcd member has been defragmented: __<member_name>__, memberID: __<member_id>__ ----- - -.Example log output for unsuccessful defragmentation -[source,terminal] -[subs="+quotes"] ----- -failed defrag on member: __<member_name>__, memberID: __<member_id>__: __<error_message>__ ----- - -[id="manual-defrag-etcd-data_{context}"] -== Manual defragmentation - -//You can monitor the `etcd_db_total_size_in_bytes` metric to determine whether manual defragmentation is necessary. - -A Prometheus alert indicates when you need to use manual defragmentation. 
The alert is displayed in two cases: - - * When etcd uses more than 50% of its available space for more than 10 minutes - * When etcd is actively using less than 50% of its total database size for more than 10 minutes - -You can also determine whether defragmentation is needed by checking the etcd database size in MB that will be freed by defragmentation with the PromQL expression: `(etcd_mvcc_db_total_size_in_bytes - etcd_mvcc_db_total_size_in_use_in_bytes)/1024/1024` - -[WARNING] -==== -Defragmenting etcd is a blocking action. The etcd member will not respond until defragmentation is complete. For this reason, wait at least one minute between defragmentation actions on each of the pods to allow the cluster to recover. -==== - -Follow this procedure to defragment etcd data on each etcd member. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Determine which etcd member is the leader, because the leader should be defragmented last. - -.. Get the list of etcd pods: -+ -[source,terminal] ----- -$ oc -n openshift-etcd get pods -l k8s-app=etcd -o wide ----- -+ -.Example output -[source,terminal] ----- -etcd-ip-10-0-159-225.example.redhat.com 3/3 Running 0 175m 10.0.159.225 ip-10-0-159-225.example.redhat.com <none> <none> -etcd-ip-10-0-191-37.example.redhat.com 3/3 Running 0 173m 10.0.191.37 ip-10-0-191-37.example.redhat.com <none> <none> -etcd-ip-10-0-199-170.example.redhat.com 3/3 Running 0 176m 10.0.199.170 ip-10-0-199-170.example.redhat.com <none> <none> ----- - -.. Choose a pod and run the following command to determine which etcd member is the leader: -+ -[source,terminal] ----- -$ oc rsh -n openshift-etcd etcd-ip-10-0-159-225.example.redhat.com etcdctl endpoint status --cluster -w table ----- -+ -.Example output -[source,terminal] ----- -Defaulting container name to etcdctl. -Use 'oc describe pod/etcd-ip-10-0-159-225.example.redhat.com -n openshift-etcd' to see all of the containers in this pod. -+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+ -| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS | -+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+ -| https://10.0.191.37:2379 | 251cd44483d811c3 | 3.4.9 | 104 MB | false | false | 7 | 91624 | 91624 | | -| https://10.0.159.225:2379 | 264c7c58ecbdabee | 3.4.9 | 104 MB | false | false | 7 | 91624 | 91624 | | -| https://10.0.199.170:2379 | 9ac311f93915cc79 | 3.4.9 | 104 MB | true | false | 7 | 91624 | 91624 | | -+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+ ----- -+ -Based on the `IS LEADER` column of this output, the [x-]`https://10.0.199.170:2379` endpoint is the leader. Matching this endpoint with the output of the previous step, the pod name of the leader is `etcd-ip-10-0-199-170.example.redhat.com`. - -. Defragment an etcd member. - -.. Connect to the running etcd container, passing in the name of a pod that is _not_ the leader: -+ -[source,terminal] ----- -$ oc rsh -n openshift-etcd etcd-ip-10-0-159-225.example.redhat.com ----- - -.. Unset the `ETCDCTL_ENDPOINTS` environment variable: -+ -[source,terminal] ----- -sh-4.4# unset ETCDCTL_ENDPOINTS ----- - -.. 
Defragment the etcd member: -+ -[source,terminal] ----- -sh-4.4# etcdctl --command-timeout=30s --endpoints=https://localhost:2379 defrag ----- -+ -.Example output -[source,terminal] ----- -Finished defragmenting etcd member[https://localhost:2379] ----- -+ -If a timeout error occurs, increase the value for `--command-timeout` until the command succeeds. - -.. Verify that the database size was reduced: -+ -[source,terminal] ----- -sh-4.4# etcdctl endpoint status -w table --cluster ----- -+ -.Example output -[source,terminal] ----- -+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+ -| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS | -+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+ -| https://10.0.191.37:2379 | 251cd44483d811c3 | 3.4.9 | 104 MB | false | false | 7 | 91624 | 91624 | | -| https://10.0.159.225:2379 | 264c7c58ecbdabee | 3.4.9 | 41 MB | false | false | 7 | 91624 | 91624 | | <1> -| https://10.0.199.170:2379 | 9ac311f93915cc79 | 3.4.9 | 104 MB | true | false | 7 | 91624 | 91624 | | -+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+ ----- -This example shows that the database size for this etcd member is now 41 MB as opposed to the starting size of 104 MB. - -.. Repeat these steps to connect to each of the other etcd members and defragment them. Always defragment the leader last. -+ -Wait at least one minute between defragmentation actions to allow the etcd pod to recover. Until the etcd pod recovers, the etcd member will not respond. - -. If any `NOSPACE` alarms were triggered due to the space quota being exceeded, clear them. - -.. Check if there are any `NOSPACE` alarms: -+ -[source,terminal] ----- -sh-4.4# etcdctl alarm list ----- -+ -.Example output -[source,terminal] ----- -memberID:12345678912345678912 alarm:NOSPACE ----- - -.. Clear the alarms: -+ -[source,terminal] ----- -sh-4.4# etcdctl alarm disarm ----- - -.Next steps - -After defragmentation, if etcd still uses more than 50% of its available space, consider increasing the disk quota for etcd. diff --git a/modules/etcd-encryption-types.adoc b/modules/etcd-encryption-types.adoc deleted file mode 100644 index a06811bb20f9..000000000000 --- a/modules/etcd-encryption-types.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * security/encrypting-etcd.adoc -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: CONCEPT -[id="etcd-encryption-types_{context}"] -= Supported encryption types - -The following encryption types are supported for encrypting etcd data in {product-title}: - -AES-CBC:: Uses AES-CBC with PKCS#7 padding and a 32 byte key to perform the encryption. The encryption keys are rotated weekly. - -AES-GCM:: Uses AES-GCM with a random nonce and a 32 byte key to perform the encryption. The encryption keys are rotated weekly. 
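For context, the encryption type is selected on the cluster `APIServer` object, as described in the encrypting etcd assembly that includes this module. The following is a minimal sketch, assuming the standard `config.openshift.io/v1` API; it is an illustration, not a complete procedure:

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: APIServer
metadata:
  name: cluster
spec:
  encryption:
    type: aesgcm <1>
----
<1> Set to `aescbc` or `aesgcm`, depending on the encryption type you want to use.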
diff --git a/modules/etcd-operator.adoc b/modules/etcd-operator.adoc deleted file mode 100644 index 7c8600c604ee..000000000000 --- a/modules/etcd-operator.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="etcd-cluster-operator_{context}"] -= etcd cluster Operator - -[discrete] -== Purpose - -The etcd cluster Operator automates etcd cluster scaling, enables etcd monitoring and metrics, and simplifies disaster recovery procedures. -[discrete] -== Project - -link:https://github.com/openshift/cluster-etcd-operator/[cluster-etcd-operator] - -[discrete] -== CRDs - -* `etcds.operator.openshift.io` -** Scope: Cluster -** CR: `etcd` -** Validation: Yes - -[discrete] -== Configuration objects - -[source,terminal] ----- -$ oc edit etcd cluster ----- diff --git a/modules/etcd-overview.adoc b/modules/etcd-overview.adoc deleted file mode 100644 index 431353d9f2e1..000000000000 --- a/modules/etcd-overview.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/control-plane.adoc - - -:_content-type: CONCEPT -[id="etcd-overview_{context}"] -= Overview of etcd - -etcd is a consistent, distributed key-value store that holds small amounts of data that can fit entirely in memory. Although etcd is a core component of many projects, it is the primary data store for Kubernetes, which is the standard system for container orchestration. - -[id="etcd-benefits_{context}"] -== Benefits of using etcd - -By using etcd, you can benefit in several ways: - -* Maintain consistent uptime for your cloud-native applications, and keep them working even if individual servers fail -* Store and replicate all cluster states for Kubernetes -* Distribute configuration data to provide redundancy and resiliency for the configuration of nodes - -[id="etcd-architecture_{context}"] -== How etcd works - -To ensure a reliable approach to cluster configuration and management, etcd uses the etcd Operator. The Operator simplifies the use of etcd on a Kubernetes container platform like {product-title}. With the etcd Operator, you can create or delete etcd members, resize clusters, perform backups, and upgrade etcd. - -The etcd Operator observes, analyzes, and acts: - -. It observes the cluster state by using the Kubernetes API. -. It analyzes differences between the current state and the state that you want. -. It fixes the differences through the etcd cluster management APIs, the Kubernetes API, or both. - -etcd holds the cluster state, which is constantly updated. This state is continuously persisted, which leads to a high number of small changes at high frequency. As a result, it is critical to back the etcd cluster member with fast, low-latency I/O. For more information about best practices for etcd, see "Recommended etcd practices". diff --git a/modules/example-apache-httpd-configuration.adoc b/modules/example-apache-httpd-configuration.adoc deleted file mode 100644 index 1eef9c54072c..000000000000 --- a/modules/example-apache-httpd-configuration.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-ldap-identity-provider.adoc - -[id="example-apache-httpd-configuration_{context}"] -= Example Apache HTTPD configuration for basic identity providers - -The basic identify provider (IDP) configuration in {product-title} 4 requires -that the IDP server respond with JSON for success and failures. 
You can use CGI -scripting in Apache HTTPD to accomplish this. This section provides examples. - -.Example `/etc/httpd/conf.d/login.conf` ----- -<VirtualHost *:443> - # CGI Scripts in here - DocumentRoot /var/www/cgi-bin - - # SSL Directives - SSLEngine on - SSLCipherSuite PROFILE=SYSTEM - SSLProxyCipherSuite PROFILE=SYSTEM - SSLCertificateFile /etc/pki/tls/certs/localhost.crt - SSLCertificateKeyFile /etc/pki/tls/private/localhost.key - - # Configure HTTPD to execute scripts - ScriptAlias /basic /var/www/cgi-bin - - # Handles a failed login attempt - ErrorDocument 401 /basic/fail.cgi - - # Handles authentication - <Location /basic/login.cgi> - AuthType Basic - AuthName "Please Log In" - AuthBasicProvider file - AuthUserFile /etc/httpd/conf/passwords - Require valid-user - </Location> -</VirtualHost> ----- - -.Example `/var/www/cgi-bin/login.cgi` ----- -#!/bin/bash -echo "Content-Type: application/json" -echo "" -echo '{"sub":"userid", "name":"'$REMOTE_USER'"}' -exit 0 ----- - -.Example `/var/www/cgi-bin/fail.cgi` ----- -#!/bin/bash -echo "Content-Type: application/json" -echo "" -echo '{"error": "Login failure"}' -exit 0 ----- - -== File requirements - -These are the requirements for the files you create on an Apache HTTPD web -server: - -* `login.cgi` and `fail.cgi` must be executable (`chmod +x`). -* `login.cgi` and `fail.cgi` must have proper SELinux contexts if SELinux is -enabled: `restorecon -RFv /var/www/cgi-bin`, or ensure that the context is -`httpd_sys_script_exec_t` using `ls -laZ`. -* `login.cgi` is only executed if your user successfully logs in per `Require -and Auth` directives. -* `fail.cgi` is executed if the user fails to log in, resulting in an `HTTP 401` -response. diff --git a/modules/explanation-of-capabilities.adoc b/modules/explanation-of-capabilities.adoc deleted file mode 100644 index 98ff249574ba..000000000000 --- a/modules/explanation-of-capabilities.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/cluster-capabilities.adoc - -:_content-type: REFERENCE -[id="explanation_of_capabilities_{context}"] -= Optional cluster capabilities in {product-title} {product-version} - -Currently, cluster Operators provide the features for these optional capabilities. The following summarizes the features provided by each capability and what functionality you lose if it is disabled. diff --git a/modules/feature-gate-features.adoc b/modules/feature-gate-features.adoc deleted file mode 100644 index 55a72730f25f..000000000000 --- a/modules/feature-gate-features.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-enabling-features.adoc - -[id="feature-gate-features_{context}"] -= Features that are affected by FeatureGates - -The following Technology Preview features included in {product-title}: - -[options="header"] -|=== -| FeatureGate| Description| Default - -|`RotateKubeletServerCertificate` -|Enables the rotation of the server TLS certificate on the cluster. -|True - -|`SupportPodPidsLimit` -|Enables support for limiting the number of processes (PIDs) running in a pod. -|True - -|`MachineHealthCheck` -|Enables automatically repairing unhealthy machines in a machine pool. -|True - -|`LocalStorageCapacityIsolation` -|Enable the consumption of local ephemeral storage and also the `sizeLimit` property of an `emptyDir` volume. -|False - -|=== - -You can enable these features by editing the Feature Gate Custom Resource. 
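A minimal sketch of that edit follows, assuming the `TechPreviewNoUpgrade` feature set is used to turn on Technology Preview features; treat it as an illustration rather than a recommended configuration:

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: FeatureGate
metadata:
  name: cluster
spec:
  featureSet: TechPreviewNoUpgrade
----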
-Turning on these features cannot be undone and prevents the ability to upgrade your cluster. diff --git a/modules/file-integrity-CR-phases.adoc b/modules/file-integrity-CR-phases.adoc deleted file mode 100644 index 7376bba95055..000000000000 --- a/modules/file-integrity-CR-phases.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-understanding.adoc - -[id="file-integrity-CR-phases_{context}"] -= FileIntegrity custom resource phases - -* `Pending` - The phase after the custom resource (CR) is created. -* `Active` - The phase when the backing daemon set is up and running. -* `Initializing` - The phase when the AIDE database is being reinitialized. diff --git a/modules/file-integrity-events.adoc b/modules/file-integrity-events.adoc deleted file mode 100644 index e9ddd20a3868..000000000000 --- a/modules/file-integrity-events.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-understanding.adoc - -:_content-type: CONCEPT -[id="file-integrity-events_{context}"] -= Understanding events - -Transitions in the status of the `FileIntegrity` and `FileIntegrityNodeStatus` objects are logged by _events_. The creation time of the event reflects the latest transition, such as `Initializing` to `Active`, and not necessarily the latest scan result. However, the newest event always reflects the most recent status. - -[source,terminal] ----- -$ oc get events --field-selector reason=FileIntegrityStatus ----- - -.Example output -[source,terminal] ----- -LAST SEEN TYPE REASON OBJECT MESSAGE -97s Normal FileIntegrityStatus fileintegrity/example-fileintegrity Pending -67s Normal FileIntegrityStatus fileintegrity/example-fileintegrity Initializing -37s Normal FileIntegrityStatus fileintegrity/example-fileintegrity Active ----- - -When a node scan fails, an event is created with the `add/changed/removed` and config map information. - -[source,terminal] ----- -$ oc get events --field-selector reason=NodeIntegrityStatus ----- - -.Example output -[source,terminal] ----- -LAST SEEN TYPE REASON OBJECT MESSAGE -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-134-173.ec2.internal -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-168-238.ec2.internal -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-169-175.ec2.internal -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-152-92.ec2.internal -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-158-144.ec2.internal -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-131-30.ec2.internal -87m Warning NodeIntegrityStatus fileintegrity/example-fileintegrity node ip-10-0-152-92.ec2.internal has changed! a:1,c:1,r:0 \ log:openshift-file-integrity/aide-ds-example-fileintegrity-ip-10-0-152-92.ec2.internal-failed ----- - -Changes to the number of added, changed, or removed files results in a new event, even if the status of the node has not transitioned. 
- -[source,terminal] ----- -$ oc get events --field-selector reason=NodeIntegrityStatus ----- - -.Example output -[source,terminal] ----- -LAST SEEN TYPE REASON OBJECT MESSAGE -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-134-173.ec2.internal -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-168-238.ec2.internal -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-169-175.ec2.internal -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-152-92.ec2.internal -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-158-144.ec2.internal -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-131-30.ec2.internal -87m Warning NodeIntegrityStatus fileintegrity/example-fileintegrity node ip-10-0-152-92.ec2.internal has changed! a:1,c:1,r:0 \ log:openshift-file-integrity/aide-ds-example-fileintegrity-ip-10-0-152-92.ec2.internal-failed -40m Warning NodeIntegrityStatus fileintegrity/example-fileintegrity node ip-10-0-152-92.ec2.internal has changed! a:3,c:1,r:0 \ log:openshift-file-integrity/aide-ds-example-fileintegrity-ip-10-0-152-92.ec2.internal-failed ----- diff --git a/modules/file-integrity-examine-default-config.adoc b/modules/file-integrity-examine-default-config.adoc deleted file mode 100644 index 6c6327b32db7..000000000000 --- a/modules/file-integrity-examine-default-config.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-configuring.adoc - -:_content-type: PROCEDURE -[id="file-integrity-examine-default-config_{context}"] -= Examine the default configuration - -The default File Integrity Operator configuration is stored in a config map with -the same name as the `FileIntegrity` CR. - -.Procedure - -* To examine the default config, run: -+ -[source,terminal] ----- -$ oc describe cm/worker-fileintegrity ----- diff --git a/modules/file-integrity-important-attributes.adoc b/modules/file-integrity-important-attributes.adoc deleted file mode 100644 index 887cdf067178..000000000000 --- a/modules/file-integrity-important-attributes.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-configuring.adoc - -[id="important-file-integrity-object-attributes_{context}"] -= Important attributes - -.Important `spec` and `spec.config` attributes - -[%header,cols=2*] -|=== -|Attribute -|Description - -|`spec.nodeSelector` -|A map of key-values pairs that must match with node's labels in order for the -AIDE pods to be schedulable on that node. The typical use is to set only a -single key-value pair where `node-role.kubernetes.io/worker: ""` schedules AIDE on -all worker nodes, `node.openshift.io/os_id: "rhcos"` schedules on all -{op-system-first} nodes. - -|`spec.debug` -|A boolean attribute. If set to `true`, the daemon running in the AIDE deamon set's -pods would output extra information. - -|`spec.tolerations` -|Specify tolerations to schedule on nodes with custom taints. When not specified, -a default toleration is applied, which allows tolerations to run on control plane nodes. - -|`spec.config.gracePeriod` -|The number of seconds to pause in between AIDE integrity checks. 
Frequent AIDE -checks on a node can be resource intensive, so it can be useful to specify a -longer interval. Defaults to `900`, or 15 minutes. - -|`maxBackups` -|The maximum number of AIDE database and log backups leftover from the `re-init` process to keep on a node. Older backups beyond this number are automatically pruned by the daemon. - -|`spec.config.name` -| Name of a configMap that contains custom AIDE configuration. If omitted, a default configuration is created. - -|`spec.config.namespace` -|Namespace of a configMap that contains custom AIDE configuration. If unset, the FIO generates a default configuration suitable for {op-system} systems. - -|`spec.config.key` -|Key that contains actual AIDE configuration in a config map specified by `name` and `namespace`. The default value is `aide.conf`. - -|`spec.config.initialDelay` -|The number of seconds to wait before starting the first AIDE integrity check. Default is set to 0. This attribute is optional. -|=== diff --git a/modules/file-integrity-node-status-failure.adoc b/modules/file-integrity-node-status-failure.adoc deleted file mode 100644 index 4914b9a76da9..000000000000 --- a/modules/file-integrity-node-status-failure.adoc +++ /dev/null @@ -1,112 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-understanding.adoc - -[id="file-integrity-node-status-failure_{context}"] -= FileIntegrityNodeStatus CR failure status example - -To simulate a failure condition, modify one of the files AIDE tracks. For example, modify `/etc/resolv.conf` on one of the worker nodes: - -[source,terminal] ----- -$ oc debug node/ip-10-0-130-192.ec2.internal ----- - -.Example output -[source,terminal] ----- -Creating debug namespace/openshift-debug-node-ldfbj ... -Starting pod/ip-10-0-130-192ec2internal-debug ... -To use host binaries, run `chroot /host` -Pod IP: 10.0.130.192 -If you don't see a command prompt, try pressing enter. -sh-4.2# echo "# integrity test" >> /host/etc/resolv.conf -sh-4.2# exit - -Removing debug pod ... -Removing debug namespace/openshift-debug-node-ldfbj ... ----- - -After some time, the `Failed` condition is reported in the results array of the corresponding `FileIntegrityNodeStatus` object. The previous `Succeeded` condition is retained, which allows you to pinpoint the time the check failed. 
- -[source,terminal] ----- -$ oc get fileintegritynodestatuses.fileintegrity.openshift.io/worker-fileintegrity-ip-10-0-130-192.ec2.internal -ojsonpath='{.results}' | jq -r ----- - -Alternatively, if you are not mentioning the object name, run: - -[source,terminal] ----- -$ oc get fileintegritynodestatuses.fileintegrity.openshift.io -ojsonpath='{.items[*].results}' | jq ----- - -.Example output -[source,terminal] ----- -[ - { - "condition": "Succeeded", - "lastProbeTime": "2020-09-15T12:54:14Z" - }, - { - "condition": "Failed", - "filesChanged": 1, - "lastProbeTime": "2020-09-15T12:57:20Z", - "resultConfigMapName": "aide-ds-worker-fileintegrity-ip-10-0-130-192.ec2.internal-failed", - "resultConfigMapNamespace": "openshift-file-integrity" - } -] ----- - -The `Failed` condition points to a config map that gives more details about what exactly failed and why: - -[source,terminal] ----- -$ oc describe cm aide-ds-worker-fileintegrity-ip-10-0-130-192.ec2.internal-failed ----- - -.Example output -[source,terminal] ----- -Name: aide-ds-worker-fileintegrity-ip-10-0-130-192.ec2.internal-failed -Namespace: openshift-file-integrity -Labels: file-integrity.openshift.io/node=ip-10-0-130-192.ec2.internal - file-integrity.openshift.io/owner=worker-fileintegrity - file-integrity.openshift.io/result-log= -Annotations: file-integrity.openshift.io/files-added: 0 - file-integrity.openshift.io/files-changed: 1 - file-integrity.openshift.io/files-removed: 0 - -Data - -integritylog: ------- -AIDE 0.15.1 found differences between database and filesystem!! -Start timestamp: 2020-09-15 12:58:15 - -Summary: - Total number of files: 31553 - Added files: 0 - Removed files: 0 - Changed files: 1 - - ---------------------------------------------------- -Changed files: ---------------------------------------------------- - -changed: /hostroot/etc/resolv.conf - ---------------------------------------------------- -Detailed information about changes: ---------------------------------------------------- - - -File: /hostroot/etc/resolv.conf - SHA512 : sTQYpB/AL7FeoGtu/1g7opv6C+KT1CBJ , qAeM+a8yTgHPnIHMaRlS+so61EN8VOpg - -Events: <none> ----- - -Due to the config map data size limit, AIDE logs over 1 MB are added to the failure config map as a base64-encoded gzip archive. In this case, you want to pipe the output of the above command to `base64 --decode | gunzip`. Compressed logs are indicated by the presence of a `file-integrity.openshift.io/compressed` annotation key in the config map. diff --git a/modules/file-integrity-node-status-success.adoc b/modules/file-integrity-node-status-success.adoc deleted file mode 100644 index c21827745454..000000000000 --- a/modules/file-integrity-node-status-success.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-understanding.adoc - -[id="file-integrity-node-status-success_{context}"] -= FileIntegrityNodeStatus CR success example - -.Example output of a condition with a success status - -[source,terminal] ----- -[ - { - "condition": "Succeeded", - "lastProbeTime": "2020-09-15T12:45:57Z" - } -] -[ - { - "condition": "Succeeded", - "lastProbeTime": "2020-09-15T12:46:03Z" - } -] -[ - { - "condition": "Succeeded", - "lastProbeTime": "2020-09-15T12:45:48Z" - } -] ----- - -In this case, all three scans succeeded and so far there are no other conditions. 
diff --git a/modules/file-integrity-node-status.adoc b/modules/file-integrity-node-status.adoc deleted file mode 100644 index 4cec74251a91..000000000000 --- a/modules/file-integrity-node-status.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-understanding.adoc - -[id="file-integrity-node-status-types_{context}"] -= FileIntegrityNodeStatus CR status types - -These conditions are reported in the results array of the corresponding `FileIntegrityNodeStatus` CR status: - -* `Succeeded` - The integrity check passed; the files and directories covered by the AIDE check have not been modified since the database was last initialized. - -* `Failed` - The integrity check failed; some files or directories covered by the AIDE check have been modified since the database was last initialized. - -* `Errored` - The AIDE scanner encountered an internal error. diff --git a/modules/file-integrity-operator-changing-custom-config.adoc b/modules/file-integrity-operator-changing-custom-config.adoc deleted file mode 100644 index fe5889a6c2ea..000000000000 --- a/modules/file-integrity-operator-changing-custom-config.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-configuring.adoc - -[id="file-integrity-operator-changing-custom-config_{context}"] -= Changing the custom File Integrity configuration - -To change the File Integrity configuration, never change the generated -config map. Instead, change the config map that is linked to the `FileIntegrity` -object through the `spec.name`, `namespace`, and `key` attributes. diff --git a/modules/file-integrity-operator-defining-custom-config.adoc b/modules/file-integrity-operator-defining-custom-config.adoc deleted file mode 100644 index f6d472aa7929..000000000000 --- a/modules/file-integrity-operator-defining-custom-config.adoc +++ /dev/null @@ -1,103 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-configuring.adoc - -:_content-type: PROCEDURE -[id="file-integrity-operator-defining-custom-config_{context}"] -= Defining a custom File Integrity Operator configuration - -This example focuses on defining a custom configuration for a scanner that runs -on the control plane nodes based on the default configuration provided for the -`worker-fileintegrity` CR. This workflow might be useful if you are planning -to deploy a custom software running as a daemon set and storing its data under -`/opt/mydaemon` on the control plane nodes. - -.Procedure - -. Make a copy of the default configuration. - -. Edit the default configuration with the files that must be watched or excluded. - -. Store the edited contents in a new config map. - -. Point the `FileIntegrity` object to the new config map through the attributes in -`spec.config`. - -. Extract the default configuration: -+ -[source,terminal] ----- -$ oc extract cm/worker-fileintegrity --keys=aide.conf ----- -+ -This creates a file named `aide.conf` that you can edit. 
To illustrate how the -Operator post-processes the paths, this example adds an exclude directory -without the prefix: -+ -[source,terminal] ----- -$ vim aide.conf ----- -+ -.Example output -[source,terminal] ----- -/hostroot/etc/kubernetes/static-pod-resources -!/hostroot/etc/kubernetes/aide.* -!/hostroot/etc/kubernetes/manifests -!/hostroot/etc/docker/certs.d -!/hostroot/etc/selinux/targeted -!/hostroot/etc/openvswitch/conf.db ----- -+ -Exclude a path specific to control plane nodes: -+ -[source,terminal] ----- -!/opt/mydaemon/ ----- -+ -Store the other content in `/etc`: -+ -[source,terminal] ----- -/hostroot/etc/ CONTENT_EX ----- - -. Create a config map based on this file: -+ -[source,terminal] ----- -$ oc create cm master-aide-conf --from-file=aide.conf ----- - -. Define a `FileIntegrity` CR manifest that references the config map: -+ -[source,yaml] ----- -apiVersion: fileintegrity.openshift.io/v1alpha1 -kind: FileIntegrity -metadata: - name: master-fileintegrity - namespace: openshift-file-integrity -spec: - nodeSelector: - node-role.kubernetes.io/master: "" - config: - name: master-aide-conf - namespace: openshift-file-integrity ----- -+ -The Operator processes the provided config map file and stores the result in a -config map with the same name as the `FileIntegrity` object: -+ -[source,terminal] ----- -$ oc describe cm/master-fileintegrity | grep /opt/mydaemon ----- -+ -.Example output -[source,terminal] ----- -!/hostroot/opt/mydaemon ----- diff --git a/modules/file-integrity-operator-exploring-daemon-sets.adoc b/modules/file-integrity-operator-exploring-daemon-sets.adoc deleted file mode 100644 index 87c4a3b03a9b..000000000000 --- a/modules/file-integrity-operator-exploring-daemon-sets.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-advanced-usage.adoc - -[id="file-integrity-operator-exploring-daemon-sets_{context}"] -= Exploring the daemon sets - -Each `FileIntegrity` object represents a scan on a number of nodes. The scan -itself is performed by pods managed by a daemon set. - -To find the daemon set that represents a `FileIntegrity` object, run: - -[source,terminal] ----- -$ oc -n openshift-file-integrity get ds/aide-worker-fileintegrity ----- - -To list the pods in that daemon set, run: - -[source,terminal] ----- -$ oc -n openshift-file-integrity get pods -lapp=aide-worker-fileintegrity ----- - -To view logs of a single AIDE pod, call `oc logs` on one of the pods. - -[source,terminal] ----- -$ oc -n openshift-file-integrity logs pod/aide-worker-fileintegrity-mr8x6 ----- - -.Example output -[source,terminal] ----- -Starting the AIDE runner daemon -initializing AIDE db -initialization finished -running aide check -... ----- - -The config maps created by the AIDE daemon are not retained and are deleted -after the File Integrity Operator processes them. However, on failure and error, -the contents of these config maps are copied to the config map that the -`FileIntegrityNodeStatus` object points to. 
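To find those retained config maps, one option is to filter on the labels shown in the failure example earlier in this documentation; the owner label value below assumes the `worker-fileintegrity` object and is illustrative only:

[source,terminal]
----
$ oc -n openshift-file-integrity get configmaps -l file-integrity.openshift.io/owner=worker-fileintegrity
----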
diff --git a/modules/file-integrity-operator-installing-cli.adoc b/modules/file-integrity-operator-installing-cli.adoc deleted file mode 100644 index 25b574cb1ed8..000000000000 --- a/modules/file-integrity-operator-installing-cli.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-installation.adoc - -:_content-type: PROCEDURE -[id="installing-file-integrity-operator-using-cli_{context}"] -= Installing the File Integrity Operator using the CLI - -.Prerequisites - -* You must have `admin` privileges. - -.Procedure - -. Create a `Namespace` object YAML file by running: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - labels: - openshift.io/cluster-monitoring: "true" - pod-security.kubernetes.io/enforce: privileged <1> - name: openshift-file-integrity ----- -<1> In {product-title} {product-version}, the pod security label must be set to `privileged` at the namespace level. - -. Create the `OperatorGroup` object YAML file: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: file-integrity-operator - namespace: openshift-file-integrity -spec: - targetNamespaces: - - openshift-file-integrity ----- - -. Create the `Subscription` object YAML file: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: file-integrity-operator - namespace: openshift-file-integrity -spec: - channel: "stable" - installPlanApproval: Automatic - name: file-integrity-operator - source: redhat-operators - sourceNamespace: openshift-marketplace ----- - -.Verification - -. Verify the installation succeeded by inspecting the CSV file: -+ -[source,terminal] ----- -$ oc get csv -n openshift-file-integrity ----- - -. Verify that the File Integrity Operator is up and running: -+ -[source,terminal] ----- -$ oc get deploy -n openshift-file-integrity ----- diff --git a/modules/file-integrity-operator-installing-web-console.adoc b/modules/file-integrity-operator-installing-web-console.adoc deleted file mode 100644 index 902b0195d9a8..000000000000 --- a/modules/file-integrity-operator-installing-web-console.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-installation.adoc - -:_content-type: PROCEDURE -[id="installing-file-integrity-operator-using-web-console_{context}"] -= Installing the File Integrity Operator using the web console - -.Prerequisites - -* You must have `admin` privileges. - -.Procedure - -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. -. Search for the File Integrity Operator, then click *Install*. -. Keep the default selection of *Installation mode* and *namespace* to ensure that the Operator will be installed to the `openshift-file-integrity` namespace. -. Click *Install*. - -.Verification - -To confirm that the installation is successful: - -. Navigate to the *Operators* -> *Installed Operators* page. -. Check that the Operator is installed in the `openshift-file-integrity` namespace and its status is `Succeeded`. - -If the Operator is not installed successfully: - -. 
Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. -. Navigate to the *Workloads* -> *Pods* page and check the logs in any pods in the `openshift-file-integrity` project that are reporting issues. diff --git a/modules/file-integrity-operator-machine-config-integration.adoc b/modules/file-integrity-operator-machine-config-integration.adoc deleted file mode 100644 index 8e29ade7ce97..000000000000 --- a/modules/file-integrity-operator-machine-config-integration.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-advanced-usage.adoc - -[id="file-integrity-operator-machine-config-integration_{context}"] -= Machine config integration - -In {product-title} 4, the cluster node configuration is delivered through -`MachineConfig` objects. You can assume that the changes to files that are -caused by a `MachineConfig` object are expected and should not cause the file -integrity scan to fail. To suppress changes to files caused by `MachineConfig` -object updates, the File Integrity Operator watches the node objects; when a -node is being updated, the AIDE scans are suspended for the duration of the -update. When the update finishes, the database is reinitialized and the scans -resume. - -This pause and resume logic only applies to updates through the `MachineConfig` -API, as they are reflected in the node object annotations. diff --git a/modules/file-integrity-operator-reinitializing-database.adoc b/modules/file-integrity-operator-reinitializing-database.adoc deleted file mode 100644 index 17f49f63bf53..000000000000 --- a/modules/file-integrity-operator-reinitializing-database.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-advanced-usage.adoc - -:_content-type: PROCEDURE -[id="file-integrity-operator-reinitializing-database_{context}"] -= Reinitializing the database - -If the File Integrity Operator detects a change that was planned, it might be required to reinitialize the database. - -.Procedure - -* Annotate the `FileIntegrity` custom resource (CR) with `file-integrity.openshift.io/re-init`: -+ -[source,terminal] ----- -$ oc annotate fileintegrities/worker-fileintegrity file-integrity.openshift.io/re-init= ----- -+ -The old database and log files are backed up and a new database is initialized. The old database and logs are retained on the nodes under `/etc/kubernetes`, as -seen in the following output from a pod spawned using `oc debug`: -+ -.Example output -[source,terminal] ----- - ls -lR /host/etc/kubernetes/aide.* --rw-------. 1 root root 1839782 Sep 17 15:08 /host/etc/kubernetes/aide.db.gz --rw-------. 1 root root 1839783 Sep 17 14:30 /host/etc/kubernetes/aide.db.gz.backup-20200917T15_07_38 --rw-------. 1 root root 73728 Sep 17 15:07 /host/etc/kubernetes/aide.db.gz.backup-20200917T15_07_55 --rw-r--r--. 1 root root 0 Sep 17 15:08 /host/etc/kubernetes/aide.log --rw-------. 1 root root 613 Sep 17 15:07 /host/etc/kubernetes/aide.log.backup-20200917T15_07_38 --rw-r--r--. 1 root root 0 Sep 17 15:07 /host/etc/kubernetes/aide.log.backup-20200917T15_07_55 ----- -+ -To provide some permanence of record, the resulting config maps are not owned by the `FileIntegrity` object, so manual cleanup is necessary. As a -result, any previous integrity failures would still be visible in the `FileIntegrityNodeStatus` object. 
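When you decide to clean up old result config maps manually, one possible approach is to delete by label. The label key below is taken from the failure example earlier in this documentation; verify which config maps the selector matches in your cluster before deleting anything:

[source,terminal]
----
$ oc -n openshift-file-integrity delete configmaps -l file-integrity.openshift.io/result-log
----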
diff --git a/modules/file-integrity-operator-viewing-attributes.adoc b/modules/file-integrity-operator-viewing-attributes.adoc deleted file mode 100644 index b7372cfdcafc..000000000000 --- a/modules/file-integrity-operator-viewing-attributes.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-configuring.adoc - -[id="viewing-file-integrity-object-attributes_{context}"] -= Viewing FileIntegrity object attributes - -As with any Kubernetes custom resource (CR), you can run `oc explain fileintegrity`, and then look at the individual attributes by running: - -[source,terminal] ----- -$ oc explain fileintegrity.spec ----- - -[source,terminal] ----- -$ oc explain fileintegrity.spec.config ----- diff --git a/modules/file-integrity-supplying-custom-aide-config.adoc b/modules/file-integrity-supplying-custom-aide-config.adoc deleted file mode 100644 index 45996b582b88..000000000000 --- a/modules/file-integrity-supplying-custom-aide-config.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-configuring.adoc - -[id="file-integrity-operator-supplying-custom-aide-config_{context}"] -= Supplying a custom AIDE configuration - -Any entries that configure AIDE internal behavior, such as `DBDIR`, `LOGDIR`, `database`, and `database_out`, are overwritten by the Operator. The Operator prepends `/hostroot/` to all paths that are watched for integrity changes. This prefix makes it easier to reuse existing AIDE configurations, which are often not tailored for a containerized environment and start from the root directory. - -[NOTE] -==== -`/hostroot` is the directory where the pods running AIDE mount the host's file system. Changing the configuration triggers a reinitialization of the database.
-==== diff --git a/modules/file-integrity-understanding-default-config.adoc b/modules/file-integrity-understanding-default-config.adoc deleted file mode 100644 index dc8acd071cb6..000000000000 --- a/modules/file-integrity-understanding-default-config.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-configuring.adoc - -:_content-type: CONCEPT -[id="file-integrity-understanding-default-config_{context}"] -= Understanding the default File Integrity Operator configuration - -Below is an excerpt from the `aide.conf` key of the config map: - -[source,bash] ----- -@@define DBDIR /hostroot/etc/kubernetes -@@define LOGDIR /hostroot/etc/kubernetes -database=file:@@{DBDIR}/aide.db.gz -database_out=file:@@{DBDIR}/aide.db.gz -gzip_dbout=yes -verbose=5 -report_url=file:@@{LOGDIR}/aide.log -report_url=stdout -PERMS = p+u+g+acl+selinux+xattrs -CONTENT_EX = sha512+ftype+p+u+g+n+acl+selinux+xattrs - -/hostroot/boot/ CONTENT_EX -/hostroot/root/\..* PERMS -/hostroot/root/ CONTENT_EX ----- - -The default configuration for a `FileIntegrity` instance provides coverage for -files under the following directories: - -* `/root` -* `/boot` -* `/usr` -* `/etc` - -The following directories are not covered: - -* `/var` -* `/opt` -* Some {product-title}-specific excludes under `/etc/` diff --git a/modules/file-integrity-understanding-file-integrity-cr.adoc b/modules/file-integrity-understanding-file-integrity-cr.adoc deleted file mode 100644 index 8aa9455512ed..000000000000 --- a/modules/file-integrity-understanding-file-integrity-cr.adoc +++ /dev/null @@ -1,78 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-understanding.adoc - -:_content-type: PROCEDURE -[id="understanding-file-integrity-custom-resource_{context}"] -= Creating the FileIntegrity custom resource - -An instance of a `FileIntegrity` custom resource (CR) represents a set of continuous file integrity scans for one or more nodes. - -Each `FileIntegrity` CR is backed by a daemon set running AIDE on the nodes matching the `FileIntegrity` CR specification. - -.Procedure - -. Create the following example `FileIntegrity` CR named `worker-fileintegrity.yaml` to enable scans on worker nodes: -+ -.Example FileIntegrity CR -[source,yaml] ----- -apiVersion: fileintegrity.openshift.io/v1alpha1 -kind: FileIntegrity -metadata: - name: worker-fileintegrity - namespace: openshift-file-integrity -spec: - nodeSelector: <1> - node-role.kubernetes.io/worker: "" - tolerations: <2> - - key: "myNode" - operator: "Exists" - effect: "NoSchedule" - config: <3> - name: "myconfig" - namespace: "openshift-file-integrity" - key: "config" - gracePeriod: 20 <4> - maxBackups: 5 <5> - initialDelay: 60 <6> - debug: false -status: - phase: Active <7> ----- -<1> Defines the selector for scheduling node scans. -<2> Specify `tolerations` to schedule on nodes with custom taints. When not specified, a default toleration allowing running on main and infra nodes is applied. -<3> Define a `ConfigMap` containing an AIDE configuration to use. -<4> The number of seconds to pause in between AIDE integrity checks. Frequent AIDE checks on a node might be resource intensive, so it can be useful to specify a longer interval. Default is 900 seconds (15 minutes). -<5> The maximum number of AIDE database and log backups (leftover from the re-init process) to keep on a node. 
Older backups beyond this number are automatically pruned by the daemon. Default is set to 5. -<6> The number of seconds to wait before starting the first AIDE integrity check. Default is set to 0. -<7> The running status of the `FileIntegrity` instance. Statuses are `Initializing`, `Pending`, or `Active`. -+ -[horizontal] -`Initializing`:: The `FileIntegrity` object is currently initializing or re-initializing the AIDE database. -`Pending`:: The `FileIntegrity` deployment is still being created. -`Active`:: The scans are active and ongoing. - -. Apply the YAML file to the `openshift-file-integrity` namespace: -+ -[source,terminal] ----- -$ oc apply -f worker-fileintegrity.yaml -n openshift-file-integrity ----- - -.Verification - -* Confirm the `FileIntegrity` object was created successfully by running the following command: -+ -[source,terminal] ----- -$ oc get fileintegrities -n openshift-file-integrity ----- -+ -.Example output -+ -[source,terminal] ----- -NAME AGE -worker-fileintegrity 14s ----- \ No newline at end of file diff --git a/modules/file-integrity-understanding-file-integrity-node-statuses-object.adoc b/modules/file-integrity-understanding-file-integrity-node-statuses-object.adoc deleted file mode 100644 index b3b7f28825f0..000000000000 --- a/modules/file-integrity-understanding-file-integrity-node-statuses-object.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-understanding.adoc - -:_content-type: CONCEPT -[id="understanding-file-integrity-node-statuses-object_{context}"] -= Understanding the FileIntegrityNodeStatuses object - -The scan results of the `FileIntegrity` CR are reported in another object called `FileIntegrityNodeStatuses`. - -[source,terminal] ----- -$ oc get fileintegritynodestatuses ----- - -.Example output -[source,terminal] ----- -NAME AGE -worker-fileintegrity-ip-10-0-130-192.ec2.internal 101s -worker-fileintegrity-ip-10-0-147-133.ec2.internal 109s -worker-fileintegrity-ip-10-0-165-160.ec2.internal 102s ----- - -[NOTE] -==== -It might take some time for the `FileIntegrityNodeStatus` object results to be available. -==== - -There is one result object per node. The `nodeName` attribute of each `FileIntegrityNodeStatus` object corresponds to the node being scanned. The -status of the file integrity scan is represented in the `results` array, which holds scan conditions. - -[source,terminal] ----- -$ oc get fileintegritynodestatuses.fileintegrity.openshift.io -ojsonpath='{.items[*].results}' | jq ----- - -The `fileintegritynodestatus` object reports the latest status of an AIDE run and exposes the status as `Failed`, `Succeeded`, or `Errored` in a `status` field. 
- -[source,terminal] ----- -$ oc get fileintegritynodestatuses -w ----- - -.Example output -[source,terminal] ----- -NAME NODE STATUS -example-fileintegrity-ip-10-0-134-186.us-east-2.compute.internal ip-10-0-134-186.us-east-2.compute.internal Succeeded -example-fileintegrity-ip-10-0-150-230.us-east-2.compute.internal ip-10-0-150-230.us-east-2.compute.internal Succeeded -example-fileintegrity-ip-10-0-169-137.us-east-2.compute.internal ip-10-0-169-137.us-east-2.compute.internal Succeeded -example-fileintegrity-ip-10-0-180-200.us-east-2.compute.internal ip-10-0-180-200.us-east-2.compute.internal Succeeded -example-fileintegrity-ip-10-0-194-66.us-east-2.compute.internal ip-10-0-194-66.us-east-2.compute.internal Failed -example-fileintegrity-ip-10-0-222-188.us-east-2.compute.internal ip-10-0-222-188.us-east-2.compute.internal Succeeded -example-fileintegrity-ip-10-0-134-186.us-east-2.compute.internal ip-10-0-134-186.us-east-2.compute.internal Succeeded -example-fileintegrity-ip-10-0-222-188.us-east-2.compute.internal ip-10-0-222-188.us-east-2.compute.internal Succeeded -example-fileintegrity-ip-10-0-194-66.us-east-2.compute.internal ip-10-0-194-66.us-east-2.compute.internal Failed -example-fileintegrity-ip-10-0-150-230.us-east-2.compute.internal ip-10-0-150-230.us-east-2.compute.internal Succeeded -example-fileintegrity-ip-10-0-180-200.us-east-2.compute.internal ip-10-0-180-200.us-east-2.compute.internal Succeeded ----- diff --git a/modules/functions-list-kn.adoc b/modules/functions-list-kn.adoc deleted file mode 100644 index e77055534907..000000000000 --- a/modules/functions-list-kn.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies - -// * /serverless/cli_tools/kn-func-ref.adoc - -:_content-type: PROCEDURE -[id="functions-list-kn_{context}"] -= Listing existing functions - -You can list existing functions by using `kn func list`. If you want to list functions that have been deployed as Knative services, you can also use `kn service list`. - -.Procedure - -* List existing functions: -+ -[source,terminal] ----- -$ kn func list [-n <namespace> -p <path>] ----- -+ -.Example output -[source,terminal] ----- -NAME NAMESPACE RUNTIME URL READY -example-function default node http://example-function.default.apps.ci-ln-g9f36hb-d5d6b.origin-ci-int-aws.dev.rhcloud.com True ----- - -* List functions deployed as Knative services: -+ -[source,terminal] ----- -$ kn service list -n <namespace> ----- -+ -.Example output -[source,terminal] ----- -NAME URL LATEST AGE CONDITIONS READY REASON -example-function http://example-function.default.apps.ci-ln-g9f36hb-d5d6b.origin-ci-int-aws.dev.rhcloud.com example-function-gzl4c 16m 3 OK / 3 True ----- diff --git a/modules/gathering-application-diagnostic-data.adoc b/modules/gathering-application-diagnostic-data.adoc deleted file mode 100644 index 71e47442a9a2..000000000000 --- a/modules/gathering-application-diagnostic-data.adoc +++ /dev/null @@ -1,118 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-s2i.adoc - -:_content-type: PROCEDURE -[id="gathering-application-diagnostic-data_{context}"] -= Gathering application diagnostic data to investigate application failures - -Application failures can occur within running application pods. In these situations, you can retrieve diagnostic information with these strategies: - -* Review events relating to the application pods. 
-* Review the logs from the application pods, including application-specific log files that are not collected by the OpenShift Logging framework. -* Test application functionality interactively and run diagnostic tools in an application container. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. List events relating to a specific application pod. The following example retrieves events for an application pod named `my-app-1-akdlg`: -+ -[source,terminal] ----- -$ oc describe pod/my-app-1-akdlg ----- - -. Review logs from an application pod: -+ -[source,terminal] ----- -$ oc logs -f pod/my-app-1-akdlg ----- - -. Query specific logs within a running application pod. Logs that are sent to stdout are collected by the OpenShift Logging framework and are included in the output of the preceding command. The following query is only required for logs that are not sent to stdout. -+ -.. If an application log can be accessed without root privileges within a pod, concatenate the log file as follows: -+ -[source,terminal] ----- -$ oc exec my-app-1-akdlg -- cat /var/log/my-application.log ----- -+ -.. If root access is required to view an application log, you can start a debug container with root privileges and then view the log file from within the container. Start the debug container from the project's `DeploymentConfig` object. Pod users typically run with non-root privileges, but running troubleshooting pods with temporary root privileges can be useful during issue investigation: -+ -[source,terminal] ----- -$ oc debug dc/my-deployment-configuration --as-root -- cat /var/log/my-application.log ----- -+ -[NOTE] -==== -You can access an interactive shell with root access within the debug pod if you run `oc debug dc/<deployment_configuration> --as-root` without appending `-- <command>`. -==== - -. Test application functionality interactively and run diagnostic tools, in an application container with an interactive shell. -.. Start an interactive shell on the application container: -+ -[source,terminal] ----- -$ oc exec -it my-app-1-akdlg /bin/bash ----- -+ -.. Test application functionality interactively from within the shell. For example, you can run the container's entry point command and observe the results. Then, test changes from the command line directly, before updating the source code and rebuilding the application container through the S2I process. -+ -.. Run diagnostic binaries available within the container. -+ -[NOTE] -==== -Root privileges are required to run some diagnostic binaries. In these situations you can start a debug pod with root access, based on a problematic pod's `DeploymentConfig` object, by running `oc debug dc/<deployment_configuration> --as-root`. Then, you can run diagnostic binaries as root from within the debug pod. -==== - -. If diagnostic binaries are not available within a container, you can run a host's diagnostic binaries within a container's namespace by using `nsenter`. The following example runs `ip ad` within a container's namespace, using the host`s `ip` binary. -.. Enter into a debug session on the target node. This step instantiates a debug pod called `<node_name>-debug`: -+ -[source,terminal] ----- -$ oc debug node/my-cluster-node ----- -+ -.. Set `/host` as the root directory within the debug shell. The debug pod mounts the host's root file system in `/host` within the pod. 
By changing the root directory to `/host`, you can run binaries contained in the host's executable paths: -+ -[source,terminal] ----- -# chroot /host ----- -+ -[NOTE] -==== -{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes by using SSH is not recommended. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@<node>.<cluster_name>.<base_domain>` instead. -==== -+ -.. Determine the target container ID: -+ -[source,terminal] ----- -# crictl ps ----- -+ -.. Determine the container's process ID. In this example, the target container ID is `a7fe32346b120`: -+ -[source,terminal] ----- -# crictl inspect a7fe32346b120 --output yaml | grep 'pid:' | awk '{print $2}' ----- -+ -.. Run `ip ad` within the container's namespace, using the host's `ip` binary. This example uses `31150` as the container's process ID. The `nsenter` command enters the namespace of a target process and runs a command in its namespace. Because the target process in this example is a container's process ID, the `ip ad` command is run in the container's namespace from the host: -+ -[source,terminal] ----- -# nsenter -n -t 31150 -- ip ad ----- -+ -[NOTE] -==== -Running a host's diagnostic binaries within a container's namespace is only possible if you are using a privileged container such as a debug node. -==== diff --git a/modules/gathering-bootstrap-diagnostic-data.adoc b/modules/gathering-bootstrap-diagnostic-data.adoc deleted file mode 100644 index c9acb3aa81e7..000000000000 --- a/modules/gathering-bootstrap-diagnostic-data.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-installations.adoc - -:_content-type: PROCEDURE -[id="gathering-bootstrap-diagnostic-data_{context}"] -= Gathering bootstrap node diagnostic data - -When experiencing bootstrap-related issues, you can gather `bootkube.service` `journald` unit logs and container logs from the bootstrap node. - -.Prerequisites - -* You have SSH access to your bootstrap node. -* You have the fully qualified domain name of the bootstrap node. -* If you are hosting Ignition configuration files by using an HTTP server, you must have the HTTP server's fully qualified domain name and the port number. You must also have SSH access to the HTTP host. - -.Procedure - -. If you have access to the bootstrap node's console, monitor the console until the node reaches the login prompt. - -. Verify the Ignition file configuration. -+ -* If you are hosting Ignition configuration files by using an HTTP server. -+ -.. Verify the bootstrap node Ignition file URL. Replace `<http_server_fqdn>` with HTTP server's fully qualified domain name: -+ -[source,terminal] ----- -$ curl -I http://<http_server_fqdn>:<port>/bootstrap.ign <1> ----- -<1> The `-I` option returns the header only. If the Ignition file is available on the specified URL, the command returns `200 OK` status. If it is not available, the command returns `404 file not found`. -+ -.. To verify that the Ignition file was received by the bootstrap node, query the HTTP server logs on the serving host. 
For example, if you are using an Apache web server to serve Ignition files, enter the following command: -+ -[source,terminal] ----- -$ grep -is 'bootstrap.ign' /var/log/httpd/access_log ----- -+ -If the bootstrap Ignition file is received, the associated `HTTP GET` log message will include a `200 OK` success status, indicating that the request succeeded. -+ -.. If the Ignition file was not received, check that the Ignition files exist and that they have the appropriate file and web server permissions on the serving host directly. -+ -* If you are using a cloud provider mechanism to inject Ignition configuration files into hosts as part of their initial deployment. -+ -.. Review the bootstrap node's console to determine if the mechanism is injecting the bootstrap node Ignition file correctly. - -. Verify the availability of the bootstrap node's assigned storage device. - -. Verify that the bootstrap node has been assigned an IP address from the DHCP server. - -. Collect `bootkube.service` journald unit logs from the bootstrap node. Replace `<bootstrap_fqdn>` with the bootstrap node's fully qualified domain name: -+ -[source,terminal] ----- -$ ssh core@<bootstrap_fqdn> journalctl -b -f -u bootkube.service ----- -+ -[NOTE] -==== -The `bootkube.service` log on the bootstrap node outputs etcd `connection refused` errors, indicating that the bootstrap server is unable to connect to etcd on control plane nodes. After etcd has started on each control plane node and the nodes have joined the cluster, the errors should stop. -==== -+ -. Collect logs from the bootstrap node containers. -.. Collect the logs using `podman` on the bootstrap node. Replace `<bootstrap_fqdn>` with the bootstrap node's fully qualified domain name: -+ -[source,terminal] ----- -$ ssh core@<bootstrap_fqdn> 'for pod in $(sudo podman ps -a -q); do sudo podman logs $pod; done' ----- - -. If the bootstrap process fails, verify the following. -+ -* You can resolve `api.<cluster_name>.<base_domain>` from the installation host. -* The load balancer proxies port 6443 connections to bootstrap and control plane nodes. Ensure that the proxy configuration meets {product-title} installation requirements. diff --git a/modules/gathering-crio-logs.adoc b/modules/gathering-crio-logs.adoc deleted file mode 100644 index a6939d6d5ddd..000000000000 --- a/modules/gathering-crio-logs.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-crio-issues.adoc - -:_content-type: PROCEDURE -[id="gathering-crio-logs_{context}"] -= Gathering CRI-O journald unit logs - -If you experience CRI-O issues, you can obtain CRI-O journald unit logs from a node. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* Your API service is still functional. -* You have installed the OpenShift CLI (`oc`). -* You have the fully qualified domain names of the control plane or control plane machines. - -.Procedure - -. Gather CRI-O journald unit logs. The following example collects logs from all control plane nodes (within the cluster: -+ -[source,terminal] ----- -$ oc adm node-logs --role=master -u crio ----- - -. Gather CRI-O journald unit logs from a specific node: -+ -[source,terminal] ----- -$ oc adm node-logs <node_name> -u crio ----- - -. If the API is not functional, review the logs using SSH instead. 
Replace `<node>.<cluster_name>.<base_domain>` with appropriate values: -+ -[source,terminal] ----- -$ ssh core@<node>.<cluster_name>.<base_domain> journalctl -b -f -u crio.service ----- -+ -[NOTE] -==== -{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes by using SSH is not recommended. Before attempting to collect diagnostic data over SSH, review whether the data collected by running `oc adm must gather` and other `oc` commands is sufficient instead. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@<node>.<cluster_name>.<base_domain>`. -==== diff --git a/modules/gathering-data-audit-logs.adoc b/modules/gathering-data-audit-logs.adoc deleted file mode 100644 index 2a64fd91fa3e..000000000000 --- a/modules/gathering-data-audit-logs.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * virt/support/virt-collecting-virt-data.adoc -// * support/gathering-cluster-data.adoc - -ifeval::["{context}" == "gathering-cluster-data"] -:support: -endif::[] -ifeval::["{context}" == "audit-log-view"] -:viewing: -endif::[] - -:_content-type: PROCEDURE -[id="gathering-data-audit-logs_{context}"] -= Gathering audit logs - -ifdef::support[] -You can gather audit logs, which are a security-relevant chronological set of records documenting the sequence of activities that have affected the system by individual users, administrators, or other components of the system. You can gather audit logs for: - -* etcd server -* Kubernetes API server -* OpenShift OAuth API server -* OpenShift API server - -endif::support[] -ifdef::viewing[] -You can use the must-gather tool to collect the audit logs for debugging your cluster, which you can review or send to Red Hat Support. -endif::viewing[] - -.Procedure - -. Run the `oc adm must-gather` command with `-- /usr/bin/gather_audit_logs`: -+ -[source,terminal] ----- -$ oc adm must-gather -- /usr/bin/gather_audit_logs ----- - -ifndef::openshift-origin[] -. Create a compressed file from the `must-gather` directory that was just created in your working directory. For example, on a computer that uses a Linux operating system, run the following command: -+ -[source,terminal] ----- -$ tar cvaf must-gather.tar.gz must-gather.local.472290403699006248 <1> ----- -<1> Replace `must-gather-local.472290403699006248` with the actual directory name. - -. Attach the compressed file to your support case on the link:https://access.redhat.com[Red Hat Customer Portal]. -endif::openshift-origin[] - -ifeval::["{context}" == "gathering-cluster-data"] -:!support: -endif::[] -ifeval::["{context}" == "audit-log-view"] -:!viewing: -endif::[] diff --git a/modules/gathering-data-network-logs.adoc b/modules/gathering-data-network-logs.adoc deleted file mode 100644 index d02e7b1a53e8..000000000000 --- a/modules/gathering-data-network-logs.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * support/gathering-cluster-data.adoc - - -:_content-type: PROCEDURE -[id="gathering-data-network-logs_{context}"] -= Gathering network logs - -You can gather network logs on all nodes in a cluster. - -.Procedure - -. Run the `oc adm must-gather` command with `-- gather_network_logs`: -+ -[source,terminal] ----- -$ oc adm must-gather -- gather_network_logs ----- - -. 
Create a compressed file from the `must-gather` directory that was just created in your working directory. For example, on a computer that uses a Linux operating system, run the following command: -+ -[source,terminal] ----- -$ tar cvaf must-gather.tar.gz must-gather.local.472290403699006248 <1> ----- -<1> Replace `must-gather-local.472290403699006248` with the actual directory name. - -. Attach the compressed file to your support case on the link:https://access.redhat.com[Red Hat Customer Portal]. \ No newline at end of file diff --git a/modules/gathering-data-specific-features.adoc b/modules/gathering-data-specific-features.adoc deleted file mode 100644 index 7ff8dcad633c..000000000000 --- a/modules/gathering-data-specific-features.adoc +++ /dev/null @@ -1,363 +0,0 @@ -// Module included in the following assemblies: -// -// * virt/support/virt-collecting-virt-data.adoc -// * support/gathering-cluster-data.adoc - -//This file contains UI elements and/or package names that need to be updated. - -ifeval::["{context}" == "gathering-cluster-data"] -:from-main-support-section: -:VirtProductName: OpenShift Virtualization -endif::[] - -:_content-type: PROCEDURE -[id="gathering-data-specific-features_{context}"] -= Gathering data about specific features - -You can gather debugging information about specific features by using the `oc adm must-gather` CLI command with the `--image` or `--image-stream` argument. The `must-gather` tool supports multiple images, so you can gather data about more than one feature by running a single command. - -ifdef::from-main-support-section[] - -ifndef::openshift-origin[] - -.Supported must-gather images -[cols="2,2",options="header",subs="attributes+"] -|=== -|Image |Purpose - -|`registry.redhat.io/container-native-virtualization/cnv-must-gather-rhel9:v<installed_version_virt>` -|Data collection for {VirtProductName}. - -|`registry.redhat.io/openshift-serverless-1/svls-must-gather-rhel8` -|Data collection for OpenShift Serverless. - -|`registry.redhat.io/openshift-service-mesh/istio-must-gather-rhel8:v<installed_version_service_mesh>` -|Data collection for Red Hat OpenShift Service Mesh. - -ifndef::openshift-dedicated[] -|`registry.redhat.io/rhmtc/openshift-migration-must-gather-rhel8:v<installed_version_migration_toolkit>` -|Data collection for the {mtc-full}. -endif::openshift-dedicated[] -ifdef::openshift-dedicated[] -|`registry.redhat.io/rhcam-1-2/openshift-migration-must-gather-rhel8` -|Data collection for migration-related information. -endif::openshift-dedicated[] - -|`registry.redhat.io/odf4/ocs-must-gather-rhel8:v<installed_version_ODF>` -|Data collection for {rh-storage-first}. - -|`registry.redhat.io/openshift-logging/cluster-logging-rhel8-operator` -|Data collection for OpenShift Logging. - -|`registry.redhat.io/openshift4/ose-csi-driver-shared-resource-mustgather-rhel8` -|Data collection for OpenShift Shared Resource CSI Driver. - -ifndef::openshift-dedicated[] -|`registry.redhat.io/openshift4/ose-local-storage-mustgather-rhel8:v<installed_version_LSO>` -|Data collection for Local Storage Operator. -endif::openshift-dedicated[] - -|`registry.redhat.io/openshift-sandboxed-containers/osc-must-gather-rhel8:v<installed_version_sandboxed_containers>` -|Data collection for {sandboxed-containers-first}. - -|`registry.redhat.io/workload-availability/self-node-remediation-must-gather-rhel8:v<installed-version-SNR>` -|Data collection for the Self Node Remediation (SNR) Operator and the Node Health Check (NHC) Operator. 
- -|`registry.redhat.io/workload-availability/node-maintenance-must-gather-rhel8:v<installed-version-NMO>` -|Data collection for the Node Maintenance Operator (NMO). - -|`registry.redhat.io/openshift-gitops-1/gitops-must-gather-rhel8:v<installed_version_GitOps>` -|Data collection for {gitops-title}. -|=== - -[NOTE] -==== -To determine the latest version for an {product-title} component's image, see the link:https://access.redhat.com/support/policy/updates/openshift[Red Hat {product-title} Life Cycle Policy] web page on the Red Hat Customer Portal. -==== - -endif::openshift-origin[] - -ifdef::openshift-origin[] - -.Available must-gather images -[cols="2,2",options="header"] -|=== -|Image |Purpose - -|`quay.io/kubevirt/must-gather` -|Data collection for KubeVirt. - -|`quay.io/openshift-knative/must-gather` -|Data collection for Knative. - -|`docker.io/maistra/istio-must-gather` -|Data collection for service mesh. - -|`quay.io/konveyor/must-gather` -|Data collection for migration-related information. - -|`quay.io/ocs-dev/ocs-must-gather` -|Data collection for {rh-storage}. - -|`quay.io/openshift/origin-cluster-logging-operator` -|Data collection for OpenShift Logging. - -ifndef::openshift-dedicated[] -|`quay.io/openshift/origin-local-storage-mustgather` -|Data collection for Local Storage Operator. -endif::openshift-dedicated[] - -|=== - -endif::openshift-origin[] - -endif::from-main-support-section[] - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. -ifndef::openshift-dedicated[] -* The {product-title} CLI (`oc`) installed. -endif::openshift-dedicated[] -ifdef::openshift-dedicated[] -* The OpenShift CLI (`oc`) installed. -endif::openshift-dedicated[] - -.Procedure - -. Navigate to the directory where you want to store the `must-gather` data. - -ifndef::openshift-origin[] - -. Run the `oc adm must-gather` command with one or more `--image` or `--image-stream` arguments. -+ -[NOTE] -==== -* To collect the default `must-gather` data in addition to specific feature data, add the `--image-stream=openshift/must-gather` argument. - -* For information on gathering data about the Custom Metrics Autoscaler, see the Additional resources section that follows. -==== -+ -For example, the following command gathers both the default cluster data and information specific to {VirtProductName}: -+ -[source,terminal,subs="attributes+"] ----- -$ oc adm must-gather \ - --image-stream=openshift/must-gather \ <1> - --image=registry.redhat.io/container-native-virtualization/cnv-must-gather-rhel9:v{HCOVersion} <2> ----- -<1> The default {product-title} `must-gather` image -<2> The must-gather image for {VirtProductName} -+ -You can use the `must-gather` tool with additional arguments to gather data that is specifically related to OpenShift Logging and the -ifndef::openshift-dedicated[] -Red Hat OpenShift -endif::openshift-dedicated[] -ifdef::openshift-dedicated[] -Cluster -endif::openshift-dedicated[] -Logging Operator in your cluster. 
For OpenShift Logging, run the following command: -+ -[source,terminal] ----- -$ oc adm must-gather --image=$(oc -n openshift-logging get deployment.apps/cluster-logging-operator \ - -o jsonpath='{.spec.template.spec.containers[?(@.name == "cluster-logging-operator")].image}') ----- -+ -.Example `must-gather` output for OpenShift Logging -[%collapsible] -==== -[source,terminal] ----- -├── cluster-logging -│ ├── clo -│ │ ├── cluster-logging-operator-74dd5994f-6ttgt -│ │ ├── clusterlogforwarder_cr -│ │ ├── cr -│ │ ├── csv -│ │ ├── deployment -│ │ └── logforwarding_cr -│ ├── collector -│ │ ├── fluentd-2tr64 -ifdef::openshift-dedicated[] -│ ├── curator -│ │ └── curator-1596028500-zkz4s -endif::openshift-dedicated[] -│ ├── eo -│ │ ├── csv -│ │ ├── deployment -│ │ └── elasticsearch-operator-7dc7d97b9d-jb4r4 -│ ├── es -│ │ ├── cluster-elasticsearch -│ │ │ ├── aliases -│ │ │ ├── health -│ │ │ ├── indices -│ │ │ ├── latest_documents.json -│ │ │ ├── nodes -│ │ │ ├── nodes_stats.json -│ │ │ └── thread_pool -│ │ ├── cr -│ │ ├── elasticsearch-cdm-lp8l38m0-1-794d6dd989-4jxms -│ │ └── logs -│ │ ├── elasticsearch-cdm-lp8l38m0-1-794d6dd989-4jxms -│ ├── install -│ │ ├── co_logs -│ │ ├── install_plan -│ │ ├── olmo_logs -│ │ └── subscription -│ └── kibana -│ ├── cr -│ ├── kibana-9d69668d4-2rkvz -├── cluster-scoped-resources -│ └── core -│ ├── nodes -│ │ ├── ip-10-0-146-180.eu-west-1.compute.internal.yaml -│ └── persistentvolumes -│ ├── pvc-0a8d65d9-54aa-4c44-9ecc-33d9381e41c1.yaml -├── event-filter.html -├── gather-debug.log -└── namespaces - ├── openshift-logging - │ ├── apps - │ │ ├── daemonsets.yaml - │ │ ├── deployments.yaml - │ │ ├── replicasets.yaml - │ │ └── statefulsets.yaml - │ ├── batch - │ │ ├── cronjobs.yaml - │ │ └── jobs.yaml - │ ├── core - │ │ ├── configmaps.yaml - │ │ ├── endpoints.yaml - │ │ ├── events -ifndef::openshift-dedicated[] - │ │ │ ├── elasticsearch-im-app-1596020400-gm6nl.1626341a296c16a1.yaml - │ │ │ ├── elasticsearch-im-audit-1596020400-9l9n4.1626341a2af81bbd.yaml - │ │ │ ├── elasticsearch-im-infra-1596020400-v98tk.1626341a2d821069.yaml - │ │ │ ├── elasticsearch-im-app-1596020400-cc5vc.1626341a3019b238.yaml - │ │ │ ├── elasticsearch-im-audit-1596020400-s8d5s.1626341a31f7b315.yaml - │ │ │ ├── elasticsearch-im-infra-1596020400-7mgv8.1626341a35ea59ed.yaml -endif::openshift-dedicated[] -ifdef::openshift-dedicated[] - │ │ │ ├── curator-1596021300-wn2ks.162634ebf0055a94.yaml - │ │ │ ├── curator.162638330681bee2.yaml - │ │ │ ├── elasticsearch-delete-app-1596020400-gm6nl.1626341a296c16a1.yaml - │ │ │ ├── elasticsearch-delete-audit-1596020400-9l9n4.1626341a2af81bbd.yaml - │ │ │ ├── elasticsearch-delete-infra-1596020400-v98tk.1626341a2d821069.yaml - │ │ │ ├── elasticsearch-rollover-app-1596020400-cc5vc.1626341a3019b238.yaml - │ │ │ ├── elasticsearch-rollover-audit-1596020400-s8d5s.1626341a31f7b315.yaml - │ │ │ ├── elasticsearch-rollover-infra-1596020400-7mgv8.1626341a35ea59ed.yaml -endif::openshift-dedicated[] - │ │ ├── events.yaml - │ │ ├── persistentvolumeclaims.yaml - │ │ ├── pods.yaml - │ │ ├── replicationcontrollers.yaml - │ │ ├── secrets.yaml - │ │ └── services.yaml - │ ├── openshift-logging.yaml - │ ├── pods - │ │ ├── cluster-logging-operator-74dd5994f-6ttgt - │ │ │ ├── cluster-logging-operator - │ │ │ │ └── cluster-logging-operator - │ │ │ │ └── logs - │ │ │ │ ├── current.log - │ │ │ │ ├── previous.insecure.log - │ │ │ │ └── previous.log - │ │ │ └── cluster-logging-operator-74dd5994f-6ttgt.yaml - │ │ ├── cluster-logging-operator-registry-6df49d7d4-mxxff - │ │ │ ├── 
cluster-logging-operator-registry - │ │ │ │ └── cluster-logging-operator-registry - │ │ │ │ └── logs - │ │ │ │ ├── current.log - │ │ │ │ ├── previous.insecure.log - │ │ │ │ └── previous.log - │ │ │ ├── cluster-logging-operator-registry-6df49d7d4-mxxff.yaml - │ │ │ └── mutate-csv-and-generate-sqlite-db - │ │ │ └── mutate-csv-and-generate-sqlite-db - │ │ │ └── logs - │ │ │ ├── current.log - │ │ │ ├── previous.insecure.log - │ │ │ └── previous.log -ifdef::openshift-dedicated[] - │ │ ├── curator-1596028500-zkz4s -endif::openshift-dedicated[] - │ │ ├── elasticsearch-cdm-lp8l38m0-1-794d6dd989-4jxms -ifndef::openshift-dedicated[] - │ │ ├── elasticsearch-im-app-1596030300-bpgcx - │ │ │ ├── elasticsearch-im-app-1596030300-bpgcx.yaml -endif::openshift-dedicated[] -ifdef::openshift-dedicated[] - │ │ ├── elasticsearch-delete-app-1596030300-bpgcx - │ │ │ ├── elasticsearch-delete-app-1596030300-bpgcx.yaml -endif::openshift-dedicated[] - │ │ │ └── indexmanagement - │ │ │ └── indexmanagement - │ │ │ └── logs - │ │ │ ├── current.log - │ │ │ ├── previous.insecure.log - │ │ │ └── previous.log - │ │ ├── fluentd-2tr64 - │ │ │ ├── fluentd - │ │ │ │ └── fluentd - │ │ │ │ └── logs - │ │ │ │ ├── current.log - │ │ │ │ ├── previous.insecure.log - │ │ │ │ └── previous.log - │ │ │ ├── fluentd-2tr64.yaml - │ │ │ └── fluentd-init - │ │ │ └── fluentd-init - │ │ │ └── logs - │ │ │ ├── current.log - │ │ │ ├── previous.insecure.log - │ │ │ └── previous.log - │ │ ├── kibana-9d69668d4-2rkvz - │ │ │ ├── kibana - │ │ │ │ └── kibana - │ │ │ │ └── logs - │ │ │ │ ├── current.log - │ │ │ │ ├── previous.insecure.log - │ │ │ │ └── previous.log - │ │ │ ├── kibana-9d69668d4-2rkvz.yaml - │ │ │ └── kibana-proxy - │ │ │ └── kibana-proxy - │ │ │ └── logs - │ │ │ ├── current.log - │ │ │ ├── previous.insecure.log - │ │ │ └── previous.log - │ └── route.openshift.io - │ └── routes.yaml - └── openshift-operators-redhat - ├── ... ----- -==== -endif::openshift-origin[] - -. Run the `oc adm must-gather` command with one or more `--image` or `--image-stream` arguments. For example, the following command gathers both the default cluster data and information specific to KubeVirt: -+ -[source,terminal] ----- -$ oc adm must-gather \ - --image-stream=openshift/must-gather \ <1> - --image=quay.io/kubevirt/must-gather <2> ----- -<1> The default {product-title} `must-gather` image -<2> The must-gather image for KubeVirt - -ifndef::openshift-origin[] -. Create a compressed file from the `must-gather` directory that was just created in your working directory. For example, on a computer that uses a Linux -operating system, run the following command: -+ -[source,terminal] ----- -$ tar cvaf must-gather.tar.gz must-gather.local.5421342344627712289/ <1> ----- -<1> Make sure to replace `must-gather-local.5421342344627712289/` with the -actual directory name. - -. Attach the compressed file to your support case on the link:https://access.redhat.com[Red Hat Customer Portal]. 
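Optionally, you can list the contents of the archive to confirm that the expected directories were captured before you upload it. This is a quick sanity check, assuming the archive name used in the previous step:

[source,terminal]
----
$ tar tzf must-gather.tar.gz | head
----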
-endif::openshift-origin[] - -ifeval::["{context}" == "gathering-cluster-data"] -:!from-main-support-section: -:!VirtProductName: -endif::[] diff --git a/modules/gathering-kubelet-logs.adoc b/modules/gathering-kubelet-logs.adoc deleted file mode 100644 index f6bbce89771b..000000000000 --- a/modules/gathering-kubelet-logs.adoc +++ /dev/null @@ -1,38 +0,0 @@ -:_content-type: PROCEDURE -[id="gathering-kubelet-logs_{context}"] -= Gathering kubelet logs - -.Procedure - -* After the kubelet's log level verbosity is configured properly, you can gather logs by running the following commands: -+ -[source,terminal] ----- -$ oc adm node-logs --role master -u kubelet ----- -+ -[source,terminal] ----- -$ oc adm node-logs --role worker -u kubelet ----- -+ -Alternatively, inside the node, run the following command: -+ -[source,terminal] ----- -$ journalctl -b -f -u kubelet.service ----- - -* To collect master container logs, run the following command: -+ -[source,terminal] ----- -$ sudo tail -f /var/log/containers/* ----- - -* To directly gather the logs of all nodes, run the following command: -+ -[source,terminal] ----- -- for n in $(oc get node --no-headers | awk '{print $1}'); do oc adm node-logs $n | gzip > $n.log.gz; done ----- diff --git a/modules/gathering-operator-logs.adoc b/modules/gathering-operator-logs.adoc deleted file mode 100644 index db808849d4d1..000000000000 --- a/modules/gathering-operator-logs.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-operator-issues.adoc - -:_content-type: PROCEDURE -[id="gathering-operator-logs_{context}"] -= Gathering Operator logs - -If you experience Operator issues, you can gather detailed diagnostic information from Operator pod logs. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* Your API service is still functional. -* You have installed the OpenShift CLI (`oc`). -* You have the fully qualified domain names of the control plane or control plane machines. - -.Procedure - -. List the Operator pods that are running in the Operator's namespace, plus the pod status, restarts, and age: -+ -[source,terminal] ----- -$ oc get pods -n <operator_namespace> ----- - -. Review logs for an Operator pod: -+ -[source,terminal] ----- -$ oc logs pod/<pod_name> -n <operator_namespace> ----- -+ -If an Operator pod has multiple containers, the preceding command will produce an error that includes the name of each container. Query logs from an individual container: -+ -[source,terminal] ----- -$ oc logs pod/<operator_pod_name> -c <container_name> -n <operator_namespace> ----- - -. If the API is not functional, review Operator pod and container logs on each control plane node by using SSH instead. Replace `<master-node>.<cluster_name>.<base_domain>` with appropriate values. -.. List pods on each control plane node: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> sudo crictl pods ----- -+ -.. For any Operator pods not showing a `Ready` status, inspect the pod's status in detail. Replace `<operator_pod_id>` with the Operator pod's ID listed in the output of the preceding command: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> sudo crictl inspectp <operator_pod_id> ----- -+ -.. List containers related to an Operator pod: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> sudo crictl ps --pod=<operator_pod_id> ----- -+ -.. 
For any Operator container not showing a `Ready` status, inspect the container's status in detail. Replace `<container_id>` with a container ID listed in the output of the preceding command: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> sudo crictl inspect <container_id> ----- -+ -.. Review the logs for any Operator containers not showing a `Ready` status. Replace `<container_id>` with a container ID listed in the output of the preceding command: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> sudo crictl logs -f <container_id> ----- -+ -[NOTE] -==== -{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes by using SSH is not recommended. Before attempting to collect diagnostic data over SSH, review whether the data collected by running `oc adm must gather` and other `oc` commands is sufficient instead. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@<node>.<cluster_name>.<base_domain>`. -==== diff --git a/modules/gathering-s2i-diagnostic-data.adoc b/modules/gathering-s2i-diagnostic-data.adoc deleted file mode 100644 index 2a9dcaf18369..000000000000 --- a/modules/gathering-s2i-diagnostic-data.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-s2i.adoc - -:_content-type: PROCEDURE -[id="gathering-s2i-diagnostic-data_{context}"] -= Gathering Source-to-Image diagnostic data - -The S2I tool runs a build pod and a deployment pod in sequence. The deployment pod is responsible for deploying the application pods based on the application container image created in the build stage. Watch build, deployment and application pod status to determine where in the S2I process a failure occurs. Then, focus diagnostic data collection accordingly. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* Your API service is still functional. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Watch the pod status throughout the S2I process to determine at which stage a failure occurs: -+ -[source,terminal] ----- -$ oc get pods -w <1> ----- -<1> Use `-w` to monitor pods for changes until you quit the command using `Ctrl+C`. - -. Review a failed pod's logs for errors. -+ -* *If the build pod fails*, review the build pod's logs: -+ -[source,terminal] ----- -$ oc logs -f pod/<application_name>-<build_number>-build ----- -+ -[NOTE] -==== -Alternatively, you can review the build configuration's logs using `oc logs -f bc/<application_name>`. The build configuration's logs include the logs from the build pod. -==== -+ -* *If the deployment pod fails*, review the deployment pod's logs: -+ -[source,terminal] ----- -$ oc logs -f pod/<application_name>-<build_number>-deploy ----- -+ -[NOTE] -==== -Alternatively, you can review the deployment configuration's logs using `oc logs -f dc/<application_name>`. This outputs logs from the deployment pod until the deployment pod completes successfully. The command outputs logs from the application pods if you run it after the deployment pod has completed. After a deployment pod completes, its logs can still be accessed by running `oc logs -f pod/<application_name>-<build_number>-deploy`. 
-==== -+ -* *If an application pod fails, or if an application is not behaving as expected within a running application pod*, review the application pod's logs: -+ -[source,terminal] ----- -$ oc logs -f pod/<application_name>-<build_number>-<random_string> ----- diff --git a/modules/gcp-limits.adoc b/modules/gcp-limits.adoc deleted file mode 100644 index 91b114dda5d6..000000000000 --- a/modules/gcp-limits.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/gcp-ccs.adoc - -[id="gcp-limits_{context}"] -= GCP account limits - - -The {product-title} cluster uses a number of Google Cloud Platform (GCP) components, but the default link:https://cloud.google.com/docs/quota[quotas] do not affect your ability to install an {product-title} cluster. - -A standard {product-title} cluster uses the following resources. Note that some resources are required only during the bootstrap process and are removed after the cluster deploys. - -.GCP resources used in a default cluster - -[cols="2a,2a,2a,2a,2a",options="header"] -|=== -|Service -|Component -|Location -|Total resources required -|Resources removed after bootstrap - - -|Service account |IAM |Global |5 |0 -|Firewall Rules |Compute |Global |11 |1 -|Forwarding Rules |Compute |Global |2 |0 -|In-use global IP addresses |Compute |Global |4 |1 -|Health checks |Compute |Global |3 |0 -|Images |Compute |Global |1 |0 -|Networks |Compute |Global |2 |0 -|Static IP addresses |Compute |Region |4 |1 -|Routers |Compute |Global |1 |0 -|Routes |Compute |Global |2 |0 -|Subnetworks |Compute |Global |2 |0 -|Target Pools |Compute |Global |3 |0 -|CPUs |Compute |Region |28 |4 -|Persistent Disk SSD (GB) |Compute |Region |896 |128 - -|=== - -[NOTE] -==== -If any of the quotas are insufficient during installation, the installation program displays an error that states both which quota was exceeded and the region. -==== - -Be sure to consider your actual cluster size, planned cluster growth, and any usage from other clusters that are associated with your account. The CPU, Static IP addresses, and Persistent Disk SSD (Storage) quotas are the ones that are most likely to be insufficient. - -If you plan to deploy your cluster in one of the following regions, you will exceed the maximum storage quota and are likely to exceed the CPU quota limit: - -* asia-east2 -* asia-northeast2 -* asia-south1 -* australia-southeast1 -* europe-north1 -* europe-west2 -* europe-west3 -* europe-west6 -* northamerica-northeast1 -* southamerica-east1 -* us-west2 - -You can increase resource quotas from the link:https://console.cloud.google.com/iam-admin/quotas[GCP console], but you might need to file a support ticket. Be sure to plan your cluster size early so that you can allow time to resolve the support ticket before you install your {product-title} cluster. 
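To compare these requirements with the quota that is currently available to your project, you can query the regional quotas directly. The following is an illustrative sketch only; it assumes that the Google Cloud CLI (`gcloud`) and `jq` are installed, that you are authenticated against the target project, and that `us-east1` stands in for your chosen region:

[source,terminal]
----
$ gcloud compute regions describe us-east1 --format=json \
  | jq '.quotas[] | select(.metric == "CPUS" or .metric == "STATIC_ADDRESSES" or .metric == "SSD_TOTAL_GB")'
----

Each returned entry reports the quota limit and current usage for that metric, which you can weigh against the table above.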
diff --git a/modules/generating-icsp-object-scoped-to-a-registry.adoc b/modules/generating-icsp-object-scoped-to-a-registry.adoc deleted file mode 100644 index de2753a47771..000000000000 --- a/modules/generating-icsp-object-scoped-to-a-registry.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/updating_a_cluster/updating_disconnected_cluster/disconnected-update.adoc - -:_content-type: PROCEDURE -[id="generating-icsp-object-scoped-to-a-registry_{context}"] -= Widening the scope of the mirror image catalog to reduce the frequency of cluster node reboots - -You can scope the mirrored image catalog at the repository level or the wider registry level. A widely scoped `ImageContentSourcePolicy` resource reduces the number of times the nodes need to reboot in response to changes to the resource. - -To widen the scope of the mirror image catalog in the `ImageContentSourcePolicy` resource, perform the following procedure. - -.Prerequisites - -* Install the {product-title} CLI `oc`. -* Log in as a user with `cluster-admin` privileges. -* Configure a mirrored image catalog for use in your disconnected cluster. - -.Procedure - -. Run the following command, specifying values for `<local_registry>`, `<pull_spec>`, and `<pull_secret_file>`: -+ -[source,terminal] ----- -$ oc adm catalog mirror <local_registry>/<pull_spec> <local_registry> -a <pull_secret_file> --icsp-scope=registry ----- -+ -where: -+ --- -<local_registry>:: is the local registry you have configured for your disconnected cluster, for example, `local.registry:5000`. -<pull_spec>:: is the pull specification as configured in your disconnected registry, for example, `redhat/redhat-operator-index:v{product-version}` -<pull_secret_file>:: is the `registry.redhat.io` pull secret in `.json` file format. You can download the {cluster-manager-url-pull}. --- -+ -The `oc adm catalog mirror` command creates a `/redhat-operator-index-manifests` directory and generates `imageContentSourcePolicy.yaml`, `catalogSource.yaml`, and `mapping.txt` files. - -. Apply the new `ImageContentSourcePolicy` resource to the cluster: -+ -[source,terminal] ----- -$ oc apply -f imageContentSourcePolicy.yaml ----- - -.Verification - -* Verify that `oc apply` successfully applied the change to `ImageContentSourcePolicy`: -+ -[source,terminal] ----- -$ oc get ImageContentSourcePolicy -o yaml ----- -+ -.Example output - -[source,yaml] ----- -apiVersion: v1 -items: -- apiVersion: operator.openshift.io/v1alpha1 - kind: ImageContentSourcePolicy - metadata: - annotations: - kubectl.kubernetes.io/last-applied-configuration: | - {"apiVersion":"operator.openshift.io/v1alpha1","kind":"ImageContentSourcePolicy","metadata":{"annotations":{},"name":"redhat-operator-index"},"spec":{"repositoryDigestMirrors":[{"mirrors":["local.registry:5000"],"source":"registry.redhat.io"}]}} -... ----- - -After you update the `ImageContentSourcePolicy` resource, {product-title} deploys the new settings to each node and the cluster starts using the mirrored repository for requests to the source repository. 
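Because the updated registry configuration is rolled out to nodes through their machine config pools, you can watch the pools to confirm when the change has been applied everywhere. For example:

[source,terminal]
----
$ oc get machineconfigpool -w
----

When the `UPDATED` column reports `True` for each pool, all nodes are running with the new registry-scoped mirror configuration.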
diff --git a/modules/getting-cluster-version-status-and-update-details.adoc b/modules/getting-cluster-version-status-and-update-details.adoc deleted file mode 100644 index fddb5c515e0b..000000000000 --- a/modules/getting-cluster-version-status-and-update-details.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// *installing/validating-an-installation.adoc - -:_content-type: PROCEDURE -[id="getting-cluster-version-and-update-details_{context}"] -= Getting cluster version, status, and update details - -You can view the cluster version and status by running the `oc get clusterversion` command. If the status shows that the installation is still progressing, you can review the status of the Operators for more information. - -You can also list the current update channel and review the available cluster updates. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Obtain the cluster version and overall status: -+ -[source,terminal] ----- -$ oc get clusterversion ----- -+ -.Example output -[source,terminal] ----- -NAME VERSION AVAILABLE PROGRESSING SINCE STATUS -version 4.6.4 True False 6m25s Cluster version is 4.6.4 ----- -+ -The example output indicates that the cluster has been installed successfully. - -. If the cluster status indicates that the installation is still progressing, you can obtain more detailed progress information by checking the status of the Operators: -+ -[source,terminal] ----- -$ oc get clusteroperators.config.openshift.io ----- - -. View a detailed summary of cluster specifications, update availability, and update history: -+ -[source,terminal] ----- -$ oc describe clusterversion ----- - -. List the current update channel: -+ -[source,terminal] ----- -$ oc get clusterversion -o jsonpath='{.items[0].spec}{"\n"}' ----- -+ -.Example output -[source,terminal] ----- -{"channel":"stable-4.6","clusterID":"245539c1-72a3-41aa-9cec-72ed8cf25c5c"} ----- - -. Review the available cluster updates: -+ -[source,terminal] ----- -$ oc adm upgrade ----- -+ -.Example output -[source,terminal] ----- -Cluster version is 4.6.4 - -Updates: - -VERSION IMAGE -4.6.6 quay.io/openshift-release-dev/ocp-release@sha256:c7e8f18e8116356701bd23ae3a23fb9892dd5ea66c8300662ef30563d7104f39 ----- diff --git a/modules/getting-started-cli-connecting-a-database.adoc b/modules/getting-started-cli-connecting-a-database.adoc deleted file mode 100644 index f535643f446b..000000000000 --- a/modules/getting-started-cli-connecting-a-database.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - -:_content-type: PROCEDURE -[id="getting-started-cli-connecting-database_{context}"] -= Connecting to a database - -Deploy and connect a MongoDB database where the `national-parks-app` application stores location information. -Once you mark the `national-parks-app` application as a backend for the map visualization tool, `parksmap` deployment uses the {product-title} discover mechanism to display the map automatically. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). -* You have a deployed image. 
- -.Procedure - -* To connect to a database, enter the following command: -+ -[source,terminal] ----- -$ oc new-app quay.io/centos7/mongodb-36-centos7 --name mongodb-nationalparks -e MONGODB_USER=mongodb -e MONGODB_PASSWORD=mongodb -e MONGODB_DATABASE=mongodb -e MONGODB_ADMIN_PASSWORD=mongodb -l 'app.kubernetes.io/part-of=national-parks-app,app.kubernetes.io/name=mongodb' ----- -+ -.Example output -+ -[source,terminal] ----- ---> Found container image dc18f52 (8 months old) from quay.io for "quay.io/centos7/mongodb-36-centos7" - - MongoDB 3.6 - ----------- - MongoDB (from humongous) is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemas. This container image contains programs to run mongod server. - - Tags: database, mongodb, rh-mongodb36 - - * An image stream tag will be created as "mongodb-nationalparks:latest" that will track this image - ---> Creating resources with label app.kubernetes.io/name=mongodb,app.kubernetes.io/part-of=national-parks-app ... - imagestream.image.openshift.io "mongodb-nationalparks" created - deployment.apps "mongodb-nationalparks" created - service "mongodb-nationalparks" created ---> Success ----- diff --git a/modules/getting-started-cli-creating-new-project.adoc b/modules/getting-started-cli-creating-new-project.adoc deleted file mode 100644 index dd9c062da08a..000000000000 --- a/modules/getting-started-cli-creating-new-project.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - -:_content-type: PROCEDURE -[id="getting-started-cli-creating-new-project_{context}"] -= Creating a new project - -A project enables a community of users to organize and manage their content in isolation. Projects are {product-title} extensions to Kubernetes namespaces. Projects have additional features that enable user self-provisioning. - -Users must receive access to projects from administrators. Cluster administrators can allow developers to create their own projects. In most cases, users automatically have access to their own projects. - -Each project has its own set of objects, policies, constraints, and service accounts. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). - -.Procedure - -* To create a new project, enter the following command: -+ -[source,terminal] ----- -$ oc new-project user-getting-started --display-name="Getting Started with OpenShift" ----- -+ -.Example output - -[source,terminal] ----- -Now using project "user-getting-started" on server "https://openshift.example.com:6443". ----- diff --git a/modules/getting-started-cli-creating-route.adoc b/modules/getting-started-cli-creating-route.adoc deleted file mode 100644 index d13359c57614..000000000000 --- a/modules/getting-started-cli-creating-route.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - -:_content-type: PROCEDURE -[id="getting-started-cli-creating-route_{context}"] -= Creating a route - -External clients can access applications running on {product-title} through the routing layer and the data object behind that is a _route_. The default {product-title} router (HAProxy) uses the HTTP header of the incoming request to determine where to proxy the connection. - -Optionally, you can define security, such as TLS, for the route. 
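For reference, the edge-terminated route created by the procedure below corresponds roughly to a manifest such as the following. This sketch is illustrative only; the service name and port name are taken from this example application and might differ in your project:

[source,yaml]
----
apiVersion: route.openshift.io/v1
kind: Route
metadata:
  name: parksmap
spec:
  to:
    kind: Service
    name: parksmap        # the service that exposes the deployed image
  port:
    targetPort: 8080-tcp  # assumed port name; verify with `oc get service parksmap`
  tls:
    termination: edge     # TLS is terminated at the default router
----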
- -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). -* You have a deployed image. -* You must have `cluster-admin` or `project-admin` privileges. - -.Procedure - -. To retrieve the created application service, enter the following command: -+ -[source,terminal] ----- -$ oc get service ----- -+ -.Example output -+ -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -parksmap ClusterIP <your-cluster-IP> <123.456.789> 8080/TCP 8m29s ----- - -. To create a route, enter the following command: -+ -[source,terminal] ----- -$ oc create route edge parksmap --service=parksmap ----- -+ -.Example output -+ -[source,terminal] ----- -route.route.openshift.io/parksmap created ----- - -. To retrieve the created application route, enter the following command: -+ -[source,terminal] ----- -$ oc get route ----- -+ -.Example output -+ -[source,terminal] ----- -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -parksmap parksmap-user-getting-started.apps.cluster.example.com parksmap 8080-tcp edge None ----- diff --git a/modules/getting-started-cli-creating-secret.adoc b/modules/getting-started-cli-creating-secret.adoc deleted file mode 100644 index b906e6e5bf2c..000000000000 --- a/modules/getting-started-cli-creating-secret.adoc +++ /dev/null @@ -1,77 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - -:_content-type: PROCEDURE -[id="getting-started-cli-creating-secret_{context}"] - -= Creating a secret - -The `Secret` object provides a mechanism to hold sensitive information such as passwords, {product-title} client configuration files, private source repository credentials, and so on. -Secrets decouple sensitive content from the pods. You can mount secrets into containers using a volume plugin or the system can use secrets to perform actions on behalf of a pod. -The following procedure adds the secret `nationalparks-mongodb-parameters` and mounts it to the `nationalparks` workload. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). -* You have a deployed image. - -.Procedure - -. To create a secret, enter the following command: -+ -[source,terminal] ----- -$ oc create secret generic nationalparks-mongodb-parameters --from-literal=DATABASE_SERVICE_NAME=mongodb-nationalparks --from-literal=MONGODB_USER=mongodb --from-literal=MONGODB_PASSWORD=mongodb --from-literal=MONGODB_DATABASE=mongodb --from-literal=MONGODB_ADMIN_PASSWORD=mongodb ----- -+ -.Example output -+ -[source,terminal] ----- -secret/nationalparks-mongodb-parameters created ----- - -. To update the environment variable to attach the mongodb secret to the `nationalpartks` workload, enter the following command: -+ -[source,terminal] ------ -$ oc set env --from=secret/nationalparks-mongodb-parameters deploy/nationalparks ------ -+ -.Example output -+ -[source,terminal] ----- -deployment.apps/nationalparks updated ----- - -. To show the status of the `nationalparks` deployment, enter the following command: -+ -[source,terminal] ----- -$ oc rollout status deployment nationalparks ----- -+ -.Example output -+ -[source,terminal] ----- -deployment "nationalparks" successfully rolled out ----- - -. 
To show the status of the `mongodb-nationalparks` deployment, enter the following command: -+ -[source,terminal] ----- -$ oc rollout status deployment mongodb-nationalparks ----- -+ -.Example output -+ -[source,terminal] ----- -deployment "nationalparks" successfully rolled out -deployment "mongodb-nationalparks" successfully rolled out ----- diff --git a/modules/getting-started-cli-deploying-first-image.adoc b/modules/getting-started-cli-deploying-first-image.adoc deleted file mode 100644 index 85819051fc35..000000000000 --- a/modules/getting-started-cli-deploying-first-image.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - -:_content-type: PROCEDURE -[id="getting-started-cli-deploying-first-image_{context}"] -= Deploying your first image - -The simplest way to deploy an application in {product-title} is to run an existing container image. The following procedure deploys a front-end component of an application called `national-parks-app`. The web application displays an interactive map. The map displays the location of major national parks across the world. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* Install the OpenShift CLI (`oc`). - -.Procedure - -* To deploy an application, enter the following command: -+ -[source,terminal] ----- -$ oc new-app quay.io/openshiftroadshow/parksmap:latest --name=parksmap -l 'app=national-parks-app,component=parksmap,role=frontend,app.kubernetes.io/part-of=national-parks-app' ----- -+ -.Example output -[source,text] ----- ---> Found container image 0c2f55f (12 months old) from quay.io for "quay.io/openshiftroadshow/parksmap:latest" - - * An image stream tag will be created as "parksmap:latest" that will track this image - ---> Creating resources with label app=national-parks-app,app.kubernetes.io/part-of=national-parks-app,component=parksmap,role=frontend ... - imagestream.image.openshift.io "parksmap" created - deployment.apps "parksmap" created - service "parksmap" created ---> Success ----- diff --git a/modules/getting-started-cli-deploying-python-app.adoc b/modules/getting-started-cli-deploying-python-app.adoc deleted file mode 100644 index 2d13040c2f02..000000000000 --- a/modules/getting-started-cli-deploying-python-app.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - -:_content-type: PROCEDURE -[id="getting-started-cli-deploying-python-app_{context}"] -= Deploying a Python application - -The following procedure deploys a back-end service for the `parksmap` application. The Python application performs 2D geo-spatial queries against a MongoDB database to locate and return map coordinates of all national parks in the world. - -The deployed back-end service is `nationalparks`. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). -* You have a deployed image. - -.Procedure - -. 
To create a new Python application, enter the following command: -+ -[source,terminal] ----- -$ oc new-app python~https://github.com/openshift-roadshow/nationalparks-py.git --name nationalparks -l 'app=national-parks-app,component=nationalparks,role=backend,app.kubernetes.io/part-of=national-parks-app,app.kubernetes.io/name=python' --allow-missing-images=true ----- -+ -.Example output -+ -[source,text] ----- ---> Found image 0406f6c (13 days old) in image stream "openshift/python" under tag "3.9-ubi9" for "python" - - Python 3.9 - ---------- - Python 3.9 available as container is a base platform for building and running various Python 3.9 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. - - Tags: builder, python, python39, python-39, rh-python39 - - * A source build using source code from https://github.com/openshift-roadshow/nationalparks-py.git will be created - * The resulting image will be pushed to image stream tag "nationalparks:latest" - * Use 'oc start-build' to trigger a new build - ---> Creating resources with label app=national-parks-app,app.kubernetes.io/name=python,app.kubernetes.io/part-of=national-parks-app,component=nationalparks,role=backend ... - imagestream.image.openshift.io "nationalparks" created - buildconfig.build.openshift.io "nationalparks" created - deployment.apps "nationalparks" created - service "nationalparks" created ---> Success ----- - -. To create a route to expose your application, `nationalparks`, enter the following command: -+ -[source,terminal] ----- -$ oc create route edge nationalparks --service=nationalparks ----- -+ -.Example output -+ -[source,terminal] ----- -route.route.openshift.io/parksmap created ----- - -. To retrieve the created application route, enter the following command: -+ -[source,terminal] ----- -$ oc get route ----- -+ -.Example output -+ -[source,terminal] ----- -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -nationalparks nationalparks-user-getting-started.apps.cluster.example.com nationalparks 8080-tcp edge None -parksmap parksmap-user-getting-started.apps.cluster.example.com parksmap 8080-tcp edge None ----- diff --git a/modules/getting-started-cli-examining-pod.adoc b/modules/getting-started-cli-examining-pod.adoc deleted file mode 100644 index 2d37160deb11..000000000000 --- a/modules/getting-started-cli-examining-pod.adoc +++ /dev/null @@ -1,125 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - -:_content-type: PROCEDURE -[id="getting-started-cli-examining-pod_{context}"] -= Examining the pod - -{product-title} leverages the Kubernetes concept of a pod, which is one or more containers deployed together on one host, and the smallest compute unit that can be defined, deployed, and managed. -Pods are the rough equivalent of a machine instance, physical or virtual, to a container. - -You can view the pods in your cluster and to determine the health of those pods and the cluster as a whole. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). -* You have a deployed image. - -.Procedure -. 
To list all pods with node names, enter the following command: -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -parksmap-5f9579955-6sng8 1/1 Running 0 77s ----- - -. To list all pod details, enter the following command: -+ -[source,terminal] ----- -$ oc describe pods ----- -+ -.Example output -[source,terminal] ----- -Name: parksmap-848bd4954b-5pvcc -Namespace: user-getting-started -Priority: 0 -Node: ci-ln-fr1rt92-72292-4fzf9-worker-a-g9g7c/10.0.128.4 -Start Time: Sun, 13 Feb 2022 14:14:14 -0500 -Labels: app=national-parks-app - app.kubernetes.io/part-of=national-parks-app - component=parksmap - deployment=parksmap - pod-template-hash=848bd4954b - role=frontend -Annotations: k8s.v1.cni.cncf.io/network-status: - [{ - "name": "openshift-sdn", - "interface": "eth0", - "ips": [ - "10.131.0.14" - ], - "default": true, - "dns": {} - }] - k8s.v1.cni.cncf.io/network-status: - [{ - "name": "openshift-sdn", - "interface": "eth0", - "ips": [ - "10.131.0.14" - ], - "default": true, - "dns": {} - }] - openshift.io/generated-by: OpenShiftNewApp - openshift.io/scc: restricted -Status: Running -IP: 10.131.0.14 -IPs: - IP: 10.131.0.14 -Controlled By: ReplicaSet/parksmap-848bd4954b -Containers: - parksmap: - Container ID: cri-o://4b2625d4f61861e33cc95ad6d455915ea8ff6b75e17650538cc33c1e3e26aeb8 - Image: quay.io/openshiftroadshow/parksmap@sha256:89d1e324846cb431df9039e1a7fd0ed2ba0c51aafbae73f2abd70a83d5fa173b - Image ID: quay.io/openshiftroadshow/parksmap@sha256:89d1e324846cb431df9039e1a7fd0ed2ba0c51aafbae73f2abd70a83d5fa173b - Port: 8080/TCP - Host Port: 0/TCP - State: Running - Started: Sun, 13 Feb 2022 14:14:25 -0500 - Ready: True - Restart Count: 0 - Environment: <none> - Mounts: - /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-6f844 (ro) -Conditions: - Type Status - Initialized True - Ready True - ContainersReady True - PodScheduled True -Volumes: - kube-api-access-6f844: - Type: Projected (a volume that contains injected data from multiple sources) - TokenExpirationSeconds: 3607 - ConfigMapName: kube-root-ca.crt - ConfigMapOptional: <nil> - DownwardAPI: true - ConfigMapName: openshift-service-ca.crt - ConfigMapOptional: <nil> -QoS Class: BestEffort -Node-Selectors: <none> -Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s - node.kubernetes.io/unreachable:NoExecute op=Exists for 300s -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Scheduled 46s default-scheduler Successfully assigned user-getting-started/parksmap-848bd4954b-5pvcc to ci-ln-fr1rt92-72292-4fzf9-worker-a-g9g7c - Normal AddedInterface 44s multus Add eth0 [10.131.0.14/23] from openshift-sdn - Normal Pulling 44s kubelet Pulling image "quay.io/openshiftroadshow/parksmap@sha256:89d1e324846cb431df9039e1a7fd0ed2ba0c51aafbae73f2abd70a83d5fa173b" - Normal Pulled 35s kubelet Successfully pulled image "quay.io/openshiftroadshow/parksmap@sha256:89d1e324846cb431df9039e1a7fd0ed2ba0c51aafbae73f2abd70a83d5fa173b" in 9.49243308s - Normal Created 35s kubelet Created container parksmap - Normal Started 35s kubelet Started container parksmap ----- diff --git a/modules/getting-started-cli-granting-permissions.adoc b/modules/getting-started-cli-granting-permissions.adoc deleted file mode 100644 index 814b42b5b3fc..000000000000 --- a/modules/getting-started-cli-granting-permissions.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - 
-:_content-type: PROCEDURE -[id="getting-started-cli-granting-permissions_{context}"] -= Granting view permissions - -{product-title} automatically creates a few special service accounts in every project. The default service account takes responsibility for running the pods. {product-title} uses and injects this service account into every pod that launches. - -The following procedure creates a `RoleBinding` object for the default `ServiceAccount` object. The service account communicates with the {product-title} API to learn about pods, services, and resources within the project. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). -* You have a deployed image. -* You must have `cluster-admin` or `project-admin` privileges. - -.Procedure - -* To add the view role to the default service account in the `user-getting-started project`, enter the following command: -+ -[source,terminal] ----- -$ oc adm policy add-role-to-user view -z default -n user-getting-started ----- diff --git a/modules/getting-started-cli-load-data-output.adoc b/modules/getting-started-cli-load-data-output.adoc deleted file mode 100644 index 6ae8521b30e7..000000000000 --- a/modules/getting-started-cli-load-data-output.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - -:_content-type: PROCEDURE -[id="getting-started-cli-load-data-output_{context}"] - -= Loading data and displaying the national parks map - -You deployed the `parksmap` and `nationalparks` applications and then deployed the `mongodb-nationalparks` database. However, no data has been loaded _into_ the database. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). -* You have a deployed image. - -.Procedure - -. To load national parks data, enter the following command: -+ -[source,terminal] ----- -$ oc exec $(oc get pods -l component=nationalparks | tail -n 1 | awk '{print $1;}') -- curl -s http://localhost:8080/ws/data/load ----- -+ -.Example output -+ -[source,text] ----- -"Items inserted in database: 2893" ----- - -. To verify that your data is loaded properly, enter the following command: -+ -[source,terminal] ----- -$ oc exec $(oc get pods -l component=nationalparks | tail -n 1 | awk '{print $1;}') -- curl -s http://localhost:8080/ws/data/all ----- -+ -.Example output (trimmed) -+ -[source,terminal] ----- -, {"id": "Great Zimbabwe", "latitude": "-20.2674635", "longitude": "30.9337986", "name": "Great Zimbabwe"}] ----- - -. To add labels to the route, enter the following command: -+ -[source,terminal] ----- -$ oc label route nationalparks type=parksmap-backend ----- -+ -.Example output -+ -[source,terminal] ----- -route.route.openshift.io/nationalparks labeled ----- - -. To retrieve your routes to view your map, enter the following command: -+ -[source,terminal] ----- -$ oc get routes ----- -+ -.Example output -+ -[source,terminal] ----- -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -nationalparks nationalparks-user-getting-started.apps.cluster.example.com nationalparks 8080-tcp edge None -parksmap parksmap-user-getting-started.apps.cluster.example.com parksmap 8080-tcp edge None ----- - -. Copy and paste the `HOST/PORT` path you retrieved above into your web browser. Your browser should display a map of the national parks across the world. 
-+ -.National parks across the world -image::getting-started-map-national-parks.png[A map of the national parks across the world is displayed with location tracking.] diff --git a/modules/getting-started-cli-login.adoc b/modules/getting-started-cli-login.adoc deleted file mode 100644 index 51a625acf322..000000000000 --- a/modules/getting-started-cli-login.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - -:_content-type: PROCEDURE -[id="getting-started-cli-login_{context}"] -= Logging in to the CLI - -You can log in to the OpenShift CLI (`oc`) to access and manage your cluster. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). - -.Procedure - -* Log into {product-title} from the CLI using your username and password, with an OAuth token, or with a web browser: -** With username and password: -+ -[source,terminal] ----- -$ oc login -u=<username> -p=<password> --server=<your-openshift-server> --insecure-skip-tls-verify ----- -** With an OAuth token: -+ -[source,terminal] ----- -$ oc login <https://api.your-openshift-server.com> --token=<tokenID> ----- -** With a web browser: -+ -[source,terminal] ----- -$ oc login <cluster_url> --web ----- - -You can now create a project or issue other commands for managing your cluster. diff --git a/modules/getting-started-cli-scaling-app.adoc b/modules/getting-started-cli-scaling-app.adoc deleted file mode 100644 index 20e433ac8ad4..000000000000 --- a/modules/getting-started-cli-scaling-app.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - -:_content-type: PROCEDURE -[id="getting-started-cli-scaling-app_{context}"] -= Scaling the application - -In Kubernetes, a `Deployment` object defines how an application deploys. In most cases, users use `Pod`, `Service`, `ReplicaSets`, and `Deployment` resources together. In most cases, {product-title} creates the resources for you. - -When you deploy the `national-parks-app` image, a deployment resource is created. In this example, only one `Pod` is deployed. - -The following procedure scales the `national-parks-image` to use two instances. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). -* You have a deployed image. - -.Procedure - -* To scale your application from one pod instance to two pod instances, enter the following command: -+ -[source,terminal] ----- -$ oc scale --current-replicas=1 --replicas=2 deployment/parksmap ----- -+ -.Example output -[source,text] ----- -deployment.apps/parksmap scaled ----- - -.Verification -. To ensure that your application scaled properly, enter the following command: -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -parksmap-5f9579955-6sng8 1/1 Running 0 7m39s -parksmap-5f9579955-8tgft 1/1 Running 0 24s ----- - -. 
To scale your application back down to one pod instance, enter the following command: -+ -[source,terminal] ----- -$ oc scale --current-replicas=2 --replicas=1 deployment/parksmap ----- diff --git a/modules/getting-started-openshift-common-terms.adoc b/modules/getting-started-openshift-common-terms.adoc deleted file mode 100644 index 66e46a9e2c2d..000000000000 --- a/modules/getting-started-openshift-common-terms.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-overview.adoc - -:_content-type: REFERENCE -[id="getting-started-openshift-common-terms_{context}"] -= Glossary of common terms for {product-title} - -This glossary defines common Kubernetes and {product-title} terms. - -Kubernetes:: -Kubernetes is an open source container orchestration engine for automating deployment, scaling, and management of containerized applications. - -Containers:: -Containers are application instances and components that run in OCI-compliant containers on the worker nodes. A container is the runtime of an Open Container Initiative (OCI)-compliant image. An image is a binary application. A worker node can run many containers. A node capacity is related to memory and CPU capabilities of the underlying resources whether they are cloud, hardware, or virtualized. - -Pod:: -A pod is one or more containers deployed together on one host. It consists of a colocated group of containers with shared resources such as volumes and IP addresses. A pod is also the smallest compute unit defined, deployed, and managed. -+ -In {product-title}, pods replace individual application containers as the smallest deployable unit. -+ -Pods are the orchestrated unit in {product-title}. {product-title} schedules and runs all containers in a pod on the same node. Complex applications are made up of many pods, each with their own containers. They interact externally and also with another inside the {product-title} environment. - -Replica set and replication controller:: -The Kubernetes replica set and the {product-title} replication controller are both available. The job of this component is to ensure the specified number of pod replicas are running at all times. If pods exit or are deleted, the replica set or replication controller starts more. If more pods are running than needed, the replica set deletes as many as necessary to match the specified number of replicas. - -Deployment and DeploymentConfig:: -{product-title} implements both Kubernetes `Deployment` objects and {product-title} `DeploymentConfigs` objects. Users may select either. -+ -`Deployment` objects control how an application is rolled out as pods. They identify the name of the container image to be taken from the registry and deployed as a pod on a node. They set the number of replicas of the pod to deploy, creating a replica set to manage the process. The labels indicated instruct the scheduler onto which nodes to deploy the pod. The set of labels is included in the pod definition that the replica set instantiates. -+ -`Deployment` objects are able to update the pods deployed onto the worker nodes based on the version of the `Deployment` objects and the various rollout strategies for managing acceptable application availability. {product-title} `DeploymentConfig` objects add the additional features of change triggers, which are able to automatically create new versions of the `Deployment` objects as new versions of the container image are available, or other changes. 
- -Service:: -A service defines a logical set of pods and access policies. It provides permanent internal IP addresses and hostnames for other applications to use as pods are created and destroyed. -+ -Service layers connect application components together. For example, a front-end web service connects to a database instance by communicating with its service. Services allow for simple internal load balancing across application components. {product-title} automatically injects service information into running containers for ease of discovery. - -Route:: -A route is a way to expose a service by giving it an externally reachable hostname, such as www.example.com. Each route consists of a route name, a service selector, and optionally a security configuration. A router can consume a defined route and the endpoints identified by its service to provide a name that lets external clients reach your applications. While it is easy to deploy a complete multi-tier application, traffic from anywhere outside the {product-title} environment cannot reach the application without the routing layer. - -Build:: -A build is the process of transforming input parameters into a resulting object. Most often, the process is used to transform input parameters or source code into a runnable image. A `BuildConfig` object is the definition of the entire build process. {product-title} leverages Kubernetes by creating containers from build images and pushing them to the integrated registry. - -Project:: -{product-title} uses projects to allow groups of users or developers to work together, serving as the unit of isolation and collaboration. It defines the scope of resources, allows project administrators and collaborators to manage resources, and restricts and tracks the user’s resources with quotas and limits. -+ -A project is a Kubernetes namespace with additional annotations. It is the central vehicle for managing access to resources for regular users. A project lets a community of users organize and manage their content in isolation from other communities. Users must receive access to projects from administrators. But cluster administrators can allow developers to create their own projects, in which case users automatically have access to their own projects. -+ -Each project has its own set of objects, policies, constraints, and service accounts. -+ -Projects are also known as namespaces. - -Operators:: -An Operator is a Kubernetes-native application. The goal of an Operator is to put operational knowledge into software. Previously this knowledge only resided in the minds of administrators, various combinations or shell scripts or automation software such as Ansible. It was outside your Kubernetes cluster and hard to integrate. With Operators, all of this changes. -+ -Operators are purpose-built for your applications. They implement and automate common Day 1 activities such as installation and configuration as well as Day 2 activities such as scaling up and down, reconfiguration, updates, backups, fail overs, and restores in a piece of software running inside your Kubernetes cluster by integrating natively with Kubernetes concepts and APIs. This is called a Kubernetes-native application. -+ -With Operators, applications must not be treated as a collection of primitives, such as pods, deployments, services, or config maps. Instead, Operators should be treated as a single object that exposes the options that make sense for the application. 
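The glossary terms above map to API resources that you can list with the OpenShift CLI (`oc`). The following commands are a minimal sketch that assumes the `user-getting-started` project used elsewhere in this guide; substitute your own project name as needed.

[source,terminal]
----
# Workload resources: pods, replica sets, deployments, services, and routes
$ oc get pods,replicasets,deployments,services,routes -n user-getting-started

# Build resources: the BuildConfig objects and the builds they produced
$ oc get buildconfigs,builds -n user-getting-started

# A project is a Kubernetes namespace with additional annotations
$ oc get project user-getting-started -o yaml
----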
diff --git a/modules/getting-started-web-console-connecting-a-database.adoc b/modules/getting-started-web-console-connecting-a-database.adoc deleted file mode 100644 index 585c35aac815..000000000000 --- a/modules/getting-started-web-console-connecting-a-database.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-web-console.adoc - -:_content-type: PROCEDURE -[id="getting-started-web-console-connecting-database_{context}"] -= Connecting to a database - -Deploy and connect a MongoDB database where the `national-parks-app` application stores location information. -After you mark the `national-parks-app` application as a backend for the map visualization tool, the `parksmap` deployment uses the {product-title} discovery mechanism to display the map automatically. - -.Prerequisites - -* You are logged in to the {product-title} web console. -* You are in the *Developer* perspective. -* You have a deployed image. - -.Procedure - -. From the *+Add* view in the *Developer* perspective, click *Container images* to open a dialog. -. In the *Image Name* field, enter `quay.io/centos7/mongodb-36-centos7`. -. In the *Runtime icon* field, search for `mongodb`. -. Scroll down to the *General* section. -. Ensure that you have the current values for the following: -.. Application: `national-parks-app` -.. Name: `mongodb-nationalparks` -. Select *Deployment* as the *Resource*. -. Clear the checkbox next to *Create route to the application*. -. In the *Advanced Options* section, click *Deployment* to add the following environment variables: -+ -.Environment variable names and values -[cols="1,1"] -|=== -|Name |Value - -|`MONGODB_USER`|`mongodb` -|`MONGODB_PASSWORD`|`mongodb` -|`MONGODB_DATABASE`|`mongodb` -|`MONGODB_ADMIN_PASSWORD`|`mongodb` -|=== - -. Click *Create*. diff --git a/modules/getting-started-web-console-creating-new-project.adoc b/modules/getting-started-web-console-creating-new-project.adoc deleted file mode 100644 index 7fea42282167..000000000000 --- a/modules/getting-started-web-console-creating-new-project.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-web-console.adoc - -:_content-type: PROCEDURE -[id="getting-started-web-console-creating-new-project_{context}"] -= Creating a new project - -A project enables a community of users to organize and manage their content in isolation. Projects are {product-title} extensions to Kubernetes namespaces. Projects have additional features that enable user self-provisioning. - -Users must receive access to projects from administrators. Cluster administrators can allow developers to create their own projects. In most cases, users automatically have access to their own projects. - -Each project has its own set of objects, policies, constraints, and service accounts. - -.Prerequisites - -* You are logged in to the {product-title} web console. -* You are in the *Developer* perspective. -* You have the appropriate roles and permissions in a project to create applications and other workloads in {product-title}. - -.Procedure - -. In the *+Add* view, select *Project* -> *Create Project*. -. In the *Name* field, enter `user-getting-started`. -. Optional: In the *Display name* field, enter `Getting Started with OpenShift`. -+ -[NOTE] -==== -The *Display name* and *Description* fields are optional. -==== -. Click *Create*. - -You have created your first project on {product-title}. 
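If you prefer the command line, a project with the same name and display name as in the preceding steps can also be created with the OpenShift CLI (`oc`). The following sketch shows the equivalent command:

[source,terminal]
----
$ oc new-project user-getting-started --display-name="Getting Started with OpenShift"
----

The `oc new-project` command also switches your CLI context to the new project, so subsequent `oc` commands run against it.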
diff --git a/modules/getting-started-web-console-creating-secret.adoc b/modules/getting-started-web-console-creating-secret.adoc deleted file mode 100644 index 04fbef14e8bc..000000000000 --- a/modules/getting-started-web-console-creating-secret.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-web-console.adoc - -:_content-type: PROCEDURE -[id="getting-started-web-console-creating-secret_{context}"] -= Creating a secret - -The `Secret` object provides a mechanism to hold sensitive information such as passwords, {product-title} client configuration files, private source repository credentials, and so on. -Secrets decouple sensitive content from the pods. You can mount secrets into containers using a volume plugin or the system can use secrets to perform actions on behalf of a pod. -The following procedure adds the secret `nationalparks-mongodb-parameters` and mounts it to the `nationalparks` workload. - -.Prerequisites - -* You are logged in to the {product-title} web console. -* You are in the *Developer* perspective. -* You have a deployed image. - -.Procedure - -. From the *Developer* perspective, navigate to *Secrets* on the left hand navigation and click *Secrets*. -. Click *Create* -> *Key/value secret*. -.. In the *Secret name* field, enter `nationalparks-mongodb-parameters`. -.. Enter the following values for *Key* and *Value*: -+ -.Secret keys and values -[cols="1,1"] -|=== -|Key |Value - -|`MONGODB_USER`|`mongodb` -|`DATABASE_SERVICE_NAME`|`mongodb-nationalparks` -|`MONGODB_PASSWORD`|`mongodb` -|`MONGODB_DATABASE`|`mongodb` -|`MONGODB_ADMIN_PASSWORD`|`mongodb` -|=== - -.. Click *Create*. -. Click *Add Secret to workload*. -.. From the drop down menu, select `nationalparks` as the workload to add. -.. Click *Save*. - -This change in configuration triggers a new rollout of the `nationalparks` deployment with the environment variables properly injected. diff --git a/modules/getting-started-web-console-deploying-first-image.adoc b/modules/getting-started-web-console-deploying-first-image.adoc deleted file mode 100644 index 5e65cef4675f..000000000000 --- a/modules/getting-started-web-console-deploying-first-image.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-web-console.adoc - -:_content-type: PROCEDURE -[id="getting-started-web-console-deploying-first-image_{context}"] -= Deploying your first image - -The simplest way to deploy an application in {product-title} is to run an existing container image. The following procedure deploys a front end component of an application called `national-parks-app`. The web application displays an interactive map. The map displays the location of major national parks across the world. - -.Prerequisites - -* You are logged in to the {product-title} web console. -* You are in the *Developer* perspective. -* You have the appropriate roles and permissions in a project to create applications and other workloads in {product-title}. - -.Procedure - -. From the *+Add* view in the *Developer* perspective, click *Container images* to open a dialog. -. In the *Image Name* field, enter the following: `quay.io/openshiftroadshow/parksmap:latest` -. Ensure that you have the current values for the following: -.. Application: `national-parks-app` -.. Name: `parksmap` -. Select *Deployment* as the *Resource*. -. Select *Create route to the application*. -. 
In the *Advanced Options* section, click *Labels* and add labels to better identify this deployment later. Labels help identify and filter components in the web console and in the command line. Add the following labels: -** `app=national-parks-app` -** `component=parksmap` -** `role=frontend` -. Click *Create*. - -You are redirected to the *Topology* page where you can see the `parksmap` deployment in the `national-parks-app` application. diff --git a/modules/getting-started-web-console-deploying-python-app.adoc b/modules/getting-started-web-console-deploying-python-app.adoc deleted file mode 100644 index f799d1a7f86d..000000000000 --- a/modules/getting-started-web-console-deploying-python-app.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-web-console.adoc - -:_content-type: PROCEDURE -[id="getting-started-web-console-deploying-python-app_{context}"] -= Deploying a Python application - -The following procedure deploys a back-end service for the `parksmap` application. The Python application performs 2D geo-spatial queries against a MongoDB database to locate and return map coordinates of all national parks in the world. - -The name of the deployed back-end service is `nationalparks`. - -.Prerequisites - -* You are logged in to the {product-title} web console. -* You are in the *Developer* perspective. -* You have a deployed image. - -.Procedure - -. From the *+Add* view in the *Developer* perspective, click *Import from Git* to open a dialog. -. Enter the following URL in the *Git Repo URL* field: -[x-]`https://github.com/openshift-roadshow/nationalparks-py.git` -+ -A builder image is automatically detected. -+ -[NOTE] -==== -If the detected builder image is a Dockerfile, select *Edit Import Strategy*. Select *Builder Image* and then click *Python*. -==== -. Scroll to the *General* section. -. Ensure that you have the current values for the following: -.. Application: `national-parks-app` -.. Name: `nationalparks` -. Select *Deployment* as the *Resource*. -. Select *Create route to the application*. -. In the *Advanced Options* section, click *Labels* and add labels to better identify this deployment later. Labels help identify and filter components in the web console and in the command line. Add the following labels: -.. `app=national-parks-app` -.. `component=nationalparks` -.. `role=backend` -.. `type=parksmap-backend` -. Click *Create*. -. From the *Topology* view, select the `nationalparks` application. -+ -[NOTE] -==== -Click the *Resources* tab. In the *Builds* section, you can see your build running. -==== diff --git a/modules/getting-started-web-console-examining-pod.adoc b/modules/getting-started-web-console-examining-pod.adoc deleted file mode 100644 index 65134f3c9b5d..000000000000 --- a/modules/getting-started-web-console-examining-pod.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-web-console.adoc - -:_content-type: PROCEDURE -[id="getting-started-web-console-examining-pod_{context}"] -= Examining the pod - -{product-title} leverages the Kubernetes concept of a pod, which is one or more containers deployed together on one host, and the smallest compute unit that can be defined, deployed, and managed. -Pods are the rough equivalent of a machine instance, physical or virtual, for a container. - -The *Overview* panel enables you to access many features of the `parksmap` deployment. 
The *Details* and *Resources* tabs enable you to scale application pods, check build status, services, and routes. - -.Prerequisites - -* You are logged in to the {product-title} web console. -* You are in the *Developer* perspective. -* You have a deployed image. - -.Procedure - -* Click `D parksmap` in the *Topology* view to open the *Overview* panel. -+ -.Parksmap deployment -image::getting-started-examine-pod.png[] -+ -The *Overview* panel includes tabs for *Details*, *Resources*, and *Observe*. The *Details* tab might be displayed by default. - -+ -.Overview panel tab definitions -[cols="2,3"] -|=== -|Tab |Defintion - -|*Details* | Enables you to scale your application and view pod configuration such as labels, annotations, and the status of the application. -1.4+|*Resources* |Displays the resources that are associated with the deployment. -|Pods are the basic units of {product-title} applications. You can see how many pods are being used, what their status is, and you can view the logs. -|*Services* that are created for your pod and assigned ports are listed under the *Services* heading. -|*Routes* enable external access to the pods and a URL is used to access them. -|*Observe*|View various *Events* and *Metrics* information as it relates to your pod. -|=== diff --git a/modules/getting-started-web-console-granting-permissions.adoc b/modules/getting-started-web-console-granting-permissions.adoc deleted file mode 100644 index d9734cb23d72..000000000000 --- a/modules/getting-started-web-console-granting-permissions.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-web-console.adoc - -:_content-type: PROCEDURE -[id="getting-started-web-console-granting-permissions_{context}"] -= Granting view permissions - -{product-title} automatically creates a few special service accounts in every project. The default service account takes responsibility for running the pods. {product-title} uses and injects this service account into every pod that launches. - -The following procedure creates a `RoleBinding` object for the default `ServiceAccount` object. The service account communicates with the {product-title} API to learn about pods, services, and resources within the project. - -.Prerequisites - -* You are logged in to the {product-title} web console. -* You have a deployed image. -* You are in the *Administrator* perspective. - -.Procedure - -. Navigate to *User Management* and then click *RoleBindings*. -. Click *Create binding*. -. Select `Namespace role binding (RoleBinding)`. -. In the *Name* field, enter `sa-user-account`. -. In the *Namespace* field, search for and select `user-getting-started`. -. In the *Role name* field, search for `view` and select `view`. -. In the *Subject* field, select `ServiceAccount`. -. In the *Subject namespace* field, search for and select `user-getting-started`. -. In the *Subject name* field, enter `default`. -. Click *Create*. 
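The same role binding can also be created and verified from the command line. The following commands are a sketch that mirrors the web console steps above: the binding name `sa-user-account`, the `view` cluster role, and the `default` service account in the `user-getting-started` namespace.

[source,terminal]
----
# Bind the view cluster role to the default service account
$ oc create rolebinding sa-user-account --clusterrole=view --serviceaccount=user-getting-started:default -n user-getting-started

# Confirm that the service account can now read resources in the project
$ oc auth can-i list pods --as=system:serviceaccount:user-getting-started:default -n user-getting-started
----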
diff --git a/modules/getting-started-web-console-load-data-output.adoc b/modules/getting-started-web-console-load-data-output.adoc deleted file mode 100644 index 96dd70dda84b..000000000000 --- a/modules/getting-started-web-console-load-data-output.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-web-console.adoc - -:_content-type: PROCEDURE -[id="getting-started-web-console-load-data-output_{context}"] - -= Loading data and displaying the national parks map - -You deployed the `parksmap` and `nationalparks` applications and then deployed the `mongodb-nationalparks` database. However, no data has been loaded _into_ the database. -Before loading the data, add the proper labels to the `mongodb-nationalparks` and `nationalparks` deployment. - -.Prerequisites - -* You are logged in to the {product-title} web console. -* You are in the *Developer* perspective. -* You have a deployed image. - -.Procedure - -. From the *Topology* view, navigate to `nationalparks` deployment and click *Resources* and retrieve your route information. -. Copy and paste the URL into your web browser and add the following at the end of the URL: -+ -[source,text] ----- -/ws/data/load ----- -+ -.Example output -+ -[source,text] ----- -Items inserted in database: 2893 ----- -. From the *Topology* view, navigate to `parksmap` deployment and click *Resources* and retrieve your route information. -. Copy and paste the URL into your web browser to view your national parks across the world map. - -.National parks across the world -image::getting-started-map-national-parks.png[] diff --git a/modules/getting-started-web-console-login.adoc b/modules/getting-started-web-console-login.adoc deleted file mode 100644 index 29868c71a24f..000000000000 --- a/modules/getting-started-web-console-login.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-web-console.adoc - -:_content-type: PROCEDURE -[id="getting-started-web-console-login_{context}"] -= Logging in to the web console - -You can log in to the {product-title} web console to access and manage your cluster. - -.Prerequisites - -* You must have access to an {product-title} cluster. - -.Procedure - -* Log in to the {product-title} web console using your login credentials. - -You are redirected to the *Projects* page. For non-administrative users, the default view is the *Developer* perspective. For cluster administrators, the default view is the *Administrator* perspective. If you do not have `cluster-admin` privileges, you will not see the *Administrator* perspective in your web console. - -The web console provides two perspectives: the *Administrator* perspective and *Developer* perspective. The *Developer* perspective provides workflows specific to the developer use cases. - -.Perspective switcher -image::web_console_perspectives.png[500,300] - -Use the perspective switcher to switch to the *Developer* perspective. The *Topology* view with options to create an application is displayed. 
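If you are unsure of the web console URL for your cluster, you can print it with the OpenShift CLI (`oc`), assuming `oc` is installed and already logged in to the cluster:

[source,terminal]
----
# Print the web console URL of the cluster you are logged in to
$ oc whoami --show-console

# Confirm the user you are logged in as
$ oc whoami
----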
diff --git a/modules/getting-started-web-console-scaling-app.adoc b/modules/getting-started-web-console-scaling-app.adoc deleted file mode 100644 index fa7f80b108e3..000000000000 --- a/modules/getting-started-web-console-scaling-app.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-web-console.adoc - -:_content-type: PROCEDURE -[id="getting-started-web-console-scaling-app_{context}"] -= Scaling the application - -In Kubernetes, a `Deployment` object defines how an application deploys. Users typically use `Pod`, `Service`, `ReplicaSet`, and `Deployment` resources together, and in most cases {product-title} creates these resources for you. - -When you deploy the `national-parks-app` image, a deployment resource is created. In this example, only one `Pod` is deployed. - -The following procedure scales the `parksmap` deployment in the `national-parks-app` application to use two instances. - -.Prerequisites - -* You are logged in to the {product-title} web console. -* You are in the *Developer* perspective. -* You have a deployed image. - -.Procedure - -. In the *Topology* view, click the `national-parks-app` application. -. Click the *Details* tab. -. Use the up arrow to scale the pod to two instances. -+ -.Scaling application -image::getting-started-scaling-pod.png[500,300] -+ -[NOTE] -==== -Application scaling can happen quickly because {product-title} is launching a new instance of an existing image. -==== - -. Use the down arrow to scale the pod down to one instance. diff --git a/modules/gitops-additional-permissions-for-cluster-config.adoc b/modules/gitops-additional-permissions-for-cluster-config.adoc deleted file mode 100644 index 1dae04ccca55..000000000000 --- a/modules/gitops-additional-permissions-for-cluster-config.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assembly: -// -// * configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc - -:_content-type: PROCEDURE -[id="gitops-additional-permissions-for-cluster-config_{context}"] -= Adding permissions for cluster configuration - -You can grant permissions for an Argo CD instance to manage cluster configuration. Create a cluster role with additional permissions and then create a new cluster role binding to associate the cluster role with a service account. - -.Procedure - -. Log in to the {product-title} web console as an admin. -. In the web console, select **User Management** -> **Roles** -> **Create Role**. Use the following `ClusterRole` YAML template to add rules to specify the additional permissions. -+ -[source,yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: secrets-cluster-role -rules: -- apiGroups: [""] - resources: ["secrets"] - verbs: ["*"] ----- -. Click **Create** to add the cluster role. -. Now create the cluster role binding. In the web console, select **User Management** -> **Role Bindings** -> **Create Binding**. -. Select **All Projects** from the **Project** drop-down. -. Click **Create binding**. -. Select **Binding type** as **Cluster-wide role binding (ClusterRoleBinding)**. -. Enter a unique value for the **RoleBinding name**. -. Select the newly created cluster role or an existing cluster role from the drop-down list. -. Select the **Subject** as **ServiceAccount** and then provide the **Subject namespace** and **name**. -.. **Subject namespace**: `openshift-gitops` -.. **Subject name**: `openshift-gitops-argocd-application-controller` -. Click **Create**. 
The YAML file for the `ClusterRoleBinding` object is as follows: -+ -[source,yaml] ----- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cluster-role-binding -subjects: - - kind: ServiceAccount - name: openshift-gitops-argocd-application-controller - namespace: openshift-gitops -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: admin ----- - diff --git a/modules/gitops-additional-steps-for-disconnected-clusters.adoc b/modules/gitops-additional-steps-for-disconnected-clusters.adoc deleted file mode 100644 index 92fed7003276..000000000000 --- a/modules/gitops-additional-steps-for-disconnected-clusters.adoc +++ /dev/null @@ -1,46 +0,0 @@ -:_content-type: PROCEDURE -[id="gitops-additional-steps-disconnected-clusters_{context}"] -= Integrating Keycloak with the OpenShift OAuth server in a disconnected cluster - -In a disconnected cluster, Keycloak communicates with the OpenShift OAuth server through a proxy. - -.Procedure - -Follow these steps to integrate Keycloak with the OpenShift OAuth server: - -. Log in to the Keycloak pod: -+ -[source,terminal] ----- -$ oc exec -it dc/keycloak -n argocd -- /bin/bash ----- -. Launch the JBoss CLI tool to set up the proxy mappings: -+ -[source,terminal] ----- -/opt/eap/bin/jboss-cli.sh ----- -. In the JBoss CLI tool, run the following command to start an embedded standalone server: -+ -[source,terminal] ----- -embed-server --server-config=standalone-openshift.xml ----- -. Set up proxy mappings for the OpenShift OAuth server host: -+ -[source,terminal] ----- -/subsystem=keycloak-server/spi=connectionsHttpClient/provider=default:write-attribute(name=properties.proxy-mappings,value=["<oauth-server-hostname>;http://<proxy-server-host>:<proxy-server-port>"]) ----- -. Stop the embedded server: -+ -[source,terminal] ----- -quit ----- -. Reload the JBoss CLI tool to apply the proxy mappings: -+ -[source,terminal] ----- -/opt/eap/bin/jboss-cli.sh --connect --command=:reload ----- \ No newline at end of file diff --git a/modules/gitops-argo-cd-installation.adoc b/modules/gitops-argo-cd-installation.adoc deleted file mode 100644 index 5bb2536336ce..000000000000 --- a/modules/gitops-argo-cd-installation.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/gitops/setting-up-argocd-instance.adoc - -:_content-type: PROCEDURE -[id="gitops-argo-cd-installation_{context}"] -= Installing Argo CD - -To manage cluster configurations or deploy applications, you can install and deploy a new Argo CD instance. - -.Procedure -. Log in to the {product-title} web console. - -. Click *Operators* -> *Installed Operators*. - -. Create or select the project where you want to install the Argo CD instance from the *Project* drop-down menu. - -. Select *OpenShift GitOps Operator* from the installed operators and select the *Argo CD* tab. - -. Click *Create* to configure the parameters: - -.. Enter the **Name** of the instance. By default, the *Name* is set to *argocd*. - -.. Create an external OS Route to access Argo CD server. Click *Server* -> *Route* and check *Enabled*. - -. To open the Argo CD web UI, click the route by navigating to **Networking -> Routes -> <instance name>-server** in the project where the Argo CD instance is installed. 
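You can also retrieve the Argo CD URL and the initial `admin` password from the command line. The following commands are a sketch that assumes an instance named `argocd`; the Operator stores the generated admin password in a secret named `<instance_name>-cluster` in the same project.

[source,terminal]
----
# Host name of the Argo CD server route
$ oc get route argocd-server -n <project_name> -o jsonpath='{.spec.host}'

# Initial admin password generated by the Operator
$ oc extract secret/argocd-cluster -n <project_name> --keys=admin.password --to=-
----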
\ No newline at end of file diff --git a/modules/gitops-argo-cd-notification.adoc b/modules/gitops-argo-cd-notification.adoc deleted file mode 100644 index 62b6b7e551e9..000000000000 --- a/modules/gitops-argo-cd-notification.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * argo-cd-custom-resource-properties.adoc - -:_content-type: PROCEDURE -[id="gitops-argo-cd-notification_{context}"] -= Enabling notifications with Argo CD instance - -To enable or disable the link:https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/[Argo CD notifications controller], set a parameter in the Argo CD custom resource. By default, notifications are disabled. To enable notifications, set the `enabled` parameter to `true` in the `.yaml` file: - -.Procedure - -. Set the `enabled` parameter to `true`: - -[source,yaml] ----- -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: example-argocd -spec: - notifications: - enabled: true ----- \ No newline at end of file diff --git a/modules/gitops-argo-cd-properties.adoc b/modules/gitops-argo-cd-properties.adoc deleted file mode 100644 index da03b3c77ef4..000000000000 --- a/modules/gitops-argo-cd-properties.adoc +++ /dev/null @@ -1,111 +0,0 @@ -// Module included in the following assemblies: -// -// * argo-cd-custom-resource-properties.adoc - -:_content-type: REFERENCE -[id="argo-cd-properties_{context}"] -= Argo CD custom resource properties - -[role="_abstract"] -The Argo CD Custom Resource consists of the following properties: - -|=== -|**Name** |**Description** |**Default** | **Properties** -|`ApplicationInstanceLabelKey` |The `metadata.label` key name where Argo CD injects the app name as a tracking label.|`app.kubernetes.io/instance` | -|`ApplicationSet` -|`ApplicationSet` controller configuration options. -| `_<Object>_` -a|* _<Image>_ - The container image for the `ApplicationSet` controller. This overrides the `ARGOCD_APPLICATIONSET_IMAGE` environment variable. - * _<Version>_ - The tag to use with the `ApplicationSet` container image. - * _<Resources>_ - The container compute resources. - * _<LogLevel>_ - The log level used by the Argo CD Application Controller component. Valid options are `debug`, `info`, `error`, and `warn`. - * _<LogFormat>_ - The log format used by the Argo CD Application Controller component. Valid options are `text` or `json`. - * _<PrallelismLimit>_ - The kubectl parallelism limit to set for the controller `(--kubectl-parallelism-limit flag)`. -|`ConfigManagementPlugins` |Add a configuration management plugin.| `__<empty>__` | -|`Controller` |Argo CD Application Controller options.| `__<Object>__` -a|* _<Processors.Operation>_ - The number of operation processors. - * _<Processors.Status>_ - The number of status processors. - * _<Resources>_ - The container compute resources. - * _<LogLevel>_ - The log level used by the Argo CD Application Controller component. Valid options are `debug`, `info`, `error`, and `warn`. - * _<AppSync>_ - AppSync is used to control the sync frequency of Argo CD applications - * _<Sharding.enabled>_ - Enable sharding on the Argo CD Application Controller component. This property is used to manage a large number of clusters to relieve memory pressure on the controller component. - * _<Sharding.replicas>_ - The number of replicas that will be used to support sharding of the Argo CD Application Controller. - * _<Env>_ - Environment to set for the application controller workloads. 
-|`DisableAdmin` |Disables the built-in admin user.|`false` | -|`GATrackingID` |Use a Google Analytics tracking ID.|`__<empty>__` | -|`GAAnonymizeusers` |Enable hashed usernames sent to google analytics.|`false` | -|`HA` |High availablity options.| `__<Object>__` -a|* _<Enabled>_ - Toggle high availability support globally for Argo CD. - * _<RedisProxyImage>_ - The Redis HAProxy container image. This overrides the `ARGOCD_REDIS_HA_PROXY_IMAGE` environment variable. - * _<RedisProxyVersion>_ - The tag to use for the Redis HAProxy container image. -|`HelpChatURL` |URL for getting chat help (this will typically be your Slack channel for support).|`https://mycorp.slack.com/argo-cd` | -|`HelpChatText` |The text that appears in a text box for getting chat help.|`Chat now!`| -|`Image` |The container image for all Argo CD components. This overrides the `ARGOCD_IMAGE` environment variable.|`argoproj/argocd` | -|`Ingress` |Ingress configuration options.| `__<Object>__` | -|`InitialRepositories` |Initial Git repositories to configure Argo CD to use upon creation of the cluster.|`__<empty>__` | -|`Notifications` |Notifications controller configuration options.|`__<Object>__` -a|* _<Enabled>_ - The toggle to start the notifications-controller. - * _<Image>_ - The container image for all Argo CD components. This overrides the `ARGOCD_IMAGE` environment variable. - * _<Version>_ - The tag to use with the Notifications container image. - * _<Resources>_ - The container compute resources. - * _<LogLevel>_ - The log level used by the Argo CD Application Controller component. Valid options are `debug`, `info`, `error`, and `warn`. -|`RepositoryCredentials` |Git repository credential templates to configure Argo CD to use upon creation of the cluster.| `__<empty>__` | -|`InitialSSHKnownHosts` |Initial SSH Known Hosts for Argo CD to use upon creation of the cluster.| `__<default_Argo_CD_Known_Hosts>__` | -|`KustomizeBuildOptions` |The build options and parameters to use with `kustomize build`.|`__<empty>__` | -|`OIDCConfig` |The OIDC configuration as an alternative to Dex.|`__<empty>__` | -|`NodePlacement` |Add the `nodeSelector` and the `tolerations`.|`__<empty>__` | -|`Prometheus` |Prometheus configuration options.|`__<Object>__` -a|* _<Enabled>_ - Toggle Prometheus support globally for Argo CD. - * _<Host>_ - The hostname to use for Ingress or Route resources. - * _<Ingress>_ - Toggles Ingress for Prometheus. - * _<Route>_ - Route configuration options. - * _<Size>_ - The replica count for the Prometheus `StatefulSet`. -|`RBAC` |RBAC configuration options.|`__<Object>__` -a|* _<DefaultPolicy>_ - The `policy.default` property in the `argocd-rbac-cm` config map. The name of the default role which Argo CD will fall back to, when authorizing API requests. - * _<Policy>_ - The `policy.csv` property in the `argocd-rbac-cm` config map. CSV data containing user-defined RBAC policies and role definitions. - * _<Scopes>_ - The scopes property in the `argocd-rbac-cm` config map. Controls which OIDC scopes to examine during RBAC enforcement (in addition to sub scope). -|`Redis` |Redis configuration options.|`__<Object>__` -a|* _<AutoTLS>_ - Use the provider to create the Redis server's TLS certificate (one of: openshift). Currently only available for {product-title}. - * _<DisableTLSVerification>_ - Define whether the Redis server should be accessed using strict TLS validation. - * _<Image>_ - The container image for Redis. This overrides the `ARGOCD_REDIS_IMAGE` environment variable. 
- * _<Resources>_ - The container compute resources. - * _<Version>_ - The tag to use with the Redis container image. -|`ResourceCustomizations` |Customize resource behavior.|`__<empty>__` | -|`ResourceExclusions` |Completely ignore entire classes of resource group.|`__<empty>__` | -|`ResourceInclusions` |The configuration to configure which resource group/kinds are applied.|`__<empty>__` | -|`Server` |Argo CD Server configuration options.|`__<Object>__` -a|* _<Autoscale>_ - Server autoscale configuration options. - * _<ExtraCommandArgs>_ - List of arguments added to the existing arguments set by the Operator. - * _<GRPC>_ - GRPC configuration options. - * _<Host>_ - The hostname used for Ingress or Route resources. - * _<Ingress>_ - Ingress configuration for the Argo CD server component. - * _<Insecure>_ - Toggles the insecure flag for Argo CD server. - * _<Resources>_ - The container compute resources. - * _<Replicas>_ - The number of replicas for the Argo CD server. Must be greater than or equal to `0`. If `Autoscale` is enabled, `Replicas` is ignored. - * _<Route>_ - Route configuration options. - * _<Service.Type>_ - The `ServiceType` used for the service resource. - * _<LogLevel>_ - The log level to be used by the Argo CD Server component. Valid options are `debug`, `info`, `error`, and `warn`. - * _<LogFormat>_ - The log format used by the Argo CD Application Controller component. Valid options are `text` or `json`. - * _<Env>_ - Environment to set for the server workloads. -|`SSO` |Single Sign-on options.|`__<Object>__` -a|* _<Image>_ - The container image for Keycloak. This overrides the `ARGOCD_KEYCLOAK_IMAGE` environment variable. - * _<Keycloak>_ - Configuration options for Keycloak SSO provider. - * _<Dex>_ - Configuration options for Dex SSO provider. - * _<Provider>_ - The name of the provider used to configure Single Sign-on. For now the supported options are Dex and Keycloak. - * _<Resources>_ - The container compute resources. - * _<VerifyTLS>_ - Whether to enforce strict TLS checking when communicating with Keycloak service. - * _<Version>_ - The tag to use with the Keycloak container image. -|`StatusBadgeEnabled` |Enable application status badge.|`true` | -|`TLS` |TLS configuration options.|`__<Object>__` -a|* _<CA.ConfigMapName>_ - The name of the `ConfigMap` which contains the CA certificate. - * _<CA.SecretName>_ - The name of the secret which contains the CA Certificate and Key. - * _<InitialCerts>_ - Initial set of certificates in the `argocd-tls-certs-cm` config map for connecting Git repositories via HTTPS. -|`UserAnonyousEnabled` |Enable anonymous user access.|`true` | -|`Version` |The tag to use with the container image for all Argo CD components.|Latest Argo CD version| -|`Banner` |Add a UI banner message.|`__<Object>__` -a|* _<Banner.Content>_ - The banner message content (required if a banner is displayed). - * _<Banner.URL.SecretName>_ - The banner message link URL (optional). -|=== - - - diff --git a/modules/gitops-configuring-argo-cd-oidc.adoc b/modules/gitops-configuring-argo-cd-oidc.adoc deleted file mode 100644 index 6220c78722b3..000000000000 --- a/modules/gitops-configuring-argo-cd-oidc.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module is included in the following assemblies: -// -// * cicd/gitops/configuring-sso-for-argo-cd-on-openshift.adoc - -[id="configuring-argo-cd-oidc_{context}"] -= Configuring Argo CD OIDC - -To configure Argo CD OpenID Connect (OIDC), you must generate your client secret, encode it, and add it to your custom resource. 
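The procedure below edits the `argocd-secret` secret interactively. As an alternative sketch, you can apply the base64-encoded value non-interactively with a single `oc patch` command, where `<client_secret>` and `<namespace>` are placeholders for your own values:

[source,terminal]
----
# Encode the client secret and patch it into the argocd-secret secret in one step
$ oc patch secret argocd-secret -n <namespace> --type=merge -p "{\"data\":{\"oidc.keycloak.clientSecret\":\"$(echo -n '<client_secret>' | base64)\"}}"
----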
- -.Prerequisites - -* You have obtained your client secret. - -.Procedure - -. Store the client secret you generated. - -.. Encode the client secret in base64: -+ -[source,terminal] ----- -$ echo -n '83083958-8ec6-47b0-a411-a8c55381fbd2' | base64 ----- - -.. Edit the secret and add the base64 value to an `oidc.keycloak.clientSecret` key: -+ -[source,terminal] ----- -$ oc edit secret argocd-secret -n <namespace> ----- -+ -.Example YAML of the secret -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: argocd-secret -data: - oidc.keycloak.clientSecret: ODMwODM5NTgtOGVjNi00N2IwLWE0MTEtYThjNTUzODFmYmQy ----- - -. Edit the `argocd` custom resource and add the OIDC configuration to enable the Keycloak authentication: -+ -[source,terminal] ----- -$ oc edit argocd -n <your_namespace> ----- -+ -.Example of `argocd` custom resource -[source,yaml] ----- -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - creationTimestamp: null - name: argocd - namespace: argocd -spec: - resourceExclusions: | - - apiGroups: - - tekton.dev - clusters: - - '*' - kinds: - - TaskRun - - PipelineRun - oidcConfig: | - name: OpenShift Single Sign-On - issuer: https://keycloak.example.com/auth/realms/myrealm <1> - clientID: argocd <2> - clientSecret: $oidc.keycloak.clientSecret <3> - requestedScopes: ["openid", "profile", "email", "groups"] <4> - server: - route: - enabled: true ----- -<1> `issuer` must end with the correct realm name (in this example `myrealm`). -<2> `clientID` is the Client ID you configured in your Keycloak account. -<3> `clientSecret` points to the right key you created in the argocd-secret secret. -<4> `requestedScopes` contains the groups claim if you did not add it to the Default scope. diff --git a/modules/gitops-configuring-argo-cd-using-dex-github-conector.adoc b/modules/gitops-configuring-argo-cd-using-dex-github-conector.adoc deleted file mode 100644 index cdc371b4783c..000000000000 --- a/modules/gitops-configuring-argo-cd-using-dex-github-conector.adoc +++ /dev/null @@ -1,33 +0,0 @@ -[id="gitops-configuring-argo-cd-using-dex-github-connector_{context}"] -= Configuring Argo CD SSO using Dex GitHub connector - -[discrete] -.Procedure - -. Register the application in the identity provider as explained link:https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/#1-register-the-application-in-the-identity-provider[here] -. Update the Argo CD CR. -. In the `dex.config` key, add the GitHub connector to the connectors sub field. See the link:https://github.com/dexidp/website/blob/main/content/docs/connectors/github.md[Dex GitHub connector documentation]. After adding the `clientID` and the `clientSecret` will be populated. -. Optional: In the `connectors.config.orgs` list, add one or more GitHub organizations. Any member of the org will then be able to login to Argo CD to perform management tasks. 
-+ -[source,yaml] ----- - apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: example-argocd - labels: - example: openshift-oauth -spec: - dex: - config: | - connectors: - # GitHub example - - type: github - id: github - name: GitHub - config: - clientID: xxxxxxxxxxxxxx - clientSecret: $dex.github.clientSecret # Alternatively $<some_K8S_secret>:dex.github.clientSecret - orgs: - - name: dummy-org ----- \ No newline at end of file diff --git a/modules/gitops-configuring-groups-and-argocd-rbac.adoc b/modules/gitops-configuring-groups-and-argocd-rbac.adoc deleted file mode 100644 index 5b5df6cd84f2..000000000000 --- a/modules/gitops-configuring-groups-and-argocd-rbac.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * configuring-sso-for-argo-cd-on-openshift.adoc - -[id="configuring-groups-and-argocd-rbac_{context}"] -= Configure groups and Argo CD RBAC - -Role-based access control (RBAC) allows you to provide relevant permissions to users. - -.Prerequisites - -* You have created the `ArgoCDAdmins` group in Keycloak. - -* The user you want to give permissions to has logged in to Argo CD. - -.Procedure - -. In the Keycloak dashboard navigate to *Users* -> *Groups*. Add the user to the Keycloak group `ArgoCDAdmins`. - -. Ensure that `ArgoCDAdmins` group has the required permissions in the `argocd-rbac` config map. -** Edit the config map: -+ -[source,terminal] ----- -$ oc edit configmap argocd-rbac-cm -n <namespace> ----- -+ -.Example of a config map that defines `admin` permissions. -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: argocd-rbac-cm -data: - policy.csv: | - g, /ArgoCDAdmins, role:admin ----- diff --git a/modules/gitops-configuring-the-groups-claim.adoc b/modules/gitops-configuring-the-groups-claim.adoc deleted file mode 100644 index 9255abba3402..000000000000 --- a/modules/gitops-configuring-the-groups-claim.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * configuring-sso-for-argo-cd-on-openshift.adoc - -[id="configuring-the-groups-claim_{context}"] -= Configuring the groups claim - -To manage users in Argo CD, you must configure a groups claim that can be included in the authentication token. - -.Procedure - -. In the Keycloak dashboard, navigate to *Client Scope* and add a new client with the following values: -Name:: `groups` -Protocol:: `openid-connect` -Display On Content Scope:: `On` -Include to Token Scope:: `On` - -. Click *Save* and navigate to `groups` -> *Mappers*. - -. Add a new token mapper with the following values: -Name:: `groups` -Mapper Type:: `Group Membership` -Token Claim Name:: `groups` -+ -The token mapper adds the `groups` claim to the token when the client requests `groups`. - -. Navigate to *Clients* -> *Client Scopes* and configure the client to provide the groups scope. Select `groups` in the *Assigned Default Client Scopes* table and click *Add selected*. The `groups` scope must be in the *Available Client Scopes* table. - -. Navigate to *Users* -> *Admin* -> *Groups* and create a group `ArgoCDAdmins`. 
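After the Keycloak group exists and the RBAC policy from the previous module is in place, you can confirm the policy that Argo CD loads by reading the `argocd-rbac-cm` config map. This is a sketch; replace `<namespace>` with the namespace of your Argo CD instance.

[source,terminal]
----
# Print the user-defined RBAC policy, including the ArgoCDAdmins group mapping
$ oc get configmap argocd-rbac-cm -n <namespace> -o jsonpath='{.data.policy\.csv}'
----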
diff --git a/modules/gitops-creating-a-new-client-in-dex.adoc b/modules/gitops-creating-a-new-client-in-dex.adoc deleted file mode 100644 index 595bc36817e1..000000000000 --- a/modules/gitops-creating-a-new-client-in-dex.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module is included in the following assemblies: -// -// * configuring-sso-for-argo-cd-on-openshift -[id="gitops-creating-a-new-client-in-dex_{context}"] -= Enabling the Dex OpenShift OAuth Connector - -Dex uses the users and groups defined within OpenShift by checking the `OAuth` server provided by the platform. The following example shows the properties of Dex along with example configurations: - -[source,yaml] ----- -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: example-argocd - labels: - example: openshift-oauth -spec: - dex: - openShiftOAuth: true <1> - groups:<2> - - default - rbac:<3> - defaultPolicy: 'role:readonly' - policy: | - g, cluster-admins, role:admin - scopes: '[groups]' ----- -<1> The `openShiftOAuth` property triggers the Operator to automatically configure the built-in OpenShift `OAuth` server when the value is set to `true`. -<2> The `groups` property allows users of the specified group(s) to log in. -<3> The RBAC policy property assigns the admin role in the Argo CD cluster to users in the OpenShift `cluster-admins` group. \ No newline at end of file diff --git a/modules/gitops-creating-a-new-client-in-keycloak.adoc b/modules/gitops-creating-a-new-client-in-keycloak.adoc deleted file mode 100644 index 9f62fc6703d8..000000000000 --- a/modules/gitops-creating-a-new-client-in-keycloak.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module is included in the following assemblies: -// -// * configuring-sso-for-argo-cd-on-openshift - -[id="creating-a-new-client-in-keycloak_{context}"] -= Creating a new client in Keycloak - -.Procedure - -. Log in to your Keycloak server, select the realm you want to use, navigate to the *Clients* page, and then click *Create* in the upper-right section of the screen. - -. Specify the following values: -Client ID:: `argocd` -Client Protocol:: `openid-connect` -Route URL:: <your-argo-cd-route-url> -Access Type:: `confidential` -Valid Redirect URIs:: <your-argo-cd-route-url>/auth/callback -Base URL:: `/applications` - -. Click *Save* to see the *Credentials* tab added to the *Client* page. - -. Copy the secret from the *Credentials* tab for further configuration. diff --git a/modules/gitops-creating-a-new-client-using-keycloak.adoc b/modules/gitops-creating-a-new-client-using-keycloak.adoc deleted file mode 100644 index edf490e1d0f5..000000000000 --- a/modules/gitops-creating-a-new-client-using-keycloak.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// * configuring-sso-for-argo-cd-using-keycloak.adoc - -:_content-type: PROCEDURE -[id="gitops-creating-a-new-client-in-keycloak_{context}"] -= Configuring a new client in Keycloak - -Dex is installed by default for all the Argo CD instances created by the Operator. However, you can delete the Dex configuration and add Keycloak instead to log in to Argo CD using your OpenShift credentials. Keycloak acts as an identity broker between Argo CD and OpenShift. - -.Procedure - -To configure Keycloak, follow these steps: - -. Delete the Dex configuration by removing the `.spec.sso.dex` parameter from the Argo CD custom resource (CR), and save the CR: -+ -[source,yaml] ----- -dex: - openShiftOAuth: true - resources: - limits: - cpu: - memory: - requests: - cpu: - memory: ----- - -. 
Set the value of the `provider` parameter to `keycloak` in the Argo CD CR. - -. Configure Keycloak by performing one of the following steps: - -* For a secure connection, set the value of the `rootCA` parameter as shown in the following example: -+ -[source,yaml] ----- -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: example-argocd - labels: - example: basic -spec: - sso: - provider: keycloak - keycloak: - rootCA: "<PEM-encoded-root-certificate>" <1> - server: - route: - enabled: true ----- -<1> A custom certificate used to verify the Keycloak's TLS certificate. -+ -The Operator reconciles changes in the `.spec.keycloak.rootCA` parameter and updates the `oidc.config` parameter with the PEM encoded root certificate in the `argocd-cm` configuration map. - -* For an insecure connection, leave the value of the `rootCA` parameter empty and use the `oidc.tls.insecure.skip.verify` parameter as shown below: -+ -[source,yaml] ----- -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: example-argocd - labels: - example: basic -spec: - extraConfig: - oidc.tls.insecure.skip.verify: "true" - sso: - provider: keycloak - keycloak: - rootCA: "" ----- - -[NOTE] -==== -The Keycloak instance takes 2-3 minutes to install and run. -==== - diff --git a/modules/gitops-creating-an-application-by-using-the-argo-cd-dashboard.adoc b/modules/gitops-creating-an-application-by-using-the-argo-cd-dashboard.adoc deleted file mode 100644 index f28510a4af03..000000000000 --- a/modules/gitops-creating-an-application-by-using-the-argo-cd-dashboard.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * configuring-an-openshift-cluster-with-argo-cd.adoc -// * depoying-an-application-with-argo-cd.adoc - -ifeval::["{context}" == "configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations"] -:cluster: -endif::[] -ifeval::["{context}" == "deploying-a-spring-boot-application-with-argo-cd"] -:app: -endif::[] - -:_content-type: PROCEDURE -[id="creating-an-application-by-using-the-argo-cd-dashboard_{context}"] -= Creating an application by using the Argo CD dashboard - -Argo CD provides a dashboard which allows you to create applications. - -ifdef::cluster[] -This sample workflow walks you through the process of configuring Argo CD to recursively sync the content of the `cluster` directory to the `cluster-configs` application. The directory defines the {product-title} web console cluster configurations that add a link to the *Red Hat Developer Blog - Kubernetes* under the {rh-app-icon} menu in the web console, and defines a namespace `spring-petclinic` on the cluster. -endif::cluster[] - -.Procedure - -. In the Argo CD dashboard, click *NEW APP* to add a new Argo CD application. - -ifdef::cluster[] -. For this workflow, create a *cluster-configs* application with the following configurations: -+ -Application Name:: `cluster-configs` -Project:: `default` -Sync Policy:: `Manual` -Repository URL:: `https://github.com/redhat-developer/openshift-gitops-getting-started` -Revision:: `HEAD` -Path:: `cluster` -Destination:: `https://kubernetes.default.svc` -Namespace:: `spring-petclinic` -Directory Recurse:: `checked` -endif::cluster[] - -ifdef::app[] -. 
For this workflow, create a *spring-petclinic* application with the following configurations: -+ -Application Name:: `spring-petclinic` -Project:: `default` -Sync Policy:: `Automatic` -Repository URL:: `https://github.com/redhat-developer/openshift-gitops-getting-started` -Revision:: `HEAD` -Path:: `app` -Destination:: `https://kubernetes.default.svc` -Namespace:: `spring-petclinic` -endif::app[] - -. Click *CREATE* to create your application. - -. Open the *Administrator* perspective of the web console and navigate to *Administration* -> *Namespaces* in the menu on the left. - -. Search for and select the namespace, then enter `argocd.argoproj.io/managed-by=openshift-gitops` in the *Label* field so that the Argo CD instance in the `openshift-gitops` namespace can manage your namespace. diff --git a/modules/gitops-creating-an-application-by-using-the-oc-tool.adoc b/modules/gitops-creating-an-application-by-using-the-oc-tool.adoc deleted file mode 100644 index ff283c613777..000000000000 --- a/modules/gitops-creating-an-application-by-using-the-oc-tool.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * configuring-an-openshift-cluster-with-argo-cd.adoc -// * depoying-an-application-with-argo-cd.adoc - -ifeval::["{context}" == "configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations"] -:cluster: -endif::[] -ifeval::["{context}" == "deploying-a-spring-boot-application-with-argo-cd"] -:app: -endif::[] - -:_content-type: PROCEDURE -[id="creating-an-application-by-using-the-oc-tool_{context}"] -= Creating an application by using the `oc` tool - -You can create Argo CD applications in your terminal by using the `oc` tool. - -.Procedure - -. Download link:https://github.com/redhat-developer/openshift-gitops-getting-started[the sample application]: -+ -[source,terminal] ----- -$ git clone git@github.com:redhat-developer/openshift-gitops-getting-started.git ----- - -. Create the application: -ifdef::app[] -+ -[source,terminal] ----- -$ oc create -f openshift-gitops-getting-started/argo/app.yaml ----- -endif::app[] - -ifdef::cluster[] -+ -[source,terminal] ----- -$ oc create -f openshift-gitops-getting-started/argo/app.yaml ----- -endif::cluster[] - -. Run the `oc get` command to review the created application: -+ -[source,terminal] ----- -$ oc get application -n openshift-gitops ----- - -. Add a label to the namespace your application is deployed in so that the Argo CD instance in the `openshift-gitops` namespace can manage it: - -ifdef::app[] -+ -[source,terminal] ----- -$ oc label namespace spring-petclinic argocd.argoproj.io/managed-by=openshift-gitops ----- -endif::app[] -ifdef::cluster[] -+ -[source,terminal] ----- -$ oc label namespace spring-petclinic argocd.argoproj.io/managed-by=openshift-gitops ----- -endif::cluster[] diff --git a/modules/gitops-customize-argo-cd-consolelink.adoc b/modules/gitops-customize-argo-cd-consolelink.adoc deleted file mode 100644 index 5b97884ca019..000000000000 --- a/modules/gitops-customize-argo-cd-consolelink.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assembly: -// -// * cicd/gitops/setting-up-argocd-instance.adoc - -:_content-type: PROCEDURE -[id="gitops-customize-argo-cd-consolelink_{context}"] -= Customizing the Argo CD console link - -In a multi-tenant cluster, users might have to deal with multiple instances of Argo CD. 
For example, after installing an Argo CD instance in your namespace, you might find a different Argo CD instance attached to the Argo CD console link, instead of your own Argo CD instance, in the Console Application Launcher. - -You can customize the Argo CD console link by setting the `DISABLE_DEFAULT_ARGOCD_CONSOLELINK` environment variable: - -* When you set `DISABLE_DEFAULT_ARGOCD_CONSOLELINK` to `true`, the Argo CD console link is permanently deleted. -* When you set `DISABLE_DEFAULT_ARGOCD_CONSOLELINK` to `false` or use the default value, the Argo CD console link is temporarily deleted and visible again when the Argo CD route is reconciled. - -.Prerequisites -* You have logged in to the {product-title} cluster as an administrator. -* You have installed the {gitops-title} Operator. - -.Procedure - -. In the *Administrator* perspective, navigate to *Administration* -> *CustomResourceDefinitions*. -. Find the *Subscription* CRD and click to open it. -. Select the *Instances* tab and click the *openshift-gitops-operator* subscription. -. Select the *YAML* tab and make your customization: -** To enable or disable the Argo CD console link, edit the value of `DISABLE_DEFAULT_ARGOCD_CONSOLELINK` as needed: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: openshift-gitops-operator -spec: - config: - env: - - name: DISABLE_DEFAULT_ARGOCD_CONSOLELINK - value: 'true' ----- diff --git a/modules/gitops-default-permissions-of-an-argocd-instance.adoc b/modules/gitops-default-permissions-of-an-argocd-instance.adoc deleted file mode 100644 index e039d6bd1b98..000000000000 --- a/modules/gitops-default-permissions-of-an-argocd-instance.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc - -:_content-type: PROCEDURE -[id="default-permissions-of-an-argocd-instance_{context}"] - -= Default permissions of an Argo CD instance - -By default, an Argo CD instance has the following permissions: - -* An Argo CD instance has `admin` privileges to manage resources only in the namespace where it is deployed. For example, an Argo CD instance deployed in the **foo** namespace has `admin` privileges to manage resources only for that namespace. - -* Argo CD has the following cluster-scoped permissions because Argo CD requires cluster-wide `read` privileges on resources to function appropriately: -+ -[source,yaml] ----- -- verbs: - - get - - list - - watch - apiGroups: - - '*' - resources: - - '*' - - verbs: - - get - - list - nonResourceURLs: - - '*' ----- - -[NOTE] -==== -* You can edit the cluster roles used by the `argocd-server` and `argocd-application-controller` components where Argo CD is running such that the `write` privileges are limited to only the namespaces and resources that you wish Argo CD to manage. 
-+ -[source,terminal] ----- -$ oc edit clusterrole argocd-server -$ oc edit clusterrole argocd-application-controller ----- -==== - diff --git a/modules/gitops-deploy-resources-different-namespaces.adoc b/modules/gitops-deploy-resources-different-namespaces.adoc deleted file mode 100644 index d3e680239a10..000000000000 --- a/modules/gitops-deploy-resources-different-namespaces.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/gitops/setting-up-argocd-instance.adoc - -:_content-type: PROCEDURE -[id="gitops-deploy-resources-different-namespaces_{context}"] -= Deploying resources to a different namespace - -To allow Argo CD to manage resources in other namespaces apart from where it is installed, configure the target namespace with a `argocd.argoproj.io/managed-by` label. - -.Procedure - -* Configure the namespace: -+ -[source,terminal] ----- -$ oc label namespace <namespace> \ -argocd.argoproj.io/managed-by=<namespace> <1> ----- -<1> The namespace where Argo CD is installed. - diff --git a/modules/gitops-dex-role-mappings.adoc b/modules/gitops-dex-role-mappings.adoc deleted file mode 100644 index 2d253c8c9f47..000000000000 --- a/modules/gitops-dex-role-mappings.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module is included in the following assemblies: -// -// * configuring-sso-for-argo-cd-on-openshift - -:_content-type: PROCEDURE -[id="gitops-dex-role-mappings_{context}"] -= Mapping users to specific roles - -Argo CD cannot map users to specific roles if they have a direct `ClusterRoleBinding` role. You can manually change the role as `role:admin` on SSO through OpenShift. - -.Procedure - -. Create a group named `cluster-admins`. -+ -[source,terminal] ----- -$ oc adm groups new cluster-admins ----- -. Add the user to the group. -+ -[source,terminal] ----- -$ oc adm groups add-users cluster-admins USER ----- -. Apply the `cluster-admin` `ClusterRole` to the group: -+ -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-group cluster-admin cluster-admins ----- \ No newline at end of file diff --git a/modules/gitops-disable-dex-using-spec-sso.adoc b/modules/gitops-disable-dex-using-spec-sso.adoc deleted file mode 100644 index 4a649af5d8ce..000000000000 --- a/modules/gitops-disable-dex-using-spec-sso.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module is included in the following assemblies: -// -// * configuring-sso-for-argo-cd-using-dex - -:_content-type: PROCEDURE -[id="gitops-disable-dex-using-spec-sso_{context}"] -= Enabling or disabling Dex using .spec.sso - -You can configure {gitops-title} to use Dex as its SSO authentication provider by setting the `.spec.sso` parameter. - -.Procedure - -. To enable Dex, set the `.spec.sso.provider: dex` parameter in the YAML resource of the Operator: - -+ -[source,yaml] ----- -... -spec: - sso: - provider: dex - dex: - openShiftOAuth: true -... ----- -+ -. To disable dex, either remove the `spec.sso` element from the Argo CD custom resource, or specify a different SSO provider. \ No newline at end of file diff --git a/modules/gitops-disable-dex.adoc b/modules/gitops-disable-dex.adoc deleted file mode 100644 index 79412ef2a905..000000000000 --- a/modules/gitops-disable-dex.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module is included in the following assemblies: -// -// * configuring-sso-for-argo-cd-using-dex - -:_content-type: PROCEDURE -[id="gitops-disable-dex_{context}"] -= Disabling Dex - -Dex is installed by default for all the Argo CD instances created by the Operator. 
You can configure {gitops-title} to use Dex as the SSO authentication provider by setting the `.spec.dex` parameter. - -[IMPORTANT] -==== -In {gitops-title} v1.6.0, `DISABLE_DEX` is deprecated and is planned to be removed in {gitops-title} v1.10.0. Consider using the `.spec.sso.dex` parameter instead. See "Enabling or disabling Dex using .spec.sso". -==== - -.Procedure - -* Set the environmental variable `DISABLE_DEX` to `true` in the YAML resource of the Operator: -+ -[source,yaml] ----- -... -spec: - config: - env: - - name: DISABLE_DEX - value: "true" -... ----- \ No newline at end of file diff --git a/modules/gitops-disabling-monitoring-for-argo-cd-custom-resource-workloads.adoc b/modules/gitops-disabling-monitoring-for-argo-cd-custom-resource-workloads.adoc deleted file mode 100644 index 18c62bb219a0..000000000000 --- a/modules/gitops-disabling-monitoring-for-argo-cd-custom-resource-workloads.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * /cicd/gitops/monitoring-argo-cd-custom-resource-workloads.adoc - -:_content-type: PROCEDURE -[id="gitops-disabling-monitoring-for-argo-cd-custom-resource-workloads_{context}"] -= Disabling Monitoring for Argo CD custom resource workloads - -You can disable workload monitoring for specific Argo CD instances. Disabling workload monitoring deletes the created PrometheusRule. - -.Procedure - -* Set the `.spec.monitoring.enabled` field value to `false` on a given Argo CD instance: -+ -.Example Argo CD custom resource - -[source,yaml] ----- -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: example-argocd - labels: - example: repo -spec: - ... - monitoring: - enabled: false - ... ----- \ No newline at end of file diff --git a/modules/gitops-enable-replicas-for-argo-cd-server.adoc b/modules/gitops-enable-replicas-for-argo-cd-server.adoc deleted file mode 100644 index 1565ee1064d7..000000000000 --- a/modules/gitops-enable-replicas-for-argo-cd-server.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/gitops/setting-up-argocd-instance.adoc - -:_content-type: PROCEDURE -[id="gitops-enable-replicas-for-argo-cd-server_{context}"] -= Enabling replicas for Argo CD server and repo server - -Argo CD-server and Argo CD-repo-server workloads are stateless. To better distribute your workloads among pods, you can increase the number of Argo CD-server and Argo CD-repo-server replicas. However, if a horizontal autoscaler is enabled on the Argo CD-server, it overrides the number of replicas you set. - -.Procedure - -* Set the `replicas` parameters for the `repo` and `server` spec to the number of replicas you want to run: -+ -.Example Argo CD custom resource - -[source,yaml] ----- -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: example-argocd - labels: - example: repo -spec: - repo: - replicas: <number_of_replicas> - server: - replicas: <number_of_replicas> - route: - enabled: true - path: / - tls: - insecureEdgeTerminationPolicy: Redirect - termination: passthrough - wildcardPolicy: None ----- \ No newline at end of file diff --git a/modules/gitops-enabling-dex.adoc b/modules/gitops-enabling-dex.adoc deleted file mode 100644 index bb8abaae5f90..000000000000 --- a/modules/gitops-enabling-dex.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module is included in the following assemblies: -// -// * configuring-sso-for-argo-cd-on-openshift - -[id="enabling-dex_{context}"] -= Enabling Dex - -Argo CD embeds and bundles Dex as part of its installation. 
Dex is an identity service that uses OpenID Connect to drive authentication for other apps. - -.Procedure - -. Enable Dex by updating the `Subscription` resource for the OpenShift GitOps Operator. -+ -[source,yaml] ----- -spec: - config: - env: - - name: DISABLE_DEX - value: "false" ----- -+ -This update causes the `argocd-cluster-dex-server` instance to run. - -. To enable login with {product-title}, update the `argo-cd` custom resource by adding the following field: -+ -[source,yaml] ----- -spec: - dex: - openShiftOAuth: true ----- - -. Enable role-based access control (RBAC) on `argo-cd` by modifying the following fields: -+ -[source,yaml] ----- -spec: - dex: - openShiftOAuth: true - rbac: - defaultPolicy: 'role:readonly' - policy: | - g, system:cluster-admins, role:admin - scopes: '[groups]' ----- diff --git a/modules/gitops-enabling-monitoring-for-argo-cd-custom-resource-workloads.adoc b/modules/gitops-enabling-monitoring-for-argo-cd-custom-resource-workloads.adoc deleted file mode 100644 index 32139dab191e..000000000000 --- a/modules/gitops-enabling-monitoring-for-argo-cd-custom-resource-workloads.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * /cicd/gitops/monitoring-argo-cd-custom-resource-workloads.adoc - -:_content-type: PROCEDURE -[id="gitops-enabling-monitoring-for-argo-cd-custom-resource-workloads_{context}"] -= Enabling Monitoring for Argo CD custom resource workloads - -By default, the monitoring configuration for Argo CD custom resource workloads is set to `false`. - -With {gitops-title}, you can enable workload monitoring for specific Argo CD instances. As a result, the Operator creates a `PrometheusRule` object that contains alert rules for all the workloads managed by the specific Argo CD instances. These alert rules trigger an alert when the replica count of the corresponding component has drifted from the desired state for a certain amount of time. The Operator does not overwrite changes that users make to the `PrometheusRule` object. - -.Procedure - -. Set the `.spec.monitoring.enabled` field value to `true` on a given Argo CD instance: -+ -.Example Argo CD custom resource - -[source,yaml] ----- -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: example-argocd - labels: - example: repo -spec: - ... - monitoring: - enabled: true - ... ----- - -. Verify whether an alert rule is included in the `PrometheusRule` object created by the Operator: -+ -.Example alert rule - -[source,yaml] ----- -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: argocd-component-status-alert - namespace: openshift-gitops -spec: - groups: - - name: ArgoCDComponentStatus - rules: - ... - - alert: ApplicationSetControllerNotReady <1> - annotations: - message: >- - applicationSet controller deployment for Argo CD instance in - namespace "default" is not running - expr: >- - kube_statefulset_status_replicas{statefulset="openshift-gitops-application-controller statefulset", - namespace="openshift-gitops"} != - kube_statefulset_status_replicas_ready{statefulset="openshift-gitops-application-controller statefulset", - namespace="openshift-gitops"} - for: 1m - labels: - severity: critical ----- -<1> Alert rule in the `PrometheusRule` object that checks whether the workloads created by the Argo CD instances are running as expected. 
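To confirm that the rule exists, a quick check such as the following, assuming the default `openshift-gitops` namespace used in the example above, lists the `PrometheusRule` objects in that namespace:

[source,terminal]
----
$ oc get prometheusrules -n openshift-gitops
----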
diff --git a/modules/gitops-in-built-permissions.adoc b/modules/gitops-in-built-permissions.adoc deleted file mode 100644 index 8b11bab53b23..000000000000 --- a/modules/gitops-in-built-permissions.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module is included in the following assemblies: -// -// * openshift-docs/cicd/gitops/configuring-a-cluster-to-use-gitops.adoc - -[id="in-built-permissions_{context}"] -= In-built permissions for Argo CD - -This section lists the permissions that are granted to ArgoCD to manage specific cluster-scoped resources which include cluster operators, optional OLM operators, and user management. Note that ArgoCD is not granted `cluster-admin` permissions. - -.Permissions granted to Argo CD -|========================== -|Resource group|What it configures for a user or an administrator -|operators.coreos.com |Optional operators managed by OLM -|user.openshift.io, rbac.authorization.k8s.io |Groups, Users, and their permissions -|config.openshift.io |Control plane operators managed by CVO used to configure cluster-wide build configuration, registry configuration, and scheduler policies -|storage.k8s.io |Storage -|console.openshift.io|Console customization -|========================== diff --git a/modules/gitops-inbuilt-permissions-for-cluster-config.adoc b/modules/gitops-inbuilt-permissions-for-cluster-config.adoc deleted file mode 100644 index db5440fb4cc9..000000000000 --- a/modules/gitops-inbuilt-permissions-for-cluster-config.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assembly: -// -// * configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc - -:_content-type: REFERENCE -[id="gitops-inbuilt-permissions-for-cluster-config_{context}"] -= In-built permissions for cluster configuration - -By default, the Argo CD instance has permissions to manage specific cluster-scoped resources such as cluster Operators, optional OLM Operators and user management. - -[NOTE] -==== -Argo CD does not have cluster-admin permissions. -==== - -Permissions for the Argo CD instance: -|=== -|**Resources** |**Descriptions** -|Resource Groups | Configure the user or administrator -|`operators.coreos.com` | Optional Operators managed by OLM -|`user.openshift.io` , `rbac.authorization.k8s.io` | Groups, Users and their permissions -|`config.openshift.io` | Control plane Operators managed by CVO used to configure cluster-wide build configuration, registry configuration and scheduler policies -|`storage.k8s.io` | Storage -|`console.openshift.io` | Console customization -|=== \ No newline at end of file diff --git a/modules/gitops-installing-olm-operators-using-gitops.adoc b/modules/gitops-installing-olm-operators-using-gitops.adoc deleted file mode 100644 index 84a00f1d15b6..000000000000 --- a/modules/gitops-installing-olm-operators-using-gitops.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assembly: -// -// * configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc - -:_content-type: PROCEDURE -[id="gitops-installing-olm-operators-using-gitops_{context}"] -= Installing OLM Operators using {gitops-title} - -{gitops-title} with cluster configurations manages specific cluster-scoped resources and takes care of installing cluster Operators or any namespace-scoped OLM Operators. - -Consider a case where as a cluster administrator, you have to install an OLM Operator such as Tekton. 
You can use the {product-title} web console to manually install a Tekton Operator, or use the OpenShift CLI to manually install a Tekton subscription and Tekton Operator group on your cluster. - -{gitops-title} places your Kubernetes resources in your Git repository. As a cluster administrator, use {gitops-title} to manage and automate the installation of other OLM Operators without any manual procedures. For example, after you place the Tekton subscription in your Git repository by using {gitops-title}, {gitops-title} automatically takes this Tekton subscription from your Git repository and installs the Tekton Operator on your cluster. - -== Installing cluster-scoped Operators - -Operator Lifecycle Manager (OLM) uses a default `global-operators` Operator group in the `openshift-operators` namespace for cluster-scoped Operators. Therefore, you do not have to manage the `OperatorGroup` resource in your GitOps repository. However, for namespace-scoped Operators, you must manage the `OperatorGroup` resource in that namespace. - -To install cluster-scoped Operators, create and place the `Subscription` resource of the required Operator in your Git repository. - -.Example: Grafana Operator subscription - -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: grafana -spec: - channel: v4 - installPlanApproval: Automatic - name: grafana-operator - source: redhat-operators - sourceNamespace: openshift-marketplace ----- - -== Installing namespace-scoped Operators - -To install namespace-scoped Operators, create and place the `Subscription` and `OperatorGroup` resources of the required Operator in your Git repository. - -.Example: Ansible Automation Platform Resource Operator - -[source,yaml] ----- -... -apiVersion: v1 -kind: Namespace -metadata: - labels: - openshift.io/cluster-monitoring: "true" - name: ansible-automation-platform -... -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: ansible-automation-platform-operator - namespace: ansible-automation-platform -spec: - targetNamespaces: - - ansible-automation-platform -... -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: ansible-automation-platform - namespace: ansible-automation-platform -spec: - channel: patch-me - installPlanApproval: Automatic - name: ansible-automation-platform-operator - source: redhat-operators - sourceNamespace: openshift-marketplace -... ----- - -[IMPORTANT] -==== -When deploying multiple Operators using {gitops-title}, you must create only a single Operator group in the corresponding namespace. If more than one Operator group exists in a single namespace, any CSV created in that namespace transitions to a `failure` state with the `TooManyOperatorGroups` reason. After the number of Operator groups in their corresponding namespaces reaches one, all the previous `failure` state CSVs transition to the `pending` state. You must manually approve the pending install plan to complete the Operator installation. 
-==== - diff --git a/modules/gitops-keycloak-identity-brokering-with-openshift-oauthclient.adoc b/modules/gitops-keycloak-identity-brokering-with-openshift-oauthclient.adoc deleted file mode 100644 index 1387e585bb76..000000000000 --- a/modules/gitops-keycloak-identity-brokering-with-openshift-oauthclient.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module is included in the following assemblies: -// -// * - -[id="keycloak-identity-brokering-with-openshift-oauthclient_{context}"] -= Keycloak Identity Brokering with {product-title} - -You can configure a Keycloak instance to use {product-title} for authentication through Identity Brokering. This allows for Single Sign-On (SSO) between the {product-title} cluster and the Keycloak instance. - -.Prerequisites - -* `jq` CLI tool is installed. - - -.Procedure - -. Obtain the {product-title} API URL: -+ -[source,terminal] ----- -$ curl -s -k -H "Authorization: Bearer $(oc whoami -t)" https://<openshift-user-facing-api-url>/apis/config.openshift.io/v1/infrastructures/cluster | jq ".status.apiServerURL". ----- -+ -[NOTE] -==== -The address of the {product-title} API is often protected by HTTPS. Therefore, you must configure X509_CA_BUNDLE in the container and set it to `/var/run/secrets/kubernetes.io/serviceaccount/ca.crt`. Otherwise, Keycloak cannot communicate with the API Server. -==== - -. In the Keycloak server dashboard, navigate to *Identity Providers* and select *Openshift v4*. Specify the following values: -*Base Url*:: {product-title} 4 API URL -*Client ID*:: `keycloak-broker` -*Client Secret*:: A secret that you want define -+ -Now you can log in to Argo CD with your {product-title} credentials through Keycloak as an Identity Broker. diff --git a/modules/gitops-logging-into-keycloak.adoc b/modules/gitops-logging-into-keycloak.adoc deleted file mode 100644 index a6e1d69fa243..000000000000 --- a/modules/gitops-logging-into-keycloak.adoc +++ /dev/null @@ -1,68 +0,0 @@ -:_content-type: PROCEDURE -[id="gitops-logging-into-keycloak_{context}"] -= Logging in to Keycloak - -Log in to the Keycloak console to manage identities or roles and define the permissions assigned to the various roles. - -.Prerequisites - -* The default configuration of Dex is removed. -* Your Argo CD CR must be configured to use the Keycloak SSO provider. - -.Procedure - -. Get the Keycloak route URL for login: -+ -[source,terminal] ----- -$ oc -n argocd get route keycloak - -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -keycloak keycloak-default.apps.ci-ln-******.origin-ci-int-aws.dev.**.com keycloak <all> reencrypt None ----- -. Get the Keycloak pod name that stores the user name and password as environment variables: -+ -[source,terminal] ----- -$ oc -n argocd get pods - -NAME READY STATUS RESTARTS AGE -keycloak-1-2sjcl 1/1 Running 0 45m ----- -.. Get the Keycloak user name: -+ -[source,terminal] ----- -$ oc -n argocd exec keycloak-1-2sjcl -- "env" | grep SSO_ADMIN_USERNAME - -SSO_ADMIN_USERNAME=<username> ----- -.. Get the Keycloak password: -+ -[source,terminal] ----- -$ oc -n argocd exec keycloak-1-2sjcl -- "env" | grep SSO_ADMIN_PASSWORD - -SSO_ADMIN_PASSWORD=<password> ----- -. On the login page, click *LOG IN VIA KEYCLOAK*. -+ -[NOTE] -==== -You only see the option *LOGIN VIA KEYCLOAK* after the Keycloak instance is ready. -==== -. Click *Login with OpenShift*. -+ -[NOTE] -==== -Login using `kubeadmin` is not supported. -==== -+ -. Enter the OpenShift credentials to log in. -. Optional: By default, any user logged in to Argo CD has read-only access. 
You can manage user-level access by updating the `argocd-rbac-cm` config map: -+ -[source,yaml] ----- -policy.csv: -<name>, <email>, role:admin ----- diff --git a/modules/gitops-registering-an-additional-oauth-client.adoc b/modules/gitops-registering-an-additional-oauth-client.adoc deleted file mode 100644 index 245fe545d673..000000000000 --- a/modules/gitops-registering-an-additional-oauth-client.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module is included in the following assemblies: -// -// * configuring-sso-for-argo-cd-on-openshift - -[id="registering-an-additional-oauth-client_{context}"] -= Registering an additional OAuth client - -If you need an additional OAuth client to manage authentication for your {product-title} cluster, you can register one. - -.Procedure - -* To register your client: -+ -[source,terminal] ----- -$ oc create -f <(echo ' -kind: OAuthClient -apiVersion: oauth.openshift.io/v1 -metadata: - name: keycloak-broker <1> -secret: "..." <2> -redirectURIs: -- "https://keycloak-keycloak.apps.dev-svc-4.7-020201.devcluster.openshift.com/auth/realms/myrealm/broker/openshift-v4/endpoint" <3> -grantMethod: prompt <4> -') ----- - -<1> The name of the OAuth client is used as the `client_id` parameter when making requests to `<namespace_route>/oauth/authorize` and `<namespace_route>/oauth/token`. -<2> The `secret` is used as the `client_secret` parameter when making requests to `<namespace_route>/oauth/token`. -<3> The `redirect_uri` parameter specified in requests to `<namespace_route>/oauth/authorize` and `<namespace_route>/oauth/token` must be equal to or prefixed by one of the URIs listed in the `redirectURIs` parameter value. -<4> If the user has not granted access to this client, the `grantMethod` determines which action to take when this client requests tokens. Specify `auto` to automatically approve the grant and retry the request, or `prompt` to prompt the user to approve or deny the grant. diff --git a/modules/gitops-registering-an-oauth-client.adoc b/modules/gitops-registering-an-oauth-client.adoc deleted file mode 100644 index 85957c2b49c5..000000000000 --- a/modules/gitops-registering-an-oauth-client.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module is included in the following assemblies: -// -// * configuring-sso-for-argo-cd-on-openshift - -[id="registering-an-additional-oauth-client_{context}"] -= Registering an additional OAuth client - -If you need an additional OAuth client to manage authentication for your {product-title} cluster, you can register one. - -.Procedure - -* To register your client: -+ -[source,terminal] ----- -$ oc create -f <(echo ' -kind: OAuthClient -apiVersion: oauth.openshift.io/v1 -metadata: - name: keycloak-broker <1> -secret: "..." <2> -redirectURIs: -- "https://keycloak-keycloak.apps.dev-svc-4.7-020201.devcluster.openshift.com/auth/realms/myrealm/broker/openshift-v4/endpoint" <3> -grantMethod: prompt <4> -') ----- - -<1> The name of the OAuth client is used as the `client_id` parameter when making requests to `<namespace_route>/oauth/authorize` and `<namespace_route>/oauth/token`. -<2> The `secret` is used as the `client_secret` parameter when making requests to `<namespace_route>/oauth/token`. -<3> The `redirect_uri` parameter specified in requests to `<namespace_route>/oauth/authorize` and `<namespace_route>/oauth/token` must be equal to or prefixed by one of the URIs listed in the `redirectURIs` parameter value. 
-<4> If the user has not granted access to this client, the `grantMethod` determines which action to take when this client requests tokens. Specify `auto` to automatically approve the grant and retry the request, or `prompt` to prompt the user to approve or deny the grant. diff --git a/modules/gitops-release-notes-1-1.adoc b/modules/gitops-release-notes-1-1.adoc deleted file mode 100644 index c942d0627bed..000000000000 --- a/modules/gitops-release-notes-1-1.adoc +++ /dev/null @@ -1,133 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -[id="gitops-release-notes-1-1_{context}"] -= Release notes for {gitops-title} 1.1 - -{gitops-title} 1.1 is now available on {product-title} 4.7. - -[id="support-matrix-1-1_{context}"] -== Support matrix - -Some features in this release are currently in Technology Preview. These experimental features are not intended for production use. - -link:https://access.redhat.com/support/offerings/techpreview[Technology Preview Features Support Scope] - -In the table below, features are marked with the following statuses: - -- *TP*: _Technology Preview_ - -- *GA*: _General Availability_ - -Note the following scope of support on the Red Hat Customer Portal for these features: - -.Support matrix -[cols="1,1",options="header"] -|=== -| Feature | {gitops-title} 1.1 -| Argo CD -| GA -| Argo CD ApplicationSet -| TP -| {gitops-title} Application Manager CLI (`kam`) -| TP -|=== - -[id="new-features-1-1_{context}"] -== New features -In addition to the fixes and stability improvements, the following sections highlight what is new in {gitops-title} 1.1: - -* The `ApplicationSet` feature is now added (Technology Preview). The `ApplicationSet` feature enables both automation and greater flexibility when managing Argo CD applications across a large number of clusters and within monorepos. It also makes self-service usage possible on multitenant Kubernetes clusters. -* Argo CD is now integrated with cluster logging stack and with the {product-title} Monitoring and Alerting features. -* Argo CD auth is now integrated with {product-title}. -* Argo CD applications controller now supports horizontal scaling. -* Argo CD Redis servers now support high availability (HA). - -[id="fixed-issues-1-1_{context}"] -== Fixed issues -The following issues were resolved in the current release: - -* Previously, {gitops-title} did not work as expected in a proxy server setup with active global proxy settings. This issue is fixed and now Argo CD is configured by the {gitops-title} Operator using fully qualified domain names (FQDN) for the pods to enable communication between components. link:https://issues.redhat.com/browse/GITOPS-703[GITOPS-703] -* The {gitops-title} backend relies on the `?ref=` query parameter in the {gitops-title} URL to make API calls. Previously, this parameter was not read from the URL, causing the backend to always consider the default reference. This issue is fixed and the {gitops-title} backend now extracts the reference query parameter from the {gitops-title} URL and only uses the default reference when there is no input reference provided. link:https://issues.redhat.com/browse/GITOPS-817[GITOPS-817] -* Previously, the {gitops-title} backend failed to find the valid GitLab repository. This was because the {gitops-title} backend checked for `main` as the branch reference, instead of `master` in the GitLab repository. This issue is fixed now. 
link:https://issues.redhat.com/browse/GITOPS-768[GITOPS-768] -* The *Environments* page in the *Developer* perspective of the {product-title} web console now shows the list of applications and the number of environments. This page also displays an Argo CD link that directs you to the Argo CD *Applications* page that lists all the applications. The Argo CD *Applications* page has *LABELS* (for example, `app.kubernetes.io/name=appName`) that help you filter only the applications of your choice. link:https://issues.redhat.com/browse/GITOPS-544[GITOPS-544] - - -[id="known-issues-1-1_{context}"] -== Known issues -These are the known issues in {gitops-title} 1.1: - -* {gitops-title} does not support Helm v2 and ksonnet. -* The Red Hat SSO (RH SSO) Operator is not supported in disconnected clusters. As a result, the {gitops-title} Operator and RH SSO integration is not supported in disconnected clusters. -* When you delete an Argo CD application from the {product-title} web console, the Argo CD application gets deleted in the user interface, but the deployments are still present in the cluster. As a workaround, delete the Argo CD application from the Argo CD console. link:https://issues.redhat.com/browse/GITOPS-830[GITOPS-830] - - -[id="breaking-change-1-1_{context}"] -== Breaking Change -=== Upgrading from {gitops-title} v1.0.1 - -When you upgrade from {gitops-title} `v1.0.1` to `v1.1`, the {gitops-title} Operator renames the default Argo CD instance created in the `openshift-gitops` namespace from `argocd-cluster` to `openshift-gitops`. - -This is a breaking change and needs the following steps to be performed manually, before the upgrade: - -. Go to the {product-title} web console and copy the content of the `argocd-cm.yml` config map file in the `openshift-gitops` namespace to a local file. The content may look like the following example: -+ -.Example argocd config map YAML -[source,yaml] ----- -kind: ConfigMap -apiVersion: v1 -metadata: -selfLink: /api/v1/namespaces/openshift-gitops/configmaps/argocd-cm -resourceVersion: '112532' -name: argocd-cm -uid: f5226fbc-883d-47db-8b53-b5e363f007af -creationTimestamp: '2021-04-16T19:24:08Z' -managedFields: -... -namespace: openshift-gitops -labels: - app.kubernetes.io/managed-by: argocd-cluster - app.kubernetes.io/name: argocd-cm - app.kubernetes.io/part-of: argocd -data: "" <1> -admin.enabled: 'true' -statusbadge.enabled: 'false' -resource.exclusions: | - - apiGroups: - - tekton.dev - clusters: - - '*' - kinds: - - TaskRun - - PipelineRun -ga.trackingid: '' -repositories: | - - type: git - url: https://github.com/user-name/argocd-example-apps -ga.anonymizeusers: 'false' -help.chatUrl: '' -url: >- - https://argocd-cluster-server-openshift-gitops.apps.dev-svc-4.7-041614.devcluster.openshift.com "" <2> -help.chatText: '' -kustomize.buildOptions: '' -resource.inclusions: '' -repository.credentials: '' -users.anonymous.enabled: 'false' -configManagementPlugins: '' -application.instanceLabelKey: '' ----- -<1> Restore only the `data` section of the content in the `argocd-cm.yml` config map file manually. -<2> Replace the URL value in the config map entry with the new instance name `openshift-gitops`. - -. Delete the default `argocd-cluster` instance. -. Edit the new `argocd-cm.yml` config map file to restore the entire `data` section manually. -. Replace the URL value in the config map entry with the new instance name `openshift-gitops`. 
For example, in the preceding example, replace the URL value with the following URL value: -+ -[source,yaml] ----- -url: >- - https://openshift-gitops-server-openshift-gitops.apps.dev-svc-4.7-041614.devcluster.openshift.com ----- -. Login to the Argo CD cluster and verify that the previous configurations are present. diff --git a/modules/gitops-release-notes-1-2-1.adoc b/modules/gitops-release-notes-1-2-1.adoc deleted file mode 100644 index be158a0cd0ba..000000000000 --- a/modules/gitops-release-notes-1-2-1.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -[id="gitops-release-notes-1-2-1_{context}"] -= Release notes for {gitops-title} 1.2.1 - -{gitops-title} 1.2.1 is now available on {product-title} 4.8. - -[id="support-matrix-1-2-1_{context}"] -== Support matrix - -Some features in this release are currently in Technology Preview. These experimental features are not intended for production use. - -link:https://access.redhat.com/support/offerings/techpreview[Technology Preview Features Support Scope] - -In the table below, features are marked with the following statuses: - -- *TP*: _Technology Preview_ - -- *GA*: _General Availability_ - -Note the following scope of support on the Red Hat Customer Portal for these features: - -.Support matrix -[cols="1,1",options="header"] -|=== -| Feature | {gitops-title} 1.2.1 -| Argo CD -| GA -| Argo CD ApplicationSet -| TP -| {gitops-title} Application Manager CLI (`kam`) -| TP -|=== - -[id="fixed-issues-1-2-1_{context}"] -== Fixed issues -The following issues were resolved in the current release: - -* Previously, huge memory spikes were observed on the application controller on startup. The flag `--kubectl-parallelism-limit` for the application controller is now set to 10 by default, however -this value can be overridden by specifying a number for `.spec.controller.kubeParallelismLimit` in the Argo CD CR specification. -link:https://issues.redhat.com/browse/GITOPS-1255[GITOPS-1255] - -* The latest Triggers APIs caused Kubernetes build failure due to duplicate entries in the kustomization.yaml when using the `kam bootstrap` command. The Pipelines and Tekton triggers components have now been updated to v0.24.2 and v0.14.2, respectively, to address this issue. -link:https://issues.redhat.com/browse/GITOPS-1273[GITOPS-1273] - -* Persisting RBAC roles and bindings are now automatically removed from the target namespace when the Argo CD instance from the source namespace is deleted. -link:https://issues.redhat.com/browse/GITOPS-1228[GITOPS-1228] - -* Previously, when deploying an Argo CD instance into a namespace, the Argo CD instance would change the "managed-by" label to be its own namespace. This fix would make namespaces unlabelled while also making sure the required RBAC roles and bindings are created and deleted for the namespace. -link:https://issues.redhat.com/browse/GITOPS-1247[GITOPS-1247] - -* Previously, the default resource request limits on Argo CD workloads, specifically for the repo-server and application controller, were found to be very restrictive. The existing resource quota has now been removed and the default memory limit has been increased to 1024M in the repo server. Please note that this change will only affect new installations; existing Argo CD instance workloads will not be affected. 
-link:https://issues.redhat.com/browse/GITOPS-1274[GITOPS-1274] diff --git a/modules/gitops-release-notes-1-2-2.adoc b/modules/gitops-release-notes-1-2-2.adoc deleted file mode 100644 index e32490369cf2..000000000000 --- a/modules/gitops-release-notes-1-2-2.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -[id="gitops-release-notes-1-2-2_{context}"] -= Release notes for {gitops-title} 1.2.2 - -{gitops-title} 1.2.2 is now available on {product-title} 4.8. - -[id="fixed-issues-1-2-2_{context}"] -== Fixed issues -The following issue was resolved in the current release: - -* All versions of Argo CD are vulnerable to a path traversal bug that allows to pass arbitrary values to be consumed by Helm charts. This update fixes the CVE-2022-24348 gitops error, path traversal and dereference of symlinks when passing Helm value files. -link:https://issues.redhat.com/browse/GITOPS-1756[GITOPS-1756] \ No newline at end of file diff --git a/modules/gitops-release-notes-1-2.adoc b/modules/gitops-release-notes-1-2.adoc deleted file mode 100644 index 5f1db96f1025..000000000000 --- a/modules/gitops-release-notes-1-2.adoc +++ /dev/null @@ -1,127 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -[id="gitops-release-notes-1-2_{context}"] -= Release notes for {gitops-title} 1.2 - -{gitops-title} 1.2 is now available on {product-title} 4.8. - -[id="support-matrix-1-2_{context}"] -== Support matrix - -Some features in this release are currently in Technology Preview. These experimental features are not intended for production use. - -link:https://access.redhat.com/support/offerings/techpreview[Technology Preview Features Support Scope] - -In the table below, features are marked with the following statuses: - -- *TP*: _Technology Preview_ - -- *GA*: _General Availability_ - -Note the following scope of support on the Red Hat Customer Portal for these features: - -.Support matrix -[cols="1,1",options="header"] -|=== -| Feature | {gitops-title} 1.2 -| Argo CD -| GA -| Argo CD ApplicationSet -| TP -| {gitops-title} Application Manager CLI (`kam`) -| TP -|=== - -[id="new-features-1-2_{context}"] -== New features -In addition to the fixes and stability improvements, the following sections highlight what is new in {gitops-title} 1.2: - -* If you do not have read or write access to the openshift-gitops namespace, you can now use the `DISABLE_DEFAULT_ARGOCD_INSTANCE` environment variable in the GitOps Operator and set the value to `TRUE` to prevent the default Argo CD instance from starting in the `openshift-gitops` namespace. -* Resource requests and limits are now configured in Argo CD workloads. Resource quota is enabled in the `openshift-gitops` namespace. As a result, out-of-band workloads deployed manually in the openshift-gitops namespace must be configured with resource requests and limits and the resource quota may need to be increased. -* Argo CD authentication is now integrated with Red Hat SSO and it is automatically configured with OpenShift 4 Identity Provider on the cluster. This feature is disabled by default. To enable Red Hat SSO, add SSO configuration in `ArgoCD` CR as shown below. Currently,`keycloak` is the only supported provider. 
- -+ -[source,yaml] ----- -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: example-argocd - labels: - example: basic -spec: - sso: - provider: keycloak - server: - route: - enabled: true ----- -* You can now define hostnames using route labels to support router sharding. Support for setting labels on the `server` (argocd server), `grafana`, and `prometheus` routes is now available. To set labels on a route, add `labels` under the route configuration for a server in the `ArgoCD` CR. -+ -.Example `ArgoCD` CR YAML to set labels on argocd server -[source,yaml] ----- -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: example-argocd - labels: - example: basic -spec: - server: - route: - enabled: true - labels: - key1: value1 - key2: value2 ----- -* The GitOps Operator now automatically grants permissions to Argo CD instances to manage resources in target namespaces by applying labels. Users can label the target namespace with the label `argocd.argoproj.io/managed-by: <source-namespace>`, where `source-namespace` is the namespace where the Argo CD instance is deployed. - -[id="fixed-issues-1-2_{context}"] -== Fixed issues -The following issues were resolved in the current release: - -* Previously, if a user created additional instances of Argo CD managed by the default cluster instance in the openshift-gitops namespace, the application responsible for the new Argo CD instance would get stuck in an `OutOfSync` status. This issue has now been resolved by adding an owner reference to the cluster secret. link:https://issues.redhat.com/browse/GITOPS-1025[GITOPS-1025] - -[id="known-issues-1-2_{context}"] -== Known issues -These are the known issues in {gitops-title} 1.2: - -* When an Argo CD instance is deleted from the source namespace, the `argocd.argoproj.io/managed-by` labels in the target namespaces are not removed. link:https://issues.redhat.com/browse/GITOPS-1228[GITOPS-1228] - -* Resource quota has been enabled in the openshift-gitops namespace in {gitops-title} 1.2. This can affect out-of-band workloads deployed manually and workloads deployed by the default Argo CD instance in the `openshift-gitops` namespace. When you upgrade from {gitops-title} `v1.1.2` to `v1.2`, such workloads must be configured with resource requests and limits. If there are any additional workloads, the resource quota in the openshift-gitops namespace must be increased. - -+ -The current resource quota for the `openshift-gitops` namespace is as follows: -+ -[cols="1,1,1",options="header"] -|=== -| *Resource* | *Requests* | *Limits* - -| CPU -| 6688m -| 13750m - -| Memory -| 4544Mi -| 9070Mi - -|=== -+ -You can use the following command to update the CPU limits: -+ -[source,terminal] ----- -$ oc patch resourcequota openshift-gitops-compute-resources -n openshift-gitops --type='json' -p='[{"op": "replace", "path": "/spec/hard/limits.cpu", "value":"9000m"}]' ----- -+ -You can use the following command to update the CPU requests: -+ -[source,terminal] ----- -$ oc patch resourcequota openshift-gitops-compute-resources -n openshift-gitops --type='json' -p='[{"op": "replace", "path": "/spec/hard/cpu", "value":"7000m"}]' ----- -+ -To update the memory values, replace `cpu` with `memory` in the path of the preceding commands, as shown in the example that follows. 
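The following sketch shows the equivalent memory limit update; the `11000Mi` value is only an illustration, not a recommended setting:

[source,terminal]
----
$ oc patch resourcequota openshift-gitops-compute-resources -n openshift-gitops --type='json' -p='[{"op": "replace", "path": "/spec/hard/limits.memory", "value":"11000Mi"}]'
----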
diff --git a/modules/gitops-release-notes-1-3-0.adoc b/modules/gitops-release-notes-1-3-0.adoc deleted file mode 100644 index 994606aff4ea..000000000000 --- a/modules/gitops-release-notes-1-3-0.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -[id="gitops-release-notes-1-3_{context}"] -= Release notes for {gitops-title} 1.3 - -{gitops-title} 1.3 is now available on {product-title} 4.7, 4.8, 4.9, and 4.6 with limited GA support. - -[id="new-features-1-3_{context}"] -== New features -In addition to the fixes and stability improvements, the following sections highlight what is new in {gitops-title} 1.3.0: - -* For a fresh install of v1.3.0, Dex is automatically configured. You can log into the default Argo CD instance in the `openshift-gitops` namespace using the OpenShift or `kubeadmin` credentials. As an admin you can disable the Dex installation after the Operator is installed which will remove the Dex deployment from the `openshift-gitops` namespace. -* The default Argo CD instance installed by the Operator as well as accompanying controllers can now run on the infrastructure nodes of the cluster by setting a simple configuration toggle. -* Internal communications in Argo CD can now be secured using the TLS and the OpenShift cluster certificates. The Argo CD routes can now leverage the OpenShift cluster certificates in addition to using external certificate managers such as the cert-manager. -* Use the improved *Environments* page in the *Developer* perspective of the console 4.9 to gain insights into the GitOps environments. -* You can now access custom health checks in Argo CD for `DeploymentConfig` resources, `Route` resources, and Operators installed using OLM. -* The GitOps Operator now conforms to the naming conventions recommended by the latest Operator-SDK: -** The prefix `gitops-operator-` is added to all resources -** Service account is renamed to `gitops-operator-controller-manager` - - -[id="fixed-issues-1-3_{context}"] -== Fixed issues -The following issues were resolved in the current release: - -* Previously, if you set up a new namespace to be managed by a new instance of Argo CD, it would immediately be **Out Of Sync** due to the new roles and bindings that the Operator creates to manage that new namespace. This behavior is fixed. link:https://issues.redhat.com/browse/GITOPS-1384[GITOPS-1384] - -[id="known-issues-1-3_{context}"] -== Known issues - -* While migrating from the Dex authentication provider to the Keycloak provider, you may experience login issues with Keycloak. link:https://issues.redhat.com/browse/GITOPS-1450[GITOPS-1450] -+ -To prevent the above issue, when migrating, uninstall Dex by removing the `.spec.dex` section found in the Argo CD custom resource. Allow a few minutes for Dex to uninstall completely, and then proceed to install Keycloak by adding `.spec.sso.provider: keycloak` to the Argo CD custom resource. -+ -As a workaround, uninstall Keycloak by removing `.spec.sso.provider: keycloak` and then re-install. 
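A minimal sketch of the Argo CD custom resource after such a migration, with the `.spec.dex` section removed and Keycloak enabled, might look like the following; the metadata values are illustrative only:

[source,yaml]
----
apiVersion: argoproj.io/v1alpha1
kind: ArgoCD
metadata:
  # Illustrative name; use the name of your existing Argo CD instance.
  name: example-argocd
spec:
  # The .spec.dex section has been removed; Keycloak is now the SSO provider.
  sso:
    provider: keycloak
----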
diff --git a/modules/gitops-release-notes-1-3-1.adoc b/modules/gitops-release-notes-1-3-1.adoc deleted file mode 100644 index 7b9eb36345b9..000000000000 --- a/modules/gitops-release-notes-1-3-1.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -[id="gitops-release-notes-1-3-1_{context}"] -= Release notes for {gitops-title} 1.3.1 - -{gitops-title} 1.3.1 is now available on {product-title} 4.7, 4.8, 4.9, and 4.6 with limited GA support. - -[id="fixed-issues-1-3-1_{context}"] -== Fixed issues - -* Previously, if you upgraded to v1.3.0, the Operator did not return an ordered slice of environment variables. As a result, the reconciler failed, causing frequent recreation of Argo CD pods in {product-title} clusters running behind a proxy. This update fixes the issue so that Argo CD pods are not recreated. link:https://issues.redhat.com/browse/GITOPS-1489[GITOPS-1489] diff --git a/modules/gitops-release-notes-1-3-2.adoc b/modules/gitops-release-notes-1-3-2.adoc deleted file mode 100644 index 835b5a4a92d9..000000000000 --- a/modules/gitops-release-notes-1-3-2.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -[id="gitops-release-notes-1-3-2_{context}"] -= Release notes for {gitops-title} 1.3.2 - -{gitops-title} 1.3.2 is now available on {product-title} 4.7, 4.8, 4.9, and 4.6 with limited GA support. - -[id="new-features-1-3-2_{context}"] -== New features - -In addition to the fixes and stability improvements, the following sections highlight what is new in {gitops-title} 1.3.2: - -* Upgraded Argo CD to version *2.1.8* - -* Upgraded Dex to version *2.30.0* - -[id="fixed-issues-1-3-2_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* Previously, in the OperatorHub UI under the *Infrastructure Features* section, when you filtered by `Disconnected`, the {gitops-title} Operator did not appear in the search results because the Operator did not have the related annotation set in its CSV file. With this update, the `Disconnected Cluster` annotation has been added to the {gitops-title} Operator as an infrastructure feature. link:https://issues.redhat.com/browse/GITOPS-1539[GITOPS-1539] - -* When using a namespace-scoped Argo CD instance, for example, an Argo CD instance that is not scoped to *All Namespaces* in a cluster, {gitops-title} dynamically maintains a list of managed namespaces. These namespaces include the `argocd.argoproj.io/managed-by` label. This list of namespaces is stored in a cache in *Argo CD -> Settings -> Clusters -> "in-cluster" -> NAMESPACES*. Before this update, if you deleted one of these namespaces, the Operator ignored that, and the namespace remained in the list. This behavior broke the *CONNECTION STATE* in that cluster configuration, and all sync attempts resulted in errors. For example: -+ -[source,text] ----- -Argo service account does not have <random_verb> on <random_resource_type> in namespace <the_namespace_you_deleted>. ----- -+ -This bug is fixed. link:https://issues.redhat.com/browse/GITOPS-1521[GITOPS-1521] - -* With this update, the {gitops-title} Operator has been annotated with the *Deep Insights* capability level. link:https://issues.redhat.com/browse/GITOPS-1519[GITOPS-1519] - -* Previously, the Argo CD Operator managed the `resource.exclusion` field by itself but ignored the `resource.inclusion` field. 
This prevented the `resource.inclusion` field configured in the `Argo CD` CR to generate in the `argocd-cm` configuration map. This bug is fixed. link:https://issues.redhat.com/browse/GITOPS-1518[GITOPS-1518] \ No newline at end of file diff --git a/modules/gitops-release-notes-1-3-3.adoc b/modules/gitops-release-notes-1-3-3.adoc deleted file mode 100644 index bdd1093dd0a9..000000000000 --- a/modules/gitops-release-notes-1-3-3.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -[id="gitops-release-notes-1-3-3_{context}"] -= Release notes for {gitops-title} 1.3.3 - -{gitops-title} 1.3.3 is now available on {product-title} 4.7, 4.8, 4.9, and 4.6 with limited GA support. - -[id="fixed-issues-1-3-3_{context}"] -== Fixed issues - -The following issue has been resolved in the current release: - -* All versions of Argo CD are vulnerable to a path traversal bug that allows to pass arbitrary values to be consumed by Helm charts. This update fixes the `CVE-2022-24348 gitops` error, path traversal and dereference of symlinks when passing Helm value files. link:https://issues.redhat.com/browse/GITOPS-1756[GITOPS-1756] \ No newline at end of file diff --git a/modules/gitops-release-notes-1-3-6.adoc b/modules/gitops-release-notes-1-3-6.adoc deleted file mode 100644 index 2afd8942a295..000000000000 --- a/modules/gitops-release-notes-1-3-6.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -[id="gitops-release-notes-1-3-6_{context}"] -= Release notes for {gitops-title} 1.3.6 - -{gitops-title} 1.3.6 is now available on {product-title} 4.7, 4.8, 4.9, and 4.6 with limited GA support. - -[id="fixed-issues-1-3-6_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* In {gitops-title}, improper access control allows admin privilege escalation link:https://access.redhat.com/security/cve/CVE-2022-1025[(CVE-2022-1025)]. This update fixes the issue. - -* A path traversal flaw allows leaking of out-of-bound files link:https://access.redhat.com/security/cve/CVE-2022-24731[(CVE-2022-24731)]. This update fixes the issue. - -* A path traversal flaw and improper access control allows leaking of out-of-bound files link:https://access.redhat.com/security/cve/CVE-2022-24730[(CVE-2022-24730)]. This update fixes the issue. - diff --git a/modules/gitops-release-notes-1-3-7.adoc b/modules/gitops-release-notes-1-3-7.adoc deleted file mode 100644 index 6289359cdb22..000000000000 --- a/modules/gitops-release-notes-1-3-7.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -[id="gitops-release-notes-1-3-7_{context}"] -= Release notes for {gitops-title} 1.3.7 - -{gitops-title} 1.3.7 is now available on {product-title} 4.7, 4.8, 4.9, and 4.6 with limited GA support. - -[id="fixed-issues-1-3-7_{context}"] -== Fixed issues - -The following issue has been resolved in the current release: - -* Before this update, a flaw was found in OpenSSL. This update fixes the issue by updating the base images to the latest version to avoid the OpenSSL flaw. link:https://access.redhat.com/security/cve/CVE-2022-0778[(CVE-2022-0778)]. - -[NOTE] -==== -To install the current release of {gitops-title} 1.3 and receive further updates during its product life cycle, switch to the **GitOps-1.3** channel. 
-==== diff --git a/modules/gitops-release-notes-1-4-0.adoc b/modules/gitops-release-notes-1-4-0.adoc deleted file mode 100644 index e8aa41bac091..000000000000 --- a/modules/gitops-release-notes-1-4-0.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -[id="gitops-release-notes-1-4-0_{context}"] -= Release notes for {gitops-title} 1.4.0 - -{gitops-title} 1.4.0 is now available on {product-title} 4.7, 4.8, 4.9, and 4.10. - -[id="new-features-1-4-0_{context}"] -== New features - -The current release adds the following improvements. - -* This enhancement upgrades the {gitops-title} Application Manager CLI (`kam`) to version *0.0.41*. link:https://issues.redhat.com/browse/GITOPS-1669[GITOPS-1669] - -* This enhancement upgrades Argo CD to version *2.2.2*. link:https://issues.redhat.com/browse/GITOPS-1532[GITOPS-1532] - -* This enhancement upgrades Helm to version *3.7.1*. link:https://issues.redhat.com/browse/GITOPS-1530[GITOPS-1530] - -* This enhancement adds the health status of the `DeploymentConfig`, `Route`, and `OLM Operator` items to the Argo CD Dashboard and {product-title} web console. This information helps you monitor the overall health status of your application. link:https://issues.redhat.com/browse/GITOPS-655[GITOPS-655], link:https://issues.redhat.com/browse/GITOPS-915[GITOPS-915], link:https://issues.redhat.com/browse/GITOPS-916[GITOPS-916], link:https://issues.redhat.com/browse/GITOPS-1110[GITOPS-1110] - -* With this update, you can to specify the number of desired replicas for the `argocd-server` and `argocd-repo-server` components by setting the `.spec.server.replicas` and `.spec.repo.replicas` attributes in the Argo CD custom resource, respectively. If you configure the horizontal pod autoscaler (HPA) for the `argocd-server` components, it takes precedence over the Argo CD custom resource attributes. link:https://issues.redhat.com/browse/GITOPS-1245[GITOPS-1245] - -* As an administrative user, when you give Argo CD access to a namespace by using the `argocd.argoproj.io/managed-by` label, it assumes namespace-admin privileges. These privileges are an issue for administrators who provide namespaces to non-administrators, such as development teams, because the privileges enable non-administrators to modify objects such as network policies. -+ -With this update, administrators can configure a common cluster role for all the managed namespaces. In role bindings for the Argo CD application controller, the Operator refers to the `CONTROLLER_CLUSTER_ROLE` environment variable. In role bindings for the Argo CD server, the Operator refers to the `SERVER_CLUSTER_ROLE` environment variable. If these environment variables contain custom roles, the Operator doesn't create the default admin role. Instead, it uses the existing custom role for all managed namespaces. link:https://issues.redhat.com/browse/GITOPS-1290[GITOPS-1290] - -* With this update, the *Environments* page in the {product-title} *Developer* perspective displays a broken heart icon to indicate degraded resources, excluding ones whose status is `Progressing`, `Missing`, and `Unknown`. The console displays a yellow yield sign icon to indicate out-of-sync resources. 
link:https://issues.redhat.com/browse/GITOPS-1307[GITOPS-1307] - -[id="fixed-issues-1-4-0_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* Before this update, when the Route to the {gitops-title} Application Manager CLI (`kam`) was accessed without specifying a path in the URL, a default page without any helpful information was displayed to the user. This update fixes the issue so that the default page displays download links for the `kam` CLI. link:https://issues.redhat.com/browse/GITOPS-923[GITOPS-923] - -* Before this update, setting a resource quota in the namespace of the Argo CD custom resource might cause the setup of the Red Hat SSO (RH SSO) instance to fail. This update fixes this issue by setting a minimum resource request for the RH SSO deployment pods. link:https://issues.redhat.com/browse/GITOPS-1297[GITOPS-1297] - -* Before this update, if you changed the log level for the `argocd-repo-server` workload, the Operator didn't reconcile this setting. The workaround was to delete the deployment resource so that the Operator recreated it with the new log level. With this update, the log level is correctly reconciled for existing `argocd-repo-server` workloads. link:https://issues.redhat.com/browse/GITOPS-1387[GITOPS-1387] - -* Before this update, if the Operator managed an Argo CD instance that lacked the `.data` field in the `argocd-secret` Secret, the Operator on that instance crashed. This update fixes the issue so that the Operator doesn't crash when the `.data` field is missing. Instead, the secret regenerates and the `gitops-operator-controller-manager` resource is redeployed. link:https://issues.redhat.com/browse/GITOPS-1402[GITOPS-1402] - -* Before this update, the `gitopsservice` service was annotated as an internal object. This update removes the annotation so you can update or delete the default Argo CD instance and run GitOps workloads on infrastructure nodes by using the UI. link:https://issues.redhat.com/browse/GITOPS-1429[GITOPS-1429] - -[id="known-issues-1-4-0_{context}"] -== Known issues - -These are the known issues in the current release: - -* If you migrate from the Dex authentication provider to the Keycloak provider, you might experience login issues with Keycloak. -+ -To prevent this issue, when migrating, uninstall Dex by removing the `.spec.dex` section from the Argo CD custom resource. Allow a few minutes for Dex to uninstall completely. Then, install Keycloak by adding `.spec.sso.provider: keycloak` to the Argo CD custom resource. -+ -As a workaround, uninstall Keycloak by removing `.spec.sso.provider: keycloak`. Then, re-install it. link:https://issues.redhat.com/browse/GITOPS-1450[GITOPS-1450], link:https://issues.redhat.com/browse/GITOPS-1331[GITOPS-1331] diff --git a/modules/gitops-release-notes-1-4-1.adoc b/modules/gitops-release-notes-1-4-1.adoc deleted file mode 100644 index 802b7b7baca6..000000000000 --- a/modules/gitops-release-notes-1-4-1.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -[id="gitops-release-notes-1-4-1_{context}"] -= Release notes for {gitops-title} 1.4.1 - -{gitops-title} 1.4.1 is now available on {product-title} 4.7, 4.8, 4.9, and 4.10. 
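The 1.4.0 feature above that introduces the `.spec.server.replicas` and `.spec.repo.replicas` attributes can be sketched as follows. The instance name is an assumption, and if a horizontal pod autoscaler is configured for the `argocd-server` component, the HPA takes precedence over these values:

[source,yaml]
----
apiVersion: argoproj.io/v1alpha1
kind: ArgoCD
metadata:
  name: example-argocd # assumed instance name
spec:
  server:
    replicas: 2 # desired replicas for the argocd-server component
  repo:
    replicas: 2 # desired replicas for the argocd-repo-server component
----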
- -[id="fixed-issues-1-4-1_{context}"] -== Fixed issues - -The following issue has been resolved in the current release: - -* {gitops-title} Operator v1.4.0 introduced a regression which removes the description fields from `spec` for the following CRDs: - -** `argoproj.io_applications.yaml` -** `argoproj.io_appprojects.yaml` -** `argoproj.io_argocds.yaml` -+ -Before this update, when you created an `AppProject` resource using the `oc create` command, the resource failed to synchronize due to the missing description fields. This update restores the missing description fields in the preceding CRDs. link:https://issues.redhat.com/browse/GITOPS-1721[GITOPS-1721] \ No newline at end of file diff --git a/modules/gitops-release-notes-1-4-11.adoc b/modules/gitops-release-notes-1-4-11.adoc deleted file mode 100644 index c6ec704e85fb..000000000000 --- a/modules/gitops-release-notes-1-4-11.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -:_content-type: REFERENCE - -[id="gitops-release-notes-1-4-11_{context}"] -= Release notes for {gitops-title} 1.4.11 - -{gitops-title} 1.4.11 is now available on {product-title} 4.7, 4.8, 4.9, and 4.10. - -[id="new-features-1-4-11_{context}"] -== New features - -The current release adds the following improvements: - -* With this update, the bundled Argo CD has been updated to version 2.2.12. - -[id="fixed-issues-1-4-11_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* Before this update, the `redis-ha-haproxy` pods of an ArgoCD instance failed when more restrictive SCCs were present in the cluster. This update fixes the issue by updating the security context in workloads. link:https://issues.redhat.com/browse/GITOPS-2034[GITOPS-2034] - -[id="known-issues-1-4-11_{context}"] -== Known issues - -* {gitops-title} Operator can use RHSSO (KeyCloak) with OIDC and Dex. However, with a recent security fix applied, the Operator cannot validate the RHSSO certificate in some scenarios. link:https://issues.redhat.com/browse/GITOPS-2214[GITOPS-2214] -+ -As a workaround, disable TLS validation for the OIDC (Keycloak/RHSSO) endpoint in the ArgoCD specification. -+ -[source,yaml] ----- -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: example-argocd -spec: - extraConfig: - "admin.enabled": "true" -... ----- \ No newline at end of file diff --git a/modules/gitops-release-notes-1-4-12.adoc b/modules/gitops-release-notes-1-4-12.adoc deleted file mode 100644 index cab800e7dcc3..000000000000 --- a/modules/gitops-release-notes-1-4-12.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -:_content-type: REFERENCE - -[id="gitops-release-notes-1-4-12_{context}"] -= Release notes for {gitops-title} 1.4.12 - -{gitops-title} 1.4.12 is now available on {product-title} 4.7, 4.8, 4.9, and 4.10. - -[id="fixed-issues-1-4-12_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* Before this update, in a large set of applications the application controllers were restarted multiple times due to the unresponsiveness of liveness probes. This update fixes the issue by removing the liveness probe in the application controller `StatefulSet` object. 
link:https://issues.redhat.com/browse/GITOPS-2153[GITOPS-2153] - -* Before this update, the RHSSO certificate cannot be validated when it is set up with a certificate which is not signed by certificate authorities. This update fixes the issue and now you can provide a custom certificate which will be used in verifying the Keycloak's TLS certificate when communicating with it. You can add the `rootCA` to the Argo CD custom resource `.spec.keycloak.rootCA` field. The Operator reconciles this change and updates the `oidc.config` field in the `argocd-cm` `ConfigMap` with the PEM-encoded root certificate. link:https://issues.redhat.com/browse/GITOPS-2214[GITOPS-2214] -+ -[NOTE] -==== -Restart the Argo CD server pod after updating the `.spec.keycloak.rootCA` field. -==== -+ -For example: -+ -[source,yaml] ----- -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: example-argocd - labels: - example: basic -spec: - sso: - provider: keycloak - keycloak: - rootCA: | - ---- BEGIN CERTIFICATE ---- - This is a dummy certificate - Please place this section with appropriate rootCA - ---- END CERTIFICATE ---- - server: - route: - enabled: true ----- - -* Before this update, a terminating namespace that was managed by Argo CD would block the creation of roles and other configuration of other managed namespaces. This update fixes this issue. link:https://issues.redhat.com/browse/GITOPS-2276[GITOPS-2276] - -* Before this update, the Dex pods failed to start with `CreateContainerConfigError` when an SCC of `anyuid` was assigned to the Dex `ServiceAccount` resource. This update fixes this issue by assigning a default user id to the Dex container. link:https://issues.redhat.com/browse/GITOPS-2235[GITOPS-2235] - diff --git a/modules/gitops-release-notes-1-4-13.adoc b/modules/gitops-release-notes-1-4-13.adoc deleted file mode 100644 index 5f42b2c0286c..000000000000 --- a/modules/gitops-release-notes-1-4-13.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -:_content-type: REFERENCE - -[id="gitops-release-notes-1-4-13_{context}"] -= Release notes for {gitops-title} 1.4.13 - -{gitops-title} 1.4.13 is now available on {product-title} 4.7, 4.8, 4.9, and 4.10. - -[id="fixed-issues-1-4-13_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* From {product-title} 4.12, it is optional to install the console. This fix updates the {gitops-title} Operator to prevent errors with the Operator if the console is not installed. link:https://issues.redhat.com/browse/GITOPS-2354[GITOPS-2354] diff --git a/modules/gitops-release-notes-1-4-2.adoc b/modules/gitops-release-notes-1-4-2.adoc deleted file mode 100644 index c9bfc9738cfe..000000000000 --- a/modules/gitops-release-notes-1-4-2.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -[id="gitops-release-notes-1-4-2_{context}"] -= Release notes for {gitops-title} 1.4.2 - -[role="_abstract"] -{gitops-title} 1.4.2 is now available on {product-title} 4.7, 4.8, 4.9, and 4.10. - -[id="fixed-issues-1-4-2_{context}"] -== Fixed issues - -The following issue has been resolved in the current release: - -* Before this update, the *Route* resources got stuck in `Progressing` Health status if more than one `Ingress` were attached to the route. This update fixes the health check and reports the correct health status of the *Route* resources. 
link:https://issues.redhat.com/browse/GITOPS-1751[GITOPS-1751] \ No newline at end of file diff --git a/modules/gitops-release-notes-1-4-3.adoc b/modules/gitops-release-notes-1-4-3.adoc deleted file mode 100644 index bf0d1cf49cd8..000000000000 --- a/modules/gitops-release-notes-1-4-3.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -[id="gitops-release-notes-1-4-3_{context}"] -= Release notes for {gitops-title} 1.4.3 - -[role="_abstract"] -{gitops-title} 1.4.3 is now available on {product-title} 4.7, 4.8, 4.9, and 4.10. - -[id="fixed-issues-1-4-3_{context}"] -== Fixed issues - -The following issue has been resolved in the current release: - -* Before this update, the TLS certificate in the `argocd-tls-certs-cm` configuration map was deleted by the {gitops-title} unless the certificate was configured in the ArgoCD CR specification `tls.initialCerts` field. This update fixes this issue. link:https://issues.redhat.com/browse/GITOPS-1725[GITOPS-1725] \ No newline at end of file diff --git a/modules/gitops-release-notes-1-4-5.adoc b/modules/gitops-release-notes-1-4-5.adoc deleted file mode 100644 index d3428ae40f84..000000000000 --- a/modules/gitops-release-notes-1-4-5.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -[id="gitops-release-notes-1-4-5_{context}"] -= Release notes for {gitops-title} 1.4.5 - -[role="_abstract"] -{gitops-title} 1.4.5 is now available on {product-title} 4.7, 4.8, 4.9, and 4.10. - -[id="fixed-issues-1-4-5_{context}"] -== Fixed issues - -[WARNING] -==== -You should directly upgrade to {gitops-title} v1.4.5 from {gitops-title} v1.4.3. Do not use {gitops-title} v1.4.4 in a production environment. Major issues that affected {gitops-title} v1.4.4 are fixed in {gitops-title} 1.4.5. -==== - -The following issue has been resolved in the current release: - -* Before this update, Argo CD pods were stuck in the `ErrImagePullBackOff` state. The following error message was shown: -[source,yaml] ----- -reason: ErrImagePull - message: >- - rpc error: code = Unknown desc = reading manifest - sha256:ff4ad30752cf0d321cd6c2c6fd4490b716607ea2960558347440f2f370a586a8 - in registry.redhat.io/openshift-gitops-1/argocd-rhel8: StatusCode: - 404, <HTML><HEAD><TITLE>Error ----- -This issue is now fixed. link:https://issues.redhat.com/browse/GITOPS-1848[GITOPS-1848] \ No newline at end of file diff --git a/modules/gitops-release-notes-1-4-6.adoc b/modules/gitops-release-notes-1-4-6.adoc deleted file mode 100644 index c42e19f3f4ad..000000000000 --- a/modules/gitops-release-notes-1-4-6.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -[id="gitops-release-notes-1-4-6_{context}"] -= Release notes for {gitops-title} 1.4.6 - -[role="_abstract"] -{gitops-title} 1.4.6 is now available on {product-title} 4.7, 4.8, 4.9, and 4.10. - -[id="fixed-issues-1-4-6_{context}"] -== Fixed issues - -The following issue has been resolved in the current release: - -* The base images are updated to the latest version to avoid OpenSSL flaw link: https://access.redhat.com/security/cve/CVE-2022-0778[(CVE-2022-0778)]. - -[NOTE] -==== -To install the current release of {gitops-title} 1.4 and receive further updates during its product life cycle, switch to the **GitOps-1.4** channel. 
-==== \ No newline at end of file diff --git a/modules/gitops-release-notes-1-5-0.adoc b/modules/gitops-release-notes-1-5-0.adoc deleted file mode 100644 index 0efb86c2683e..000000000000 --- a/modules/gitops-release-notes-1-5-0.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -[id="gitops-release-notes-1-5-0_{context}"] -= Release notes for {gitops-title} 1.5.0 - -[role="_abstract"] -{gitops-title} 1.5.0 is now available on {product-title} 4.8, 4.9, 4.10, and 4.11. - -[id="new-features-1-5-0_{context}"] -== New features - -The current release adds the following improvements: - -* This enhancement upgrades Argo CD to version *2.3.3*. link:https://issues.redhat.com/browse/GITOPS-1708[GITOPS-1708] - -* This enhancement upgrades Dex to version *2.30.3*. link:https://issues.redhat.com/browse/GITOPS-1850[GITOPS-1850] - -* This enhancement upgrades Helm to version *3.8.0*. link:https://issues.redhat.com/browse/GITOPS-1709[GITOPS-1709] - -* This enhancement upgrades Kustomize to version *4.4.1*. link:https://issues.redhat.com/browse/GITOPS-1710[GITOPS-1710] - -* This enhancement upgrades Application Set to version *0.4.1*. - -* With this update, a new channel by the name *latest* has been added that provides the latest release of the {gitops-title}. For GitOps v1.5.0, the Operator is pushed to *gitops-1.5*, *latest* channel, and the existing *stable* channel. From GitOps v1.6 all the latest releases will be pushed only to the *latest* channel and not the *stable* channel. link:https://issues.redhat.com/browse/GITOPS-1791[GITOPS-1791] - -* With this update, the new CSV adds the `olm.skipRange: '>=1.0.0 <1.5.0'` annotation. As a result, all the previous release versions will be skipped. The Operator upgrades to v1.5.0 directly. link:https://issues.redhat.com/browse/GITOPS-1787[GITOPS-1787] - -* With this update, the Operator updates the Red Hat Single Sign-On (RH-SSO) to version v7.5.1 including the following enhancements: - -** You can log in to Argo CD using the OpenShift credentials including the `kube:admin` credential. -** The RH-SSO supports and configures Argo CD instances for Role-based Access Control (RBAC) using OpenShift groups. -** The RH-SSO honors the `HTTP_Proxy` environment variables. You can use the RH-SSO as an SSO for Argo CD running behind a proxy. -+ -link:https://issues.redhat.com/browse/GITOPS-1330[GITOPS-1330] - -* With this update, a new `.host` URL field is added to the `.status` field of the Argo CD operand. When a route or ingress is enabled with the priority given to route, then the new URL field displays the route. If no URL is provided from the route or ingress, the `.host` field is not displayed. -+ -When the route or ingress is configured, but the corresponding controller is not set up properly and is not in the `Ready` state or does not propagate its URL, the value of the `.status.host` field in the operand indicates as `Pending` instead of displaying the URL. This affects the overall status of the operand by making it `Pending` instead of `Available`. link:https://issues.redhat.com/browse/GITOPS-654[GITOPS-654] - -[id="fixed-issues-1-5-0_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* Before this update, RBAC rules specific to *AppProjects* would not allow the use of commas for the subject field of the role, thus preventing bindings to the LDAP account. 
This update fixes the issue and you can now specify complex role bindings in *AppProject* specific RBAC rules. link:https://issues.redhat.com/browse/GITOPS-1771[GITOPS-1771] - -* Before this update, when a `DeploymentConfig` resource is scaled to `0`, Argo CD displayed it in a *progressing* state with a health status message as *"replication controller is waiting for pods to run"*. This update fixes the edge case and the health check now reports the correct health status of the `DeploymentConfig` resource. link:https://issues.redhat.com/browse/GITOPS-1738[GITOPS-1738] - -* Before this update, the TLS certificate in the `argocd-tls-certs-cm` configuration map was deleted by the {gitops-title} unless the certificate was configured in the `ArgoCD` CR specification `tls.initialCerts` field. This issue is fixed now. link:https://issues.redhat.com/browse/GITOPS-1725[GITOPS-1725] - -* Before this update, while creating a namespace with the `managed-by` label it created a lot of `RoleBinding` resources on the new namespace. This update fixes the issue and now {gitops-title} removes the irrelevant `Role` and `RoleBinding` resources created by the previous versions. link:https://issues.redhat.com/browse/GITOPS-1550[GITOPS-1550] - -* Before this update, the TLS certificate of the route in pass-through mode did not have a CA name. As a result, Firefox 94 and later failed to connect to Argo CD UI with error code *SEC_ERROR_BAD_DER*. This update fixes the issue. You must delete the `` secrets and let it recreate. Then, you must delete the `` secrets. After the {gitops-title} recreates it, the Argo CD UI is accessible by Firefox again. link:https://issues.redhat.com/browse/GITOPS-1548[GITOPS-1548] - -[id="known-issues-1-5-0_{context}"] -== Known issues - -* Argo CD `.status.host` field is not updated when an `Ingress` resource is in use instead of a `Route` resource on OpenShift clusters. link:https://issues.redhat.com/browse/GITOPS-1920[GITOPS-1920] \ No newline at end of file diff --git a/modules/gitops-release-notes-1-5-1.adoc b/modules/gitops-release-notes-1-5-1.adoc deleted file mode 100644 index 566284e66310..000000000000 --- a/modules/gitops-release-notes-1-5-1.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -:_content-type: REFERENCE - -[id="gitops-release-notes-1-5-1_{context}"] -= Release notes for {gitops-title} 1.5.1 - -{gitops-title} 1.5.1 is now available on {product-title} 4.8, 4.9, 4.10, and 4.11. - -[id="fixed-issues-1-5-1_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* Before this update, if Argo CD's anonymous access was enabled, an unauthenticated user was able to craft a JWT token and get full access to the Argo CD instance. This issue is fixed now. link:https://bugzilla.redhat.com/show_bug.cgi?id=2081686[CVE-2022-29165] - -* Before this update, an unauthenticated user was able to display error messages on the login screen while SSO was enabled. This issue is now fixed. link:https://bugzilla.redhat.com/show_bug.cgi?id=2081689[CVE-2022-24905] - -* Before this update, all unpatched versions of Argo CD v0.7.0 and later were vulnerable to a symlink-following bug. As a result, an unauthorized user with repository write access would be able to leak sensitive files from Argo CD's repo-server. This issue is now fixed. 
link:https://bugzilla.redhat.com/show_bug.cgi?id=2081686[CVE-2022-24904] \ No newline at end of file diff --git a/modules/gitops-release-notes-1-5-2.adoc b/modules/gitops-release-notes-1-5-2.adoc deleted file mode 100644 index bfc7a55b7674..000000000000 --- a/modules/gitops-release-notes-1-5-2.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -:_content-type: REFERENCE - -[id="gitops-release-notes-1-5-2_{context}"] -= Release notes for {gitops-title} 1.5.2 - -{gitops-title} 1.5.2 is now available on {product-title} 4.8, 4.9, 4.10, and 4.11. - -[id="fixed-issues-1-5-2_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* Before this update, images referenced by the `redhat-operator-index` were missing. This issue is now fixed. link:https://issues.redhat.com/browse/GITOPS-2036[GITOPS-2036] \ No newline at end of file diff --git a/modules/gitops-release-notes-1-5-3.adoc b/modules/gitops-release-notes-1-5-3.adoc deleted file mode 100644 index 09068129474b..000000000000 --- a/modules/gitops-release-notes-1-5-3.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -:_content-type: REFERENCE - -[id="gitops-release-notes-1-5-3_{context}"] -= Release notes for {gitops-title} 1.5.3 - -{gitops-title} 1.5.3 is now available on {product-title} 4.8, 4.9, 4.10, and 4.11. - -[id="fixed-issues-1-5-3_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* Before this update, all unpatched versions of Argo CD v1.0.0 and later were vulnerable to a cross-site scripting bug. As a result, an unauthorized user would be able to inject a javascript link in the UI. This issue is now fixed. link:https://bugzilla.redhat.com/show_bug.cgi?id=2096278[CVE-2022-31035] - -* Before this update, all versions of Argo CD v0.11.0 and later were vulnerable to multiple attacks when SSO login was initiated from the Argo CD CLI or the UI. This issue is now fixed. link:https://bugzilla.redhat.com/show_bug.cgi?id=2096282[CVE-2022-31034] - -* Before this update, all unpatched versions of Argo CD v0.7 and later were vulnerable to a memory consumption bug. As a result, an unauthorized user would be able to crash the Argo CD's repo-server. This issue is now fixed. link:https://bugzilla.redhat.com/show_bug.cgi?id=2096283[CVE-2022-31016] - -* Before this update, all unpatched versions of Argo CD v1.3.0 and later were vulnerable to a symlink-following bug. As a result, an unauthorized user with repository write access would be able to leak sensitive YAML files from Argo CD's repo-server. This issue is now fixed. link:https://bugzilla.redhat.com/show_bug.cgi?id=2096291[CVE-2022-31036] \ No newline at end of file diff --git a/modules/gitops-release-notes-1-5-4.adoc b/modules/gitops-release-notes-1-5-4.adoc deleted file mode 100644 index 7db547928f1d..000000000000 --- a/modules/gitops-release-notes-1-5-4.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -:_content-type: REFERENCE - -[id="gitops-release-notes-1-5-4_{context}"] -= Release notes for {gitops-title} 1.5.4 - -{gitops-title} 1.5.4 is now available on {product-title} 4.8, 4.9, 4.10, and 4.11. 
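Several notes in this stream, including the 1.3.7 and 1.4.6 notes above, recommend switching the Operator's update channel (for example, *GitOps-1.4*, or later *gitops-1.5*, *latest*, and *stable*). Channel selection is made in the `Subscription` object; the following is only a rough sketch, and the subscription name, namespace, and catalog source are assumptions that can differ in your cluster:

[source,yaml]
----
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: openshift-gitops-operator # assumed subscription name
  namespace: openshift-operators  # assumed namespace
spec:
  channel: gitops-1.5             # or latest / stable, as described in these notes
  name: openshift-gitops-operator
  source: redhat-operators        # assumed catalog source
  sourceNamespace: openshift-marketplace
----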
- -[id="fixed-issues-1-5-4_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* Before this update, the {gitops-title} was using an older version of the *REDIS 5* image tag. This update fixes the issue and upgrades the `rhel8/redis-5` image tag. link:https://issues.redhat.com/browse/GITOPS-2037[GITOPS-2037] \ No newline at end of file diff --git a/modules/gitops-release-notes-1-5-5.adoc b/modules/gitops-release-notes-1-5-5.adoc deleted file mode 100644 index 36f912fad862..000000000000 --- a/modules/gitops-release-notes-1-5-5.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -:_content-type: REFERENCE - -[id="gitops-release-notes-1-5-5_{context}"] -= Release notes for {gitops-title} 1.5.5 - -{gitops-title} 1.5.5 is now available on {product-title} 4.8, 4.9, 4.10, and 4.11. - -[id="new-features-1-5-5_{context}"] -== New features - -The current release adds the following improvements: - -* With this update, the bundled Argo CD has been updated to version 2.3.7. - -[id="fixed-issues-1-5-5_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* Before this update, the `redis-ha-haproxy` pods of an ArgoCD instance failed when more restrictive SCCs were present in the cluster. This update fixes the issue by updating the security context in workloads. link:https://issues.redhat.com/browse/GITOPS-2034[GITOPS-2034] - -[id="known-issues-1-5-5_{context}"] -== Known issues - -* {gitops-title} Operator can use RHSSO (KeyCloak) with OIDC and Dex. However, with a recent security fix applied, the Operator cannot validate the RHSSO certificate in some scenarios. link:https://issues.redhat.com/browse/GITOPS-2214[GITOPS-2214] -+ -As a workaround, disable TLS validation for the OIDC (Keycloak/RHSSO) endpoint in the ArgoCD specification. -+ -[source,yaml] ----- -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: example-argocd -spec: - extraConfig: - "admin.enabled": "true" -... ----- \ No newline at end of file diff --git a/modules/gitops-release-notes-1-5-6.adoc b/modules/gitops-release-notes-1-5-6.adoc deleted file mode 100644 index 25db860d7198..000000000000 --- a/modules/gitops-release-notes-1-5-6.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -:_content-type: REFERENCE - -[id="gitops-release-notes-1-5-6_{context}"] -= Release notes for {gitops-title} 1.5.6 - -{gitops-title} 1.5.6 is now available on {product-title} 4.8, 4.9, 4.10, and 4.11. - -[id="fixed-issues-1-5-6_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* Before this update, in a large set of applications the application controllers were restarted multiple times due to the unresponsiveness of liveness probes. This update fixes the issue by removing the liveness probe in the application controller `StatefulSet` object. link:https://issues.redhat.com/browse/GITOPS-2153[GITOPS-2153] - -* Before this update, the RHSSO certificate cannot be validated when it is set up with a certificate which is not signed by certificate authorities. This update fixes the issue and now you can provide a custom certificate which will be used in verifying the Keycloak's TLS certificate when communicating with it. You can add the `rootCA` to the Argo CD custom resource `.spec.keycloak.rootCA` field. 
The Operator reconciles this change and updates the `oidc.config` field in the `argocd-cm` `ConfigMap` with the PEM-encoded root certificate. link:https://issues.redhat.com/browse/GITOPS-2214[GITOPS-2214] -+ -[NOTE] -==== -Restart the Argo CD server pod after updating the `.spec.keycloak.rootCA` field. -==== -+ -For example: -+ -[source,yaml] ----- -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: example-argocd - labels: - example: basic -spec: - sso: - provider: keycloak - keycloak: - rootCA: | - ---- BEGIN CERTIFICATE ---- - This is a dummy certificate - Please place this section with appropriate rootCA - ---- END CERTIFICATE ---- - server: - route: - enabled: true ----- - -* Before this update, a terminating namespace that was managed by Argo CD would block the creation of roles and other configuration of other managed namespaces. This update fixes this issue. link:https://issues.redhat.com/browse/GITOPS-2278[GITOPS-2278] - -* Before this update, the Dex pods failed to start with `CreateContainerConfigError` when an SCC of `anyuid` was assigned to the Dex `ServiceAccount` resource. This update fixes this issue by assigning a default user id to the Dex container. link:https://issues.redhat.com/browse/GITOPS-2235[GITOPS-2235] - diff --git a/modules/gitops-release-notes-1-5-7.adoc b/modules/gitops-release-notes-1-5-7.adoc deleted file mode 100644 index 9f3ff842d5d3..000000000000 --- a/modules/gitops-release-notes-1-5-7.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -:_content-type: REFERENCE - -[id="gitops-release-notes-1-5-7_{context}"] -= Release notes for {gitops-title} 1.5.7 - -{gitops-title} 1.5.7 is now available on {product-title} 4.8, 4.9, 4.10, and 4.11. - -[id="fixed-issues-1-5-7_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* From {product-title} 4.12, it is optional to install the console. This fix updates the {gitops-title} Operator to prevent errors with the Operator if the console is not installed. link:https://issues.redhat.com/browse/GITOPS-2353[GITOPS-2353] diff --git a/modules/gitops-release-notes-1-5-9.adoc b/modules/gitops-release-notes-1-5-9.adoc deleted file mode 100644 index 63f69f7de14f..000000000000 --- a/modules/gitops-release-notes-1-5-9.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -:_content-type: REFERENCE - -[id="gitops-release-notes-1-5-9_{context}"] -= Release notes for {gitops-title} 1.5.9 - -{gitops-title} 1.5.9 is now available on {product-title} 4.8, 4.9, 4.10, and 4.11. - -[id="fixed-issues-1-5-9_{context}"] -== Fixed issues - -* Before this update, all versions of Argo CD v1.8.2 and later were vulnerable to an improper authorization bug. As a result, Argo CD would accept tokens for users who might not be authorized to access the cluster. This issue is now fixed. 
link:https://bugzilla.redhat.com/show_bug.cgi?id=2160492[CVE-2023-22482] diff --git a/modules/gitops-release-notes-1-6-0.adoc b/modules/gitops-release-notes-1-6-0.adoc deleted file mode 100644 index 9f423d740955..000000000000 --- a/modules/gitops-release-notes-1-6-0.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc -:_content-type: REFERENCE - -[id="gitops-release-notes-1-6-0_{context}"] -= Release notes for {gitops-title} 1.6.0 - -{gitops-title} 1.6.0 is now available on {product-title} 4.8, 4.9, 4.10, and 4.11. - -[id="new-features-1-6-0_{context}"] -== New features - -The current release adds the following improvements: - -* Previously, the Argo CD `ApplicationSet` controller was a technology preview (TP) feature. With this update, it is a general availability (GA) feature. link:https://issues.redhat.com/browse/GITOPS-1958[GITOPS-1958] - -* With this update, the latest releases of {gitops-title} are available in `latest` and version-based channels. To get these upgrades, update the `channel` parameter in the `Subscription` object YAML file: change its value from `stable` to `latest` or a version-based channel such as `gitops-1.6`. link:https://issues.redhat.com/browse/GITOPS-1791[GITOPS-1791] - -* With this update, the parameters of the `spec.sso` field that controlled the keycloak configurations are moved to `.spec.sso.keycloak`. -The parameters of the `.spec.dex` field have been added to `.spec.sso.dex`. Start using `.spec.sso.provider` to enable or disable Dex. The `.spec.dex` parameters are deprecated and planned to be removed in version 1.9, along with the `DISABLE_DEX` and `.spec.sso` fields for keycloak configuration. link:https://issues.redhat.com/browse/GITOPS-1983[GITOPS-1983] - -* With this update, the Argo CD Notifications controller is available as an optional workload that can be enabled or disabled by using the `.spec.notifications.enabled` parameter in the Argo CD custom resource. The Argo CD Notifications controller is available as a Technical Preview feature. link:https://issues.redhat.com/browse/GITOPS-1917[GITOPS-1917] - -:FeatureName: Argo CD Notifications controller -include::snippets/technology-preview.adoc[] - -* With this update, resource exclusions for Tekton pipeline runs and task runs are added by default. Argo CD prunes these resources by default. These resource exclusions are added to the new Argo CD instances that are created from the {product-title}. If the instances are created from the CLI, the resources are not added. link:https://issues.redhat.com/browse/GITOPS-1876[GITOPS-1876] - -* With this update, you can select the tracking method that Argo CD uses by setting the `resourceTrackingMethod` parameter in the Operand's specification. link:https://issues.redhat.com/browse/GITOPS-1862[GITOPS-1862] - -* With this update, you can add entries to the `argocd-cm` configMap using the `extraConfig` field of the {gitops-title} Argo CD custom resource. The entries specified are reconciled to the live `argocd-cm` configMap without validations. link:https://issues.redhat.com/browse/GITOPS-1964[GITOPS-1964] - -* With this update, on {product-title} 4.11, the {gitops-title} *Environments* page in the *Developer* perspective shows the history of successful deployments of the application environments, along with links to the revision for each deployment.
link:https://issues.redhat.com/browse/GITOPS-1269[GITOPS-1269] - -* With this update, you can manage resources with Argo CD that are also being used as template resources or "source" by an Operator. link:https://issues.redhat.com/browse/GITOPS-982[GITOPS-982] - -* With this update, the Operator will now configure the Argo CD workloads with the correct permissions to satisfy the Pod Security Admission that has been enabled for Kubernetes 1.24. link:https://issues.redhat.com/browse/GITOPS-2026[GITOPS-2026] - -* With this update, Config Management Plugins 2.0 is supported. You can use the Argo CD custom resource to specify sidebar containers for the repo server. link:https://issues.redhat.com/browse/GITOPS-766[GITOPS-776] - -* With this update, all communication between the Argo CD components and the Redis cache are properly secured using modern TLS encryption. link:https://issues.redhat.com/browse/GITOPS-720[GITOPS-720] - -* This release of {gitops-title} adds support for IBM Z and IBM Power on {product-title} 4.10. Currently, installations in restricted environments are not supported on IBM Z and IBM Power. - -[id="fixed-issues-1-6-0_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* Before this update, the `system:serviceaccount:argocd:gitops-argocd-application-controller` cannot create resource "prometheusrules" in API group `monitoring.coreos.com` in the namespace `webapps-dev`. This update fixes this issue and {gitops-title} is now able to manage all resources from the `monitoring.coreos.com` API group. link:https://issues.redhat.com/browse/GITOPS-1638[GITOPS-1638] - -* Before this update, while reconciling cluster permissions, if a secret belonged to a cluster config instance it was deleted. This update fixes this issue. Now, the `namespaces` field from the secret is deleted instead of the secret. link:https://issues.redhat.com/browse/GITOPS-1777[GITOPS-1777] - -* Before this update, if you installed the HA variant of Argo CD through the Operator, the Operator created the Redis `StatefulSet` object with `podAffinity` rules instead of `podAntiAffinity` rules. This update fixes this issue and now the Operator creates the Redis `StatefulSet` with `podAntiAffinity` rules. link:https://issues.redhat.com/browse/GITOPS-1645[GITOPS-1645] - -* Before this update, Argo CD **ApplicationSet** had too many `ssh` Zombie processes. This update fixes this issue: it adds tini, a simple init daemon that spawns processes and reaps zombies, to the **ApplicationSet** controller. This ensures that a `SIGTERM` signal is properly passed to the running process, preventing it from being a zombie process. link:https://issues.redhat.com/browse/GITOPS-2108[GITOPS-2108] - -[id="known-issues-1-6-0_{context}"] -== Known issues - -* {gitops-title} Operator can make use of RHSSO (KeyCloak) through OIDC in addition to Dex. However, with a recent security fix applied, the certificate of RHSSO cannot be validated in some scenarios. link:https://issues.redhat.com/browse/GITOPS-2214[GITOPS-2214] -+ -As a workaround, disable TLS validation for the OIDC (Keycloak/RHSSO) endpoint in the ArgoCD specification. - -[source,yaml] ----- -spec: - extraConfig: - oidc.tls.insecure.skip.verify: "true" -... 
----- \ No newline at end of file diff --git a/modules/gitops-release-notes-1-6-1.adoc b/modules/gitops-release-notes-1-6-1.adoc deleted file mode 100644 index 8b1460673385..000000000000 --- a/modules/gitops-release-notes-1-6-1.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -:_content-type: REFERENCE - -[id="gitops-release-notes-1-6-1_{context}"] -= Release notes for {gitops-title} 1.6.1 - -{gitops-title} 1.6.1 is now available on {product-title} 4.8, 4.9, 4.10, and 4.11. - -[id="fixed-issues-1-6-1_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* Before this update, in a large set of applications the application controllers were restarted multiple times due to the unresponsiveness of liveness probes. This update fixes the issue by removing the liveness probe in the application controller `StatefulSet` object. link:https://issues.redhat.com/browse/GITOPS-2153[GITOPS-2153] - -* Before this update, the RHSSO certificate cannot be validated when it is set up with a certificate which is not signed by certificate authorities. This update fixes the issue and now you can provide a custom certificate which will be used in verifying the Keycloak's TLS certificate when communicating with it. You can add the `rootCA` to the Argo CD custom resource `.spec.keycloak.rootCA` field. The Operator reconciles this change and updates the `oidc.config` field in the `argocd-cm` `ConfigMap` with the PEM-encoded root certificate. link:https://issues.redhat.com/browse/GITOPS-2214[GITOPS-2214] -+ -[NOTE] -==== -Restart the Argo CD server pod after updating the `.spec.keycloak.rootCA` field. -==== -+ -For example: -+ -[source,yaml] ----- -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: example-argocd - labels: - example: basic -spec: - sso: - provider: keycloak - keycloak: - rootCA: | - ---- BEGIN CERTIFICATE ---- - This is a dummy certificate - Please place this section with appropriate rootCA - ---- END CERTIFICATE ---- - server: - route: - enabled: true ----- - -* Before this update, a terminating namespace that was managed by Argo CD would block the creation of roles and other configuration of other managed namespaces. This update fixes this issue. link:https://issues.redhat.com/browse/GITOPS-2277[GITOPS-2277] - -* Before this update, the Dex pods failed to start with `CreateContainerConfigError` when an SCC of `anyuid` was assigned to the Dex `ServiceAccount` resource. This update fixes this issue by assigning a default user id to the Dex container. link:https://issues.redhat.com/browse/GITOPS-2235[GITOPS-2235] - diff --git a/modules/gitops-release-notes-1-6-2.adoc b/modules/gitops-release-notes-1-6-2.adoc deleted file mode 100644 index ea122b5bb8f1..000000000000 --- a/modules/gitops-release-notes-1-6-2.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -:_content-type: REFERENCE - -[id="gitops-release-notes-1-6-2_{context}"] -= Release notes for {gitops-title} 1.6.2 - -{gitops-title} 1.6.2 is now available on {product-title} 4.8, 4.9, 4.10 and 4.11. - -[id="new-features-1-6-2_{context}"] -== New features - -* This release removes the `DISABLE_DEX` environment variable from the `openshift-gitops-operator` CSV file. As a result, this environment variable is no longer set when you perform a fresh installation of {gitops-title}. 
link:https://issues.redhat.com/browse/GITOPS-2360[GITOPS-2360] - -[id="fixed-issues-1-6-2_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* Before this update, the subscription health check was marked *degraded* for missing *InstallPlan* when more than 5 Operators were installed in a project. This update fixes the issue. link:https://issues.redhat.com/browse/GITOPS-2018[GITOPS-2018] - -* Before this update, the {gitops-title} Operator would spam the cluster with a deprecation notice warning whenever it detected that an Argo CD instance used deprecated fields. This update fixes this issue and shows only one warning event for each instance that detects a field. link:https://issues.redhat.com/browse/GITOPS-2230[GITOPS-2230] - -* From {product-title} 4.12, it is optional to install the console. This fix updates the {gitops-title} Operator to prevent errors with the Operator if the console is not installed. link:https://issues.redhat.com/browse/GITOPS-2352[GITOPS-2352] - - diff --git a/modules/gitops-release-notes-1-6-4.adoc b/modules/gitops-release-notes-1-6-4.adoc deleted file mode 100644 index 922f10fbb30e..000000000000 --- a/modules/gitops-release-notes-1-6-4.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -:_content-type: REFERENCE - -[id="gitops-release-notes-1-6-4_{context}"] -= Release notes for {gitops-title} 1.6.4 - -{gitops-title} 1.6.4 is now available on {product-title} 4.8, 4.9, 4.10, and 4.11. - -[id="fixed-issues-1-6-4_{context}"] -== Fixed issues - -* Before this update, all versions of Argo CD v1.8.2 and later were vulnerable to an improper authorization bug. As a result, Argo CD would accept tokens for audiences who might not be intended to access the cluster. This issue is now fixed. link:https://bugzilla.redhat.com/show_bug.cgi?id=2160492[CVE-2023-22482] diff --git a/modules/gitops-release-notes-1-7-0.adoc b/modules/gitops-release-notes-1-7-0.adoc deleted file mode 100644 index d4ae833c70a2..000000000000 --- a/modules/gitops-release-notes-1-7-0.adoc +++ /dev/null @@ -1,91 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc -:_content-type: REFERENCE - -[id="gitops-release-notes-1-7-0_{context}"] -= Release notes for {gitops-title} 1.7.0 - -{gitops-title} 1.7.0 is now available on {product-title} 4.10, 4.11, and 4.12. - -[id="new-features-1-7-0_{context}"] -== New features - -The current release adds the following improvements: - -* With this update, you can add environment variables to the Notifications controller. link:https://issues.redhat.com/browse/GITOPS-2313[GITOPS-2313] - -* With this update, the default nodeSelector `"kubernetes.io/os": "linux"` key-value pair is added to all workloads such that they only schedule on Linux nodes. In addition, any custom node selectors are added to the default and take precedence if they have the same key. link:https://issues.redhat.com/browse/GITOPS-2215[GITOPS-2215] - -* With this update, you can set custom node selectors in the Operator workloads by editing their `GitopsService` custom resource. 
link:https://issues.redhat.com/browse/GITOPS-2164[GITOPS-2164] - -* With this update, you can use the RBAC policy matcher mode to select from the following options: `glob` (default) and `regex`.link:https://issues.redhat.com/browse/GITOPS-1975[GITOPS-1975] - -* With this update, you can customize resource behavior using the following additional subkeys: -+ -[options=header] -|=== -| Subkey | Key form | Mapped field in argocd-cm -| resourceHealthChecks | resource.customizations.health. | resource.customizations.health -| resourceIgnoreDifferences | resource.customizations.ignoreDifferences. | resource.customizations.ignoreDifferences -| resourceActions | resource.customizations.actions. | resource.customizations.actions -|=== -+ -link:https://issues.redhat.com/browse/GITOPS-1561[GITOPS-1561] -+ -[NOTE] -==== -In future releases, there is a possibility to deprecate the old method of customizing resource behavior by using only resourceCustomization and not subkeys. -==== - -* With this update, to use the *Environments* page in the *Developer* perspective, you must upgrade if you are using a {gitops-title} version prior to 1.7 and {product-title} 4.15 or above. link:https://issues.redhat.com/browse/GITOPS-2415[GITOPS-2415] - -* With this update, you can create applications, which are managed by the same control plane Argo CD instance, in any namespace in the same cluster. As an administrator, perform the following actions to enable this update: -** Add the namespace to the `.spec.sourceNamespaces` attribute for a cluster-scoped Argo CD instance that manages the application. -** Add the namespace to the `.spec.sourceNamespaces` attribute in the `AppProject` custom resource that is associated with the application.  -+ -link:https://issues.redhat.com/browse/GITOPS-2341[GITOPS-2341] - -:FeatureName: Argo CD applications in non-control plane namespaces -include::snippets/technology-preview.adoc[] - -* With this update, Argo CD supports the Server-Side Apply feature, which helps users to perform the following tasks: -** Manage large resources which are too big for the allowed annotation size of 262144 bytes. -** Patch an existing resource that is not managed or deployed by Argo CD. -+ -You can configure this feature at application or resource level. link:https://issues.redhat.com/browse/GITOPS-2340[GITOPS-2340] - -[id="fixed-issues-1-7-0_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* Before this update, {gitops-title} releases were affected by an issue of Dex pods failing with `CreateContainerConfigError` error when the `anyuid` SCC was assigned to the Dex service account. This update fixes the issue by assigning a default user id to the Dex container. link:https://issues.redhat.com/browse/GITOPS-2235[GITOPS-2235] - -* Before this update, {gitops-title} used the RHSSO (Keycloak) through OIDC in addition to Dex. However, with a recent security fix, the certificate of RHSSO could not be validated when configured with a certificate not signed by one of the well-known certificate authorities. This update fixes the issue; you can now provide a custom certificate to verify the KeyCloak's TLS certificate while communicating with it. In addition, you can add `rootCA` to the Argo CD custom resource `.spec.keycloak.rootCA` field. The Operator reconciles such changes and updates the `oidc.config in argocd-cm` config map with the PEM encoded root certificate. 
link:https://issues.redhat.com/browse/GITOPS-2214[GITOPS-2214] - -Example Argo CD with Keycloak configuration: - -[source,yaml] ----- -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: example-argocd -spec: - sso: - keycloak: - rootCA: '' - provider: keycloak -....... -....... ----- - -* Before this update, the application controllers restarted multiple times due to the unresponsiveness of liveness probes. This update fixes the issue by removing the liveness probe in the `statefulset` application controller. link:https://issues.redhat.com/browse/GITOPS-2153[GITOPS-2153] - -[id="known-issues-1-7-0_{context}"] -== Known issues - -* Before this update, the Operator did not reconcile the `mountsatoken` and `ServiceAccount` settings for the repository server. While this has been fixed, deletion of the service account does not revert to the default. link:https://issues.redhat.com/browse/GITOPS-1873[GITOPS-1873] - -* Workaround: Manually set the `spec.repo.serviceaccount` field to the `default` service account. link:https://issues.redhat.com/browse/GITOPS-2452[GITOPS-2452] diff --git a/modules/gitops-release-notes-1-7-1.adoc b/modules/gitops-release-notes-1-7-1.adoc deleted file mode 100644 index fe3492527f3d..000000000000 --- a/modules/gitops-release-notes-1-7-1.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc -:_content-type: REFERENCE - -[id="gitops-release-notes-1-7-1_{context}"] -= Release notes for {gitops-title} 1.7.1 - -{gitops-title} 1.7.1 is now available on {product-title} 4.10, 4.11, and 4.12. - -[id="errata-updates-1-7-1_{context}"] -== Errata updates - -=== RHSA-2023:0467 - {gitops-title} 1.7.1 security update advisory - -Issued: 2023-01-25 - -The list of security fixes that are included in this release is documented in the link:https://access.redhat.com/errata/RHSA-2023:0467[RHSA-2023:0467] advisory. - -If you have installed the {gitops-title} Operator, run the following command to view the container images in this release: - -[source,terminal] ----- -$ oc describe deployment gitops-operator-controller-manager -n openshift-operators ----- diff --git a/modules/gitops-release-notes-1-7-3.adoc b/modules/gitops-release-notes-1-7-3.adoc deleted file mode 100644 index e353b0c4bf62..000000000000 --- a/modules/gitops-release-notes-1-7-3.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -:_content-type: REFERENCE - -[id="gitops-release-notes-1-7-3_{context}"] -= Release notes for {gitops-title} 1.7.3 - -{gitops-title} 1.7.3 is now available on {product-title} 4.10, 4.11, and 4.12. - -[id="errata-updates-1-7-3_{context}"] -== Errata updates - -=== RHSA-2023:1454 - {gitops-title} 1.7.3 security update advisory - -Issued: 2023-03-23 - -The list of security fixes that are included in this release is documented in the link:https://access.redhat.com/errata/RHSA-2023:1454[RHSA-2023:1454] advisory.
- -If you have installed the {gitops-title} Operator, run the following command to view the container images in this release: - -[source,terminal] ----- -$ oc describe deployment gitops-operator-controller-manager -n openshift-operators ----- \ No newline at end of file diff --git a/modules/gitops-release-notes-1-7-4.adoc b/modules/gitops-release-notes-1-7-4.adoc deleted file mode 100644 index 9ee6ddabeb75..000000000000 --- a/modules/gitops-release-notes-1-7-4.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -:_content-type: REFERENCE - -[id="gitops-release-notes-1-7-4_{context}"] -= Release notes for {gitops-title} 1.7.4 - -{gitops-title} 1.7.4 is now available on {product-title} 4.10, 4.11, and 4.12. - -[id="errata-updates-1-7-4_{context}"] -== Errata updates - -=== RHSA-2023:1454 - {gitops-title} 1.7.4 security update advisory - -Issued: 2023-03-23 - -The list of security fixes that are included in this release is documented in the link:https://access.redhat.com/errata/RHSA-2023:1454[RHSA-2023:1454] advisory. - -If you have installed the {gitops-title} Operator, run the following command to view the container images in this release: - -[source,terminal] ----- -$ oc describe deployment gitops-operator-controller-manager -n openshift-operators ----- \ No newline at end of file diff --git a/modules/gitops-release-notes-1-8-0.adoc b/modules/gitops-release-notes-1-8-0.adoc deleted file mode 100644 index 03279ff7fd1d..000000000000 --- a/modules/gitops-release-notes-1-8-0.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -:_content-type: REFERENCE -[id="gitops-release-notes-1-8-0_{context}"] -= Release notes for {gitops-title} 1.8.0 - -{gitops-title} 1.8.0 is now available on {product-title} 4.10, 4.11, 4.12, and 4.13. - -[id="new-features-1-8-0_{context}"] -== New features - -The current release adds the following improvements: - -* With this update, you can add support for the ApplicationSet Progressive Rollout Strategy feature. Using this feature, you can enhance the ArgoCD ApplicationSet resource to embed a rollout strategy for a progressive application resource update after you modify the ApplicationSet spec or Application templates. When you enable this feature, applications are updated in a declarative order instead of simultaneously. link:https://issues.redhat.com/browse/GITOPS-956[GITOPS-956] -+ -[IMPORTANT] -==== -ApplicationSet Progressive Rollout Strategy is a Technology Preview feature. -==== -//https://github.com/argoproj/argo-cd/pull/12103 - -* With this update, the *Application environments* page in the *Developer* perspective of the {product-title} web console is decoupled from the {gitops-title} Application Manager command-line interface (CLI), `kam`. You do not have to use the `kam` CLI to generate Application Environment manifests for the environments to show up in the *Developer* perspective of the {product-title} web console. You can use your own manifests, but the environments must still be represented by namespaces. In addition, specific labels and annotations are still needed. link:https://issues.redhat.com/browse/GITOPS-1785[GITOPS-1785] - -* With this update, the {gitops-title} Operator and the `kam` CLI are now available to use on ARM architecture on {product-title}. link:https://issues.redhat.com/browse/GITOPS-1688[GITOPS-1688] -+ -[IMPORTANT] -==== -`spec.sso.provider: keycloak` is not yet supported on ARM. 
-==== - -* With this update, you can enable workload monitoring for specific Argo CD instances by setting the `.spec.monitoring.enabled` flag value to `true`. As a result, the Operator creates a `PrometheusRule` object that contains alert rules for each Argo CD component. These alert rules trigger an alert when the replica count of the corresponding component has drifted from the desired state for a certain amount of time. The Operator will not overwrite the changes made to the `PrometheusRule` object by the users. link:https://issues.redhat.com/browse/GITOPS-2459[GITOPS-2459] - -* With this update, you can pass command arguments to the repo server deployment using the Argo CD CR. link:https://issues.redhat.com/browse/GITOPS-2445[GITOPS-2445] -+ -For example: -+ -[source,yaml] ----- -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: example-argocd -spec: - repo: - extraRepoCommandArgs: - - --max.combined.directory.manifests.size - - 10M ----- - -[id="fixed-issues-1-8-0_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* Before this update, you could set the `ARGOCD_GIT_MODULES_ENABLED` environment variable only on the `openshift-gitops-repo-server` pod and not on the `ApplicationSet Controller` pod. As a result, when using the Git generator, Git submodules were cloned during the generation of child applications because the variable was missing from the `ApplicationSet Controller` environment. In addition, if the credentials required to clone these submodules were not configured in ArgoCD, the application generation failed. This update fixes the issue; you can now add any environment variables such as `ArgoCD_GIT_MODULES_ENABLED` to the `ApplicationSet Controller` pod using the Argo CD CR. The `ApplicationSet Controller` pod then successfully generates child applications from the cloned repository and no submodule is cloned in the process. link:https://issues.redhat.com/browse/GITOPS-2399[GITOPS-2399] -+ -For example: -+ -[source,yaml] ----- -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: example-argocd - labels: - example: basic -spec: - applicationSet: - env: - - name: ARGOCD_GIT_MODULES_ENABLED - value: "true" ----- - -* Before this update, while installing the {gitops-title} Operator v1.7.0, the default `argocd-cm.yml` config map file created for authenticating Dex contained the base64-encoded client secret in the format of a `key:value` pair. This update fixes this issue by not storing the client secret in the default `argocd-cm.yml` config map file. Instead, the client secret is inside an `argocd-secret` object now, and you can reference it inside the configuration map as a secret name. link:https://issues.redhat.com/browse/GITOPS-2570[GITOPS-2570] - -[id="known-issues-1-8-0_{context}"] -== Known issues - -* When you deploy applications using your manifests without using the `kam` CLI and view the applications in the *Application environments* page in the *Developer* perspective of the {product-title} web console, the Argo CD URL to the corresponding application does not load the page as expected from the Argo CD icon in the card. 
link:https://issues.redhat.com/browse/GITOPS-2736[GITOPS-2736] diff --git a/modules/gitops-release-notes-1-8-1.adoc b/modules/gitops-release-notes-1-8-1.adoc deleted file mode 100644 index fa8a41ed6b06..000000000000 --- a/modules/gitops-release-notes-1-8-1.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -:_content-type: REFERENCE - -[id="gitops-release-notes-1-8-1_{context}"] -= Release notes for {gitops-title} 1.8.1 - -{gitops-title} 1.8.1 is now available on {product-title} 4.10, 4.11, 4.12, and 4.13. - -[id="errata-updates-1-8-1_{context}"] -== Errata updates - -=== RHSA-2023:1452 - {gitops-title} 1.8.1 security update advisory - -Issued: 2023-03-23 - -The list of security fixes that are included in this release is documented in the link:https://access.redhat.com/errata/RHSA-2023:1452[RHSA-2023:1452] advisory. - -If you have installed the {gitops-title} Operator, run the following command to view the container images in this release: - -[source,terminal] ----- -$ oc describe deployment gitops-operator-controller-manager -n openshift-operators ----- \ No newline at end of file diff --git a/modules/gitops-release-notes-1-8-2.adoc b/modules/gitops-release-notes-1-8-2.adoc deleted file mode 100644 index 94bbe3944aba..000000000000 --- a/modules/gitops-release-notes-1-8-2.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc -:_content-type: REFERENCE -[id="gitops-release-notes-1-8-2_{context}"] -= Release notes for {gitops-title} 1.8.2 - -{gitops-title} 1.8.2 is now available on {product-title} 4.10, 4.11, 4.12, and 4.13. - -[id="fixed-issues-1-8-2_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* Before this update, when you configured Dex using the `.spec.dex` parameter and tried to log in to the Argo CD UI by using the *LOG IN VIA OPENSHIFT* option, you were not able to log in. This update fixes the issue. -+ -[IMPORTANT] -==== -The `spec.dex` parameter in the ArgoCD CR is deprecated. In a future release of {gitops-title} v1.9, configuring Dex using the `spec.dex` parameter in the ArgoCD CR is planned to be removed. Consider using the `.spec.sso` parameter instead. See "Enabling or disabling Dex using .spec.sso". link:https://issues.redhat.com/browse/GITOPS-2761[GITOPS-2761] -==== - -* Before this update, the cluster and `kam` CLI pods failed to start with a new installation of {gitops-title} v1.8.0 on the {product-title} 4.10 cluster. This update fixes the issue and now all pods run as expected. link:https://issues.redhat.com/browse/GITOPS-2762[GITOPS-2762] \ No newline at end of file diff --git a/modules/gitops-release-notes-1-8-3.adoc b/modules/gitops-release-notes-1-8-3.adoc deleted file mode 100644 index 2792dae872d4..000000000000 --- a/modules/gitops-release-notes-1-8-3.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc -:_content-type: REFERENCE -[id="gitops-release-notes-1-8-3_{context}"] -= Release notes for {gitops-title} 1.8.3 - -{gitops-title} 1.8.3 is now available on {product-title} 4.10, 4.11, 4.12, and 4.13. 
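Before applying this update, you might want to confirm which {gitops-title} version is currently installed on the cluster. The following command is a minimal sketch that reads the installed cluster service version (CSV) from the Operator subscription; it assumes the default subscription name `openshift-gitops-operator` in the `openshift-operators` namespace:

[source,terminal]
----
$ oc get subscriptions.operators.coreos.com openshift-gitops-operator -n openshift-operators \
  -o jsonpath='{.status.installedCSV}{"\n"}'
----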
- -[id="errata-updates-1-8-3_{context}"] -== Errata updates - -=== RHBA-2023:3206 and RHSA-2023:3229 - {gitops-title} 1.8.3 security update advisory - -Issued: 2023-05-18 - -The list of security fixes that are included in this release is documented in the following advisories: - -* link:https://access.redhat.com/errata/RHBA-2023:3206[RHBA-2023:3206] -* link:https://access.redhat.com/errata/RHSA-2023:3229[RHSA-2023:3229] - -If you have installed the {gitops-title} Operator, run the following command to view the container images in this release: - -[source,terminal] ----- -$ oc describe deployment gitops-operator-controller-manager -n openshift-operators ----- - -[id="fixed-issues-1-8-3_{context}"] -== Fixed issues - -* Before this update, when `Autoscale` was enabled and the horizontal pod autoscaler (HPA) controller tried to edit the replica settings in server deployment, the Operator overwrote it. In addition, any changes specified to the autoscaler parameters were not propagated correctly to the HPA on the cluster. This update fixes the issue. Now the Operator reconciles on replica drift only if `Autoscale` is disabled and the HPA parameters are updated correctly. link:https://issues.redhat.com/browse/GITOPS-2629[GITOPS-2629] diff --git a/modules/gitops-release-notes-1-9-0.adoc b/modules/gitops-release-notes-1-9-0.adoc deleted file mode 100644 index 7e90132a6740..000000000000 --- a/modules/gitops-release-notes-1-9-0.adoc +++ /dev/null @@ -1,129 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc -:_content-type: REFERENCE -[id="gitops-release-notes-1-9-0_{context}"] -= Release notes for {gitops-title} 1.9.0 - -{gitops-title} 1.9.0 is now available on {product-title} 4.12 and 4.13. - -[id="errata-updates-1-9-0_{context}"] -== Errata updates - -=== RHSA-2023:3557 - {gitops-title} 1.9.0 security update advisory - -Issued: 2023-06-09 - -The list of security fixes that are included in this release is documented in the following advisory: - -* link:https://access.redhat.com/errata/RHSA-2023:3557[RHSA-2023:3557] - -If you have installed the {gitops-title} Operator, run the following command to view the container images in this release: - -[source,terminal] ----- -$ oc describe deployment gitops-operator-controller-manager -n openshift-operators ----- - -[id="new-features-1-9-0_{context}"] -== New features - -The current release adds the following improvements: - -* With this update, you can use a custom `must-gather` tool to collect diagnostic information for project-level resources, cluster-level resources, and {gitops-title} components. This tool provides the debugging information about the cluster associated with {gitops-title}, which you can share with the Red Hat Support team for analysis. link:https://issues.redhat.com/browse/GITOPS-2797[GITOPS-2797] -+ -[IMPORTANT] -==== -The custom `must-gather` tool is a Technology Preview feature. -==== - -* With this update, you can add support to progressive delivery using Argo Rollouts. Currently, the supported traffic manager is only {SMProductName}. link:https://issues.redhat.com/browse/GITOPS-959[GITOPS-959] -+ -[IMPORTANT] -==== -Argo Rollouts is a Technology Preview feature. 
-==== - -[role="_additional-resources"] -.Additional resources -* link:https://argo-rollouts-manager.readthedocs.io/en/latest/crd_reference/[Using Argo Rollouts] - -[id="deprecated-features-1-9-0_{context}"] -== Deprecated and removed features - -* In {gitops-title} 1.7.0, the `.spec.resourceCustomizations` parameter was deprecated. The deprecated `.spec.resourceCustomizations` parameter is planned to be removed in the upcoming {gitops-title} GA v1.10.0 release. You can use the new formats `spec.ResourceHealthChecks`, `spec.ResourceIgnoreDifferences`, and `spec.ResourceActions` instead. link:https://issues.redhat.com/browse/GITOPS-2890[GITOPS-2890] - -* With this update, the support for the following deprecated `sso` and `dex` fields extends until the upcoming {gitops-title} GA v1.10.0 release: -+ -** The `.spec.sso.image`, `.spec.sso.version`, `.spec.sso.resources`, and `.spec.sso.verifyTLS` fields. -** The `.spec.dex` parameter along with `DISABLE_DEX`. -+ -The deprecated previous `sso` and `dex` fields were earlier scheduled for removal in the {gitops-title} v1.9.0 release but are now planned to be removed in the upcoming {gitops-title} GA v1.10.0 release. -link:https://issues.redhat.com/browse/GITOPS-2904[GITOPS-2904] - -[id="fixed-issues-1-9-0_{context}"] -== Fixed issues -The following issues have been resolved in the current release: - -* Before this update, when the `argocd-server-tls` secret was updated with a new certificate Argo CD was not always picking up this secret. As a result, the old expired certificate was presented. This update fixes the issue with a new `GetCertificate` function and ensures that the latest version of certificates is in use. When adding new certificates, now Argo CD picks them up automatically without the user having to restart the `argocd-server` pod. link:https://issues.redhat.com/browse/GITOPS-2375[GITOPS-2375] - -* Before this update, when enforcing GPG signature verification against a `targetRevision` integer pointing to a signed Git tag, users got a `Target revision in Git is not signed` error. This update fixes the issue and lets users enforce GPG signature verification against signed Git tags. link:https://issues.redhat.com/browse/GITOPS-2418[GITOPS-2418] - -* Before this update, users could not connect to Microsoft Team Foundation Server (TFS) type Git repositories through Argo CD deployed by the Operator. This update fixes the issue by updating the Git version to -2.39.3 in the Operator. link:https://issues.redhat.com/browse/GITOPS-2768[GITOPS-2768] - -* Before this update, when the Operator was deployed and running with the High availability (HA) feature enabled, setting resource limits under the `.spec.ha.resources` field did not affect Redis HA pods. This update fixes the reconciliation by adding checks in the Redis reconciliation code. These checks ensure whether the `spec.ha.resources` field in the Argo CD custom resource (CR) is updated. When the Argo CD CR is updated with new CPU and memory requests or limit values for HA, now these changes are applied to the Redis HA pods. link:https://issues.redhat.com/browse/GITOPS-2404[GITOPS-2404] - -* Before this update, if a namespace-scoped Argo CD instance was managing multiple namespaces by using the `managed-by` label and one of those managed namespaces was in a *Terminating* state, the Argo CD instance could not deploy resources to all other managed namespaces. 
This update fixes the issue by enabling the Operator to remove the `managed-by` label from any previously managed now terminating namespace. Now, a terminating namespace managed by a namespace-scoped Argo CD instance does not block the deployment of resources to other managed namespaces. link:https://issues.redhat.com/browse/GITOPS-2627[GITOPS-2627] - -[id="known-issues-1-10_{context}"] -== Known issues -* Currently, the Argo CD does not read the Transport Layer Security (TLS) certificates from the path specified in the `argocd-tls-certs-cm` config map resulting in the `x509: certificate signed by unknown authority` error. -+ -Workaround: Perform the following steps: - -. Add the `SSL_CERT_DIR` environment variable: -+ -.Example Argo CD custom resource - -[source,yaml] ----- -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: example-argocd - labels: - example: repo -spec: - ... - repo: - env: - - name: SSL_CERT_DIR - value: /tmp/sslcertdir - volumeMounts: - - name: ssl - mountPath: /tmp/sslcertdir - volumes: - - name: ssl - configMap: - name: user-ca-bundle - ... ----- - -. Create an empty config map in the namespace where the subscription for your Operator exists and include the following label: -+ -.Example config map - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-ca-bundle <1> - labels: - config.openshift.io/inject-trusted-cabundle: "true" <2> ----- -<1> Name of the config map. -<2> Requests the Cluster Network Operator to inject the merged bundle. -+ -After creating this config map, the `user-ca-bundle` content from the `openshift-config` namespace automatically gets injected into this config map, even merged with the system ca-bundle. link:https://issues.redhat.com/browse/GITOPS-1482[GITOPS-1482] diff --git a/modules/gitops-release-notes-1-9-1.adoc b/modules/gitops-release-notes-1-9-1.adoc deleted file mode 100644 index cca0e6f9bff6..000000000000 --- a/modules/gitops-release-notes-1-9-1.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -:_content-type: REFERENCE - -[id="gitops-release-notes-1-9-1_{context}"] -= Release notes for {gitops-title} 1.9.1 - -{gitops-title} 1.9.1 is now available on {product-title} 4.12 and 4.13. - -[id="errata-updates-1-9-1_{context}"] -== Errata updates - -=== RHSA-2023:3591 and RHBA-2023:4117 - {gitops-title} 1.9.1 security update advisory - -Issued: 2023-07-17 - -The list of security fixes that are included in this release is documented in the following advisories: - -* link:https://access.redhat.com/errata/RHSA-2023:3591[RHSA-2023:3591] -* link:https://access.redhat.com/errata/RHBA-2023:4117[RHBA-2023:4117] - -If you have installed the {gitops-title} Operator, run the following command to view the container images in this release: - -[source,terminal] ----- -$ oc describe deployment gitops-operator-controller-manager -n openshift-operators ----- - -[id="new-features-1-9-1_{context}"] -== New features - -The current release adds the following improvements: - -* With this update, the bundled Argo CD has been updated to version 2.7.6. - -[id="fixed-issues-1-9-1_{context}"] -== Fixed issues - -The following issues have been resolved in the current release: - -* Before this update, Argo CD was becoming unresponsive when there was an increase in namespaces and applications. This update fixes the issue by removing a deadlock. Deadlock occurs when two functions are competing for resources. 
Now, you should not experience crashes or unresponsiveness when there is an increase in namespaces or applications. link:https://issues.redhat.com/browse/GITOPS-2782[GITOPS-2782] - -* Before this update, the Argo CD application controller resource could suddenly stop working when resynchronizing applications. This update fixes the issue by adding logic to prevent a cluster cache deadlock. Now, you should not experience the deadlock situation, and applications should resynchronize successfully. link:https://issues.redhat.com/browse/GITOPS-2880[GITOPS-2880] - -* Before this update, there was a mismatch in the RSA key for known hosts in the `argocd-ssh-known-hosts-cm` config map. This update fixes the issue by matching the RSA key with the upstream project. Now, you can use the default RSA keys on default deployments. link:https://issues.redhat.com/browse/GITOPS-3042[GITOPS-3042] - -* Before this update, the reconciliation timeout setting in the `argocd-cm` config map was not being correctly applied to the Argo CD application controller resource. This update fixes the issue by correctly reading and applying the reconciliation timeout setting from the `argocd-cm` config map. Now, you can modify the reconciliation timeout value from the `AppSync` setting without a problem. link:https://issues.redhat.com/browse/GITOPS-2810[GITOPS-2810] diff --git a/modules/gitops-repo-server-properties.adoc b/modules/gitops-repo-server-properties.adoc deleted file mode 100644 index 6ae98d80915b..000000000000 --- a/modules/gitops-repo-server-properties.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * argo-cd-custom-resource-properties.adoc - -:_content-type: REFERENCE -[id="argo-repo-server-properties_{context}"] -= Repo server properties - -[role="_abstract"] -The following properties are available for configuring the Repo server component: - -|=== -|**Name** |**Default** | **Description** -|`Resources` |`____` |The container compute resources. -|`MountSAToken` |`false` |Whether the `ServiceAccount` token should be mounted to the repo-server pod. -|`ServiceAccount` |`""` |The name of the `ServiceAccount` to use with the repo-server pod. -|`VerifyTLS` |`false` |Whether to enforce strict TLS checking on all components when communicating with repo server. -|`AutoTLS` |`""` |Provider to use for setting up TLS the repo-server's gRPC TLS certificate (one of: openshift). Currently only available for OpenShift. -|`Image` | `argoproj/argocd` |The container image for Argo CD Repo server. This overrides the `ARGOCD_REPOSERVER_IMAGE` environment variable. -|`Version` | same as `.spec.Version` |The tag to use with the Argo CD Repo server. -|`LogLevel` | `info` |The log level used by the Argo CD Repo server. Valid options are debug, info, error, and warn. -|`LogFormat` | `text` |The log format to be used by the Argo CD Repo server. Valid options are text or json. -|`ExecTimeout` | `180` |Execution timeout in seconds for rendering tools (e.g. Helm, Kustomize). -|`Env` | `____` |Environment to set for the repository server workloads. -|`Replicas` | `____` |The number of replicas for the Argo CD Repo server. Must be greater than or equal to `0`. 
-|=== - - - diff --git a/modules/gitops-synchronizing-your-application-application-with-your-git-repository.adoc b/modules/gitops-synchronizing-your-application-application-with-your-git-repository.adoc deleted file mode 100644 index 6c016acb3a2a..000000000000 --- a/modules/gitops-synchronizing-your-application-application-with-your-git-repository.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * configuring-an-openshift-cluster-with-argo-cd.adoc - -:_content-type: PROCEDURE -[id="synchronizing-your-application-application-with-your-git-repository_{context}"] -= Synchronizing your application with your Git repository - -.Procedure -. In the Argo CD dashboard, notice that the *cluster-configs* Argo CD application has the statuses *Missing* and *OutOfSync*. Because the application was configured with a manual sync policy, Argo CD does not sync it automatically. - -. Click *SYNC* on the *cluster-configs* tile, review the changes, and then click *SYNCHRONIZE*. Argo CD will detect any changes in the Git repository automatically. If the configurations are changed, Argo CD will change the status of the *cluster-configs* to *OutOfSync*. You can modify the synchronization policy for Argo CD to automatically apply changes from your Git repository to the cluster. - -. Notice that the *cluster-configs* Argo CD application now has the statuses *Healthy* and *Synced*. Click the *cluster-configs* tile to check the details of the synchronized resources and their status on the cluster. - -. Navigate to the {product-title} web console and click {rh-app-icon} to verify that a link to the *Red Hat Developer Blog - Kubernetes* is now present there. - -. Navigate to the *Project* page and search for the `spring-petclinic` namespace to verify that it has been added to the cluster. -+ -Your cluster configurations have been successfully synchronized to the cluster. diff --git a/modules/gitops-uninstall-keycloak.adoc b/modules/gitops-uninstall-keycloak.adoc deleted file mode 100644 index 11669c861ac7..000000000000 --- a/modules/gitops-uninstall-keycloak.adoc +++ /dev/null @@ -1,23 +0,0 @@ -[id="gitops-uninstalling-keycloak_{context}"] -= Uninstalling Keycloak - -You can delete the Keycloak resources and their relevant configurations by removing the `SSO` field from the Argo CD Custom Resource (CR) file. After you remove the `SSO` field, the values in the file look similar to the following: - -[source,yaml] ----- - apiVersion: argoproj.io/v1alpha1 - kind: ArgoCD - metadata: - name: example-argocd - labels: - example: basic - spec: - server: - route: - enabled: true ----- - -[NOTE] -==== -A Keycloak application created by using this method is currently not persistent. Additional configurations created in the Argo CD Keycloak realm are deleted when the server restarts. 
-==== diff --git a/modules/gitops-using-argo-cd-instance-to-manage-cluster-scoped-resources.adoc deleted file mode 100644 index 01fba3a92f80..000000000000 --- a/modules/gitops-using-argo-cd-instance-to-manage-cluster-scoped-resources.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc - -:_content-type: PROCEDURE -[id="using-argo-cd-instance-to-manage-cluster-scoped-resources_{context}"] - -= Using an Argo CD instance to manage cluster-scoped resources - -To manage cluster-scoped resources, update the existing `Subscription` object for the {gitops-title} Operator and add the namespace of the Argo CD instance to the `ARGOCD_CLUSTER_CONFIG_NAMESPACES` environment variable in the `spec` section. - -[discrete] -.Procedure -. In the **Administrator** perspective of the web console, navigate to **Operators** → **Installed Operators** → **{gitops-title}** → **Subscription**. -. Click the **Actions** drop-down menu and then click **Edit Subscription**. -. On the **openshift-gitops-operator** Subscription details page, under the **YAML** tab, edit the `Subscription` YAML file by adding the namespace of the Argo CD instance to the `ARGOCD_CLUSTER_CONFIG_NAMESPACES` environment variable in the `spec` section: -+ -[source,yaml] ---- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: openshift-gitops-operator - namespace: openshift-operators -... -spec: - config: - env: - - name: ARGOCD_CLUSTER_CONFIG_NAMESPACES - value: openshift-gitops, -... ---- -+ -. To verify that the Argo CD instance is configured with a cluster role to manage cluster-scoped resources, perform the following steps: -+ -.. Navigate to **User Management** → **Roles** and from the **Filter** drop-down menu select **Cluster-wide Roles**. -.. Search for `argocd-application-controller` by using the **Search by name** field. -+ -The **Roles** page displays the created cluster role. -+ -[TIP] -==== -Alternatively, in the OpenShift CLI, run the following command: - -[source,terminal] ---- -$ oc auth can-i create oauth -n openshift-gitops --as system:serviceaccount:openshift-gitops:openshift-gitops-argocd-application-controller ---- - -The output `yes` verifies that the Argo CD instance is configured with a cluster role to manage cluster-scoped resources. Otherwise, check your configuration and take the necessary steps. -==== \ No newline at end of file diff --git a/modules/gitops-verifying-argo-cd-self-healing-behavior.adoc deleted file mode 100644 index 037bdcc70af1..000000000000 --- a/modules/gitops-verifying-argo-cd-self-healing-behavior.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module is included in the following assemblies: -// -// * deploying-a-spring-boot-application-with-argo-cd - -:_content-type: PROCEDURE -[id="verifying-argo-cd-self-healing-behavior_{context}"] -= Verifying Argo CD self-healing behavior - -Argo CD constantly monitors the state of deployed applications, detects differences between the specified manifests in Git and live changes in the cluster, and then automatically corrects them. This behavior is referred to as self-healing. - -You can test and observe the self-healing behavior in Argo CD.
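For reference, self-healing is typically enabled through the application's automated sync policy. The following is a minimal sketch of an Argo CD `Application` resource with self-healing turned on; the application name, namespace, and repository details are illustrative only:

[source,yaml]
----
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: app-spring-petclinic  # illustrative application name
  namespace: openshift-gitops
spec:
  project: default
  source:
    repoURL: https://github.com/redhat-developer/openshift-gitops-getting-started  # example repository
    path: app
    targetRevision: HEAD
  destination:
    server: https://kubernetes.default.svc
    namespace: spring-petclinic
  syncPolicy:
    automated:
      selfHeal: true  # revert live changes that drift from the state in Git
      prune: true     # remove resources that were deleted from Git
----

With `selfHeal: true`, the manual modifications made in the steps that follow are reverted automatically, which is the behavior this procedure verifies.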
- -.Prerequisites - -* The sample `app-spring-petclinic` application is deployed and configured. - -.Procedure - -. In the Argo CD dashboard, verify that your application has the `Synced` status. - -. Click the `app-spring-petclinic` tile in the Argo CD dashboard to view the application resources that are deployed to the cluster. - -. In the {product-title} web console, navigate to the *Developer* perspective. - -. Modify the Spring PetClinic deployment and commit the changes to the `app/` directory of the Git repository. Argo CD will automatically deploy the changes to the cluster. - -.. Fork the link:https://github.com/redhat-developer/openshift-gitops-getting-started[OpenShift GitOps getting started repository]. - -.. In the `deployment.yaml` file, change the `failureThreshold` value to `5`. - -.. In the deployment cluster, run the following command to verify the changed value of the `failureThreshold` field: -+ -[source,terminal] ---- -$ oc edit deployment spring-petclinic -n spring-petclinic ---- - -. Test the self-healing behavior by modifying the deployment on the cluster and scaling it up to two pods while watching the application in the {product-title} web console. -+ -.. Run the following command to modify the deployment: -+ -[source,terminal] ---- -$ oc scale deployment spring-petclinic --replicas 2 -n spring-petclinic ---- -.. In the {product-title} web console, notice that the deployment scales up to two pods and immediately scales down again to one pod. Argo CD detected a difference from the Git repository and auto-healed the application on the {product-title} cluster. - -. In the Argo CD dashboard, click the *app-spring-petclinic* tile → *APP DETAILS* → *EVENTS*. The *EVENTS* tab displays the following events: Argo CD detecting out-of-sync deployment resources on the cluster and then resyncing the Git repository to correct them. diff --git a/modules/go-add-infra-nodes.adoc deleted file mode 100644 index 6e22560530ed..000000000000 --- a/modules/go-add-infra-nodes.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/run-gitops-control-plane-workload-on-infra-node.adoc - -:_content-type: PROCEDURE -[id="add-infra-nodes_{context}"] -= Moving {gitops-shortname} workloads to infrastructure nodes - -You can move the default workloads installed by {gitops-title} to the infrastructure nodes. The workloads that can be moved are: - -* `kam deployment` -* `cluster deployment` (backend service) -* `openshift-gitops-applicationset-controller deployment` -* `openshift-gitops-dex-server deployment` -* `openshift-gitops-redis deployment` -* `openshift-gitops-redis-ha-haproxy deployment` -* `openshift-gitops-repo-server deployment` -* `openshift-gitops-server deployment` -* `openshift-gitops-application-controller statefulset` -* `openshift-gitops-redis-server statefulset` - -.Procedure - -. Label existing nodes as infrastructure by running the following command: -+ -[source,terminal] ---- -$ oc label node node-role.kubernetes.io/infra= ---- -. Edit the `GitOpsService` custom resource (CR) to add the infrastructure node selector: -+ -[source,terminal] ---- -$ oc edit gitopsservice -n openshift-gitops ---- -. In the `GitOpsService` CR file, add the `runOnInfra` field to the `spec` section and set it to `true`.
This field moves the workloads in `openshift-gitops` namespace to the infrastructure nodes: -+ -[source,yaml] ----- -apiVersion: pipelines.openshift.io/v1alpha1 -kind: GitopsService -metadata: - name: cluster -spec: - runOnInfra: true ----- -. Optional: Apply taints and isolate the workloads on infrastructure nodes and prevent other workloads from scheduling on these nodes. -+ -[source,terminal] ----- -$ oc adm taint nodes -l node-role.kubernetes.io/infra -infra=reserved:NoSchedule infra=reserved:NoExecute ----- -+ -. Optional: If you apply taints to the nodes, you can add tolerations in the `GitOpsService` CR: -+ -[source,yaml] ----- -spec: - runOnInfra: true - tolerations: - - effect: NoSchedule - key: infra - value: reserved - - effect: NoExecute - key: infra - value: reserved ----- - -To verify that the workloads are scheduled on infrastructure nodes in the {gitops-title} namespace, click any of the pod names and ensure that the *Node selector* and *Tolerations* have been added. - -[NOTE] -==== -Any manually added *Node selectors* and *Tolerations* in the default Argo CD CR will be overwritten by the toggle and the tolerations in the `GitOpsService` CR. -==== diff --git a/modules/go-compatibility-and-support-matrix.adoc b/modules/go-compatibility-and-support-matrix.adoc deleted file mode 100644 index 957404e3643a..000000000000 --- a/modules/go-compatibility-and-support-matrix.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/gitops-release-notes.adoc - -:_content-type: REFERENCE -[id="GitOps-compatibility-support-matrix_{context}"] -= Compatibility and support matrix - -Some features in this release are currently in link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]. These experimental features are not intended for production use. - -In the table, features are marked with the following statuses: - -* *TP*: _Technology Preview_ -* *GA*: _General Availability_ -* *NA*: _Not Applicable_ - -[IMPORTANT] -==== -In {product-title} 4.13, the `stable` channel has been removed. Before upgrading to {product-title} 4.13, if you are already on the `stable` channel, choose the appropriate channel and switch to it. -==== - -|=== -|*OpenShift GitOps* 7+|*Component Versions*|*OpenShift Versions* - -|*Version* |*`kam`* |*Helm* |*Kustomize* |*Argo CD*|*ApplicationSet* |*Dex* |*RH SSO* | -|1.9.0 |0.0.49 TP |3.11.2 GA|5.0.1 GA |2.7.2 GA |NA |2.35.1 GA |7.5.1 GA |4.12-4.13 -|1.8.0 |0.0.47 TP |3.10.0 GA|4.5.7 GA |2.6.3 GA |NA |2.35.1 GA |7.5.1 GA |4.10-4.13 -|1.7.0 |0.0.46 TP |3.10.0 GA|4.5.7 GA |2.5.4 GA |NA |2.35.1 GA |7.5.1 GA |4.10-4.12 -|1.6.0 |0.0.46 TP |3.8.1 GA|4.4.1 GA |2.4.5 GA |GA and included in ArgoCD component |2.30.3 GA |7.5.1 GA |4.8-4.11 -|1.5.0 |0.0.42 TP|3.8.0 GA|4.4.1 GA |2.3.3 GA |0.4.1 TP |2.30.3 GA |7.5.1 GA |4.8-4.11 -|1.4.0 |0.0.41 TP|3.7.1 GA|4.2.0 GA |2.2.2 GA |0.2.0 TP |2.30.0 GA |7.4.0 GA |4.7-4.10 -|1.3.0 |0.0.40 TP|3.6.0 GA|4.2.0 GA |2.1.2 GA |0.2.0 TP |2.28.0 GA |7.4.0 GA |4.7-4.9, 4.6 with limited GA support -|1.2.0 |0.0.38 TP |3.5.0 GA |3.9.4 GA |2.0.5 GA |0.1.0 TP |NA |7.4.0 GA|4.8 -|1.1.0 |0.0.32 TP |3.5.0 GA |3.9.4 GA |2.0.0 GA |NA |NA |NA |4.7 -|=== - -* `kam` is the {gitops-title} Application Manager command-line interface (CLI). -* RH SSO is an abbreviation for Red Hat SSO. 
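If you are affected by the removal of the `stable` channel that is noted in the compatibility section above, you can switch the Operator subscription to another update channel from the CLI as well as from the web console. The following command is an illustrative sketch only; it assumes the default subscription name `openshift-gitops-operator` and the `latest` channel, so substitute the channel that is appropriate for your cluster:

[source,terminal]
----
$ oc patch subscriptions.operators.coreos.com openshift-gitops-operator -n openshift-operators \
  --type merge -p '{"spec":{"channel":"latest"}}'
----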
- -// Writer, to update this support matrix, refer to https://spaces.redhat.com/display/GITOPS/GitOps+Component+Matrix - -[id="GitOps-technology-preview_{context}"] -== Technology Preview features - -The features mentioned in the following table are currently in Technology Preview (TP). These experimental features are not intended for production use. - -.Technology Preview tracker -[cols="4,1,1",options="header"] -|==== -|Feature |TP in {gitops-title} versions|GA in {gitops-title} versions - -|The custom `must-gather` tool -|1.9.0 -|NA - -|Argo Rollouts -|1.9.0 -|NA - -|ApplicationSet Progressive Rollout Strategy -|1.8.0 -|NA - -|Multiple sources for an application -|1.8.0 -|NA - -|Argo CD applications in non-control plane namespaces -|1.7.0 -|NA - -|Argo CD Notifications controller -|1.6.0 -|NA - -|The {gitops-title} *Environments* page in the *Developer* perspective of the {product-title} web console  -|1.1.0 -|NA -|==== diff --git a/modules/go-deleting-argocd-instance.adoc b/modules/go-deleting-argocd-instance.adoc deleted file mode 100644 index 72da969f9822..000000000000 --- a/modules/go-deleting-argocd-instance.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// */gitops/uninstalling-openshift-gitops.adoc - -:_content-type: PROCEDURE -[id='go-deleting-argocd-instance_{context}'] -= Deleting the Argo CD instances - -Delete the Argo CD instances added to the namespace of the GitOps Operator. - -[discrete] -.Procedure -. In the *Terminal* type the following command: - -[source,terminal] ----- -$ oc delete gitopsservice cluster -n openshift-gitops ----- - -[NOTE] -==== -You cannot delete an Argo CD cluster from the web console UI. -==== - -After the command runs successfully all the Argo CD instances will be deleted from the `openshift-gitops` namespace. - -Delete any other Argo CD instances from other namespaces using the same command: - -[source,terminal] ----- -$ oc delete gitopsservice cluster -n ----- diff --git a/modules/go-health-monitoring.adoc b/modules/go-health-monitoring.adoc deleted file mode 100644 index 20619f141571..000000000000 --- a/modules/go-health-monitoring.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: PROCEDURE -[id="health-information-resources_{context}"] -= Checking health information - -The {gitops-title} Operator will install the GitOps backend service in the `openshift-gitops` namespace. - -.Prerequisites - -* The {gitops-title} Operator is installed from *OperatorHub*. -* Ensure that your applications are synchronized by Argo CD. - -.Procedure - -. Click *Environments* under the *Developer* perspective. The *Environments* page shows the list of applications along with their *Environment status*. - -. Hover over the icons under the *Environment status* column to see the synchronization status of all the environments. - -. Click the application name from the list to view the details of a specific application. - -. In the *Application environments* page, if the *Resources* section under the *Overview* tab displays icons, hover over the icons to get status details. -** A broken heart indicates that resource issues have degraded the application's performance. -** A yellow yield sign indicates that resource issues have delayed data about the application's health. - -. To view the deployment history of an application, click the *Deployment History* tab. The page includes details such as the *Last deployment*, *Description* (commit message), *Environment*, *Author*, and *Revision*. 
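The synchronization and health information that the *Environments* page displays can also be read directly from the Argo CD `Application` resources. The following command is a minimal sketch, assuming the Argo CD `Application` custom resource definition is installed and that your user can list applications across namespaces:

[source,terminal]
----
$ oc get applications.argoproj.io -A \
  -o custom-columns=NAME:.metadata.name,SYNC:.status.sync.status,HEALTH:.status.health.status
----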
\ No newline at end of file diff --git a/modules/go-run-argo-cd-instance-on-infrastructure-nodes.adoc b/modules/go-run-argo-cd-instance-on-infrastructure-nodes.adoc deleted file mode 100644 index 0f8fb1bd9a17..000000000000 --- a/modules/go-run-argo-cd-instance-on-infrastructure-nodes.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assembly: -// -// * gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc - -:_content-type: PROCEDURE -[id="run-argo-cd-instance-on-cluster_{context}"] - -= Running the Argo CD instance at the cluster-level - -The default Argo CD instance and the accompanying controllers, installed by the {gitops-title} Operator, can now run on the infrastructure nodes of the cluster by setting a simple configuration toggle. - -[discrete] -.Procedure -. Label the existing nodes: -+ -[source,terminal] ----- -$ oc label node node-role.kubernetes.io/infra="" ----- -+ -. Optional: If required, you can also apply taints and isolate the workloads on infrastructure nodes and prevent other workloads from scheduling on these nodes: -+ -[source,terminal] ----- -$ oc adm taint nodes -l node-role.kubernetes.io/infra \ -infra=reserved:NoSchedule infra=reserved:NoExecute ----- -. Add the `runOnInfra` toggle in the `GitOpsService` custom resource: -+ -[source,yaml] ----- -apiVersion: pipelines.openshift.io/v1alpha1 -kind: GitopsService -metadata: - name: cluster -spec: - runOnInfra: true ----- -. Optional: If taints have been added to the nodes, then add `tolerations` to the `GitOpsService` custom resource, for example: -+ -[source,yaml] ----- - spec: - runOnInfra: true - tolerations: - - effect: NoSchedule - key: infra - value: reserved - - effect: NoExecute - key: infra - value: reserved ----- -. Verify that the workloads in the `openshift-gitops` namespace are now scheduled on the infrastructure nodes by viewing *Pods* -> *Pod details* for any pod in the console UI. - -[NOTE] -==== -Any `nodeSelectors` and `tolerations` manually added to the default Argo CD custom resource are overwritten by the toggle and `tolerations` in the `GitOpsService` custom resource. -==== diff --git a/modules/go-uninstalling-gitops-operator.adoc b/modules/go-uninstalling-gitops-operator.adoc deleted file mode 100644 index 8e87f570c059..000000000000 --- a/modules/go-uninstalling-gitops-operator.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// */gitops/uninstalling-openshift-gitops.adoc - -:_content-type: PROCEDURE -[id='go-uninstalling-gitops-operator_{context}'] -= Uninstalling the GitOps Operator - -[discrete] -.Procedure -. From the *Operators* -> *OperatorHub* page, use the *Filter by keyword* box to search for `{gitops-title} Operator` tile. - -. Click the *Red Hat OpenShift GitOps Operator* tile. The Operator tile indicates it is installed. - -. In the *Red Hat OpenShift GitOps Operator* descriptor page, click *Uninstall*. diff --git a/modules/graceful-restart.adoc b/modules/graceful-restart.adoc deleted file mode 100644 index 43415b2a0f15..000000000000 --- a/modules/graceful-restart.adoc +++ /dev/null @@ -1,153 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/graceful-cluster-restart.adoc - -:_content-type: PROCEDURE -[id="graceful-restart_{context}"] -= Restarting the cluster - -You can restart your cluster after it has been shut down gracefully. 
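The procedure below checks for pending certificate signing requests (CSRs) and approves them one at a time. If many CSRs are pending after the restart, the following one-liner, shown here as an illustrative convenience rather than as part of the documented procedure, approves every pending CSR without individual review, so use it only when you are confident that all of the requests are expected:

[source,terminal]
----
$ oc get csr -o name | xargs oc adm certificate approve
----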
- -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* This procedure assumes that you gracefully shut down the cluster. - -.Procedure - -. Power on any cluster dependencies, such as external storage or an LDAP server. - -. Start all cluster machines. -+ -Use the appropriate method for your cloud environment to start the machines, for example, from your cloud provider's web console. -+ -Wait approximately 10 minutes before continuing to check the status of control plane nodes. - -. Verify that all control plane nodes are ready. -+ -[source,terminal] ----- -$ oc get nodes -l node-role.kubernetes.io/master ----- -+ -The control plane nodes are ready if the status is `Ready`, as shown in the following output: -+ -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-168-251.ec2.internal Ready master 75m v1.27.3 -ip-10-0-170-223.ec2.internal Ready master 75m v1.27.3 -ip-10-0-211-16.ec2.internal Ready master 75m v1.27.3 ----- - -. If the control plane nodes are _not_ ready, then check whether there are any pending certificate signing requests (CSRs) that must be approved. - -.. Get the list of current CSRs: -+ -[source,terminal] ----- -$ oc get csr ----- - -.. Review the details of a CSR to verify that it is valid: -+ -[source,terminal] ----- -$ oc describe csr <1> ----- -<1> `` is the name of a CSR from the list of current CSRs. - -.. Approve each valid CSR: -+ -[source,terminal] ----- -$ oc adm certificate approve ----- - -. After the control plane nodes are ready, verify that all worker nodes are ready. -+ -[source,terminal] ----- -$ oc get nodes -l node-role.kubernetes.io/worker ----- -+ -The worker nodes are ready if the status is `Ready`, as shown in the following output: -+ -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-179-95.ec2.internal Ready worker 64m v1.27.3 -ip-10-0-182-134.ec2.internal Ready worker 64m v1.27.3 -ip-10-0-250-100.ec2.internal Ready worker 64m v1.27.3 ----- - -. If the worker nodes are _not_ ready, then check whether there are any pending certificate signing requests (CSRs) that must be approved. - -.. Get the list of current CSRs: -+ -[source,terminal] ----- -$ oc get csr ----- - -.. Review the details of a CSR to verify that it is valid: -+ -[source,terminal] ----- -$ oc describe csr <1> ----- -<1> `` is the name of a CSR from the list of current CSRs. - -.. Approve each valid CSR: -+ -[source,terminal] ----- -$ oc adm certificate approve ----- - -. Verify that the cluster started properly. - -.. Check that there are no degraded cluster Operators. -+ -[source,terminal] ----- -$ oc get clusteroperators ----- -+ -Check that there are no cluster Operators with the `DEGRADED` condition set to `True`. -+ -[source,terminal,subs="attributes+"] ----- -NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE -authentication {product-version}.0 True False False 59m -cloud-credential {product-version}.0 True False False 85m -cluster-autoscaler {product-version}.0 True False False 73m -config-operator {product-version}.0 True False False 73m -console {product-version}.0 True False False 62m -csi-snapshot-controller {product-version}.0 True False False 66m -dns {product-version}.0 True False False 76m -etcd {product-version}.0 True False False 76m -... ----- - -.. Check that all nodes are in the `Ready` state: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -Check that the status for all nodes is `Ready`. 
-+ -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-168-251.ec2.internal Ready master 82m v1.27.3 -ip-10-0-170-223.ec2.internal Ready master 82m v1.27.3 -ip-10-0-179-95.ec2.internal Ready worker 70m v1.27.3 -ip-10-0-182-134.ec2.internal Ready worker 70m v1.27.3 -ip-10-0-211-16.ec2.internal Ready master 82m v1.27.3 -ip-10-0-250-100.ec2.internal Ready worker 69m v1.27.3 ----- - -If the cluster did not start properly, you might need to restore your cluster using an etcd backup. diff --git a/modules/graceful-shutdown.adoc b/modules/graceful-shutdown.adoc deleted file mode 100644 index 8f7199a21804..000000000000 --- a/modules/graceful-shutdown.adoc +++ /dev/null @@ -1,142 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/graceful-cluster-shutdown.adoc - -:_content-type: PROCEDURE -[id="graceful-shutdown_{context}"] -= Shutting down the cluster - -You can shut down your cluster in a graceful manner so that it can be restarted at a later date. - -[NOTE] -==== -You can shut down a cluster until a year from the installation date and expect it to restart gracefully. After a year from the installation date, the cluster certificates expire. -==== - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have taken an etcd backup. -+ -[IMPORTANT] -==== -It is important to take an etcd backup before performing this procedure so that your cluster can be restored if you encounter any issues when restarting the cluster. - -For example, the following conditions can cause the restarted cluster to malfunction: - -* etcd data corruption during shutdown -* Node failure due to hardware -* Network connectivity issues - -If your cluster fails to recover, follow the steps to restore to a previous cluster state. -==== - -.Procedure - -. If you plan to shut down the cluster for an extended period of time, determine the date that cluster certificates expire. -+ -You must restart the cluster prior to the date that certificates expire. As the cluster restarts, the process might require you to manually approve the pending certificate signing requests (CSRs) to recover kubelet certificates. - -.. Check the expiration date for the `kube-apiserver-to-kubelet-signer` CA certificate: -+ -[source,terminal] ----- -$ oc -n openshift-kube-apiserver-operator get secret kube-apiserver-to-kubelet-signer -o jsonpath='{.metadata.annotations.auth\.openshift\.io/certificate-not-after}{"\n"}' ----- -+ -.Example output -[source,terminal] ----- -2023-08-05T14:37:50Z ----- - -.. Check the expiration date for the kubelet certificates: - -... Start a debug session for a control plane node by running the following command: -+ -[source,terminal] ----- -$ oc debug node/ ----- - -... Change your root directory to `/host` by running the following command: -+ -[source,terminal] ----- -sh-4.4# chroot /host ----- - -... Check the kubelet client certificate expiration date by running the following command: -+ -[source,terminal] ----- -sh-5.1# openssl x509 -in /var/lib/kubelet/pki/kubelet-client-current.pem -noout -enddate ----- -+ -.Example output -[source,terminal] ----- -notAfter=Jun 6 10:50:07 2023 GMT ----- - -... Check the kubelet server certificate expiration date by running the following command: -+ -[source,terminal] ----- -sh-5.1# openssl x509 -in /var/lib/kubelet/pki/kubelet-server-current.pem -noout -enddate ----- -+ -.Example output -[source,terminal] ----- -notAfter=Jun 6 10:50:07 2023 GMT ----- - -... Exit the debug session. - -... 
Repeat these steps to check certificate expiration dates on all control plane nodes. To ensure that the cluster can restart gracefully, plan to restart it before the earliest certificate expiration date. - -. Shut down all of the nodes in the cluster. You can do this from your cloud provider's web console, or run the following loop: -+ -[source,terminal] ----- -$ for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}'); do oc debug node/${node} -- chroot /host shutdown -h 1; done <1> ----- -<1> `-h 1` indicates how long, in minutes, this process lasts before the control-plane nodes are shut down. For large-scale clusters with 10 nodes or more, set to 10 minutes or longer to make sure all the compute nodes have time to shut down first. -+ -.Example output ----- -Starting pod/ip-10-0-130-169us-east-2computeinternal-debug ... -To use host binaries, run `chroot /host` -Shutdown scheduled for Mon 2021-09-13 09:36:17 UTC, use 'shutdown -c' to cancel. - -Removing debug pod ... -Starting pod/ip-10-0-150-116us-east-2computeinternal-debug ... -To use host binaries, run `chroot /host` -Shutdown scheduled for Mon 2021-09-13 09:36:29 UTC, use 'shutdown -c' to cancel. ----- -+ -Shutting down the nodes using one of these methods allows pods to terminate gracefully, which reduces the chance for data corruption. -+ -[NOTE] -==== -Adjust the shut down time to be longer for large-scale clusters: -[source,terminal] ----- -$ for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}'); do oc debug node/${node} -- chroot /host shutdown -h 10; done ----- -==== -+ -[NOTE] -==== -It is not necessary to drain control plane nodes of the standard pods that ship with {product-title} prior to shutdown. - -Cluster administrators are responsible for ensuring a clean restart of their own workloads after the cluster is restarted. If you drained control plane nodes prior to shutdown because of custom workloads, you must mark the control plane nodes as schedulable before the cluster will be functional again after restart. -==== - -. Shut off any cluster dependencies that are no longer needed, such as external storage or an LDAP server. Be sure to consult your vendor's documentation before doing so. -+ -[IMPORTANT] -==== -If you deployed your cluster on a cloud-provider platform, do not shut down, suspend, or delete the associated cloud resources. If you delete the cloud resources of a suspended virtual machine, {product-title} might not restore successfully. -==== diff --git a/modules/helm-adding-helm-chart-repositories.adoc b/modules/helm-adding-helm-chart-repositories.adoc deleted file mode 100644 index c2e708f494c7..000000000000 --- a/modules/helm-adding-helm-chart-repositories.adoc +++ /dev/null @@ -1,52 +0,0 @@ -:_content-type: PROCEDURE -[id="adding-helm-chart-repositories_{context}"] -= Adding custom Helm chart repositories - -As a cluster administrator, you can add custom Helm chart repositories to your cluster and enable access to the Helm charts from these repositories in the *Developer Catalog*. - -.Procedure - -. To add a new Helm Chart Repository, you must add the Helm Chart Repository custom resource (CR) to your cluster. 
-+ -.Sample Helm Chart Repository CR - -[source,yaml] ----- -apiVersion: helm.openshift.io/v1beta1 -kind: HelmChartRepository -metadata: - name: -spec: - # optional name that might be used by console - # name: - connectionConfig: - url: ----- -+ -For example, to add an Azure sample chart repository, run: -+ -[source,terminal] ----- -$ cat < -spec: - url: https://my.chart-repo.org/stable - - # optional name that might be used by console - name: - - # optional and only needed for UI purposes - description: - - # required: chart repository URL - connectionConfig: - url: ----- -+ -For example, to add an Azure sample chart repository scoped to your `my-namespace` namespace, run: -+ -[source,terminal] ----- -$ cat < -name: nodejs-ex-k <2> -description: A Helm chart for OpenShift <3> -icon: https://static.redhat.com/libs/redhat/brand-assets/latest/corp/logo.svg <4> -version: 0.2.1 <5> ----- -+ -<1> The chart API version. It should be `v2` for Helm charts that require at least Helm 3. -<2> The name of your chart. -<3> The description of your chart. -<4> The URL to an image to be used as an icon. -<5> The Version of your chart as per the Semantic Versioning (SemVer) 2.0.0 Specification. - -. Verify that the chart is formatted properly: -+ -[source,terminal] ----- -$ helm lint ----- -+ -.Example output -[source,terminal] ----- -[INFO] Chart.yaml: icon is recommended - -1 chart(s) linted, 0 chart(s) failed ----- - -. Navigate to the previous directory level: -+ -[source,terminal] ----- -$ cd .. ----- - -. Install the chart: -+ -[source,terminal] ----- -$ helm install nodejs-chart nodejs-ex-k ----- - -. Verify that the chart has installed successfully: -+ -[source,terminal] ----- -$ helm list ----- -+ -.Example output -[source,terminal] ----- -NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION -nodejs-chart nodejs-ex-k 1 2019-12-05 15:06:51.379134163 -0500 EST deployed nodejs-0.1.0 1.16.0 ----- diff --git a/modules/helm-creating-credentials-and-certificates-to-add-helm-repositories.adoc b/modules/helm-creating-credentials-and-certificates-to-add-helm-repositories.adoc deleted file mode 100644 index 43a6f87231ca..000000000000 --- a/modules/helm-creating-credentials-and-certificates-to-add-helm-repositories.adoc +++ /dev/null @@ -1,86 +0,0 @@ -:_content-type: PROCEDURE -[id="creating-credentials-and-certificates-to-add-helm-repositories_{context}"] -= Creating credentials and CA certificates to add Helm chart repositories - -Some Helm chart repositories need credentials and custom certificate authority (CA) certificates to connect to it. You can use the web console as well as the CLI to add credentials and certificates. - -.Procedure -To configure the credentials and certificates, and then add a Helm chart repository using the CLI: - -. In the `openshift-config` namespace, create a `ConfigMap` object with a custom CA certificate in PEM encoded format, and store it under the `ca-bundle.crt` key within the config map: -+ -[source,terminal] ----- -$ oc create configmap helm-ca-cert \ ---from-file=ca-bundle.crt=/path/to/certs/ca.crt \ --n openshift-config ----- -+ -. In the `openshift-config` namespace, create a `Secret` object to add the client TLS configurations: -+ -[source,terminal] ----- -$ oc create secret tls helm-tls-configs \ ---cert=/path/to/certs/client.crt \ ---key=/path/to/certs/client.key \ --n openshift-config ----- -+ -Note that the client certificate and key must be in PEM encoded format and stored under the keys `tls.crt` and `tls.key`, respectively. - -. 
Add the Helm repository as follows: -+ -[source,terminal] ----- -$ cat < -spec: - name: - connectionConfig: - url: - tlsConfig: - name: helm-tls-configs - ca: - name: helm-ca-cert -EOF ----- -+ -The `ConfigMap` and `Secret` are consumed in the HelmChartRepository CR using the `tlsConfig` and `ca` fields. These certificates are used to connect to the Helm repository URL. -. By default, all authenticated users have access to all configured charts. However, for chart repositories where certificates are needed, you must provide users with read access to the `helm-ca-cert` config map and `helm-tls-configs` secret in the `openshift-config` namespace, as follows: -+ -[source,terminal] ----- -$ cat < - disabled: true ----- -+ -The repository is now disabled and will not appear in the catalog. diff --git a/modules/helm-filtering-helm-charts-by-certification-level.adoc b/modules/helm-filtering-helm-charts-by-certification-level.adoc deleted file mode 100644 index b7f030d649f3..000000000000 --- a/modules/helm-filtering-helm-charts-by-certification-level.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: PROCEDURE -[id="filtering-helm-charts-by-certification-level_{context}"] -= Filtering Helm Charts by their certification level - -You can filter Helm charts based on their certification level in the *Developer Catalog*. - -.Procedure - -. In the *Developer* perspective, navigate to the *+Add* view and select a project. - -. From the *Developer Catalog* tile, select the *Helm Chart* option to see all the Helm charts in the *Developer Catalog*. - -. Use the filters to the left of the list of Helm charts to filter the required charts: -* Use the *Chart Repositories* filter to filter charts provided by *Red Hat Certification Charts* or *OpenShift Helm Charts*. -* Use the *Source* filter to filter charts sourced from *Partners*, *Community*, or *Red Hat*. Certified charts are indicated with the (image:odc_verified_icon.png[title="Certified icon"]) icon. - -[NOTE] -==== -The *Source* filter will not be visible when there is only one provider type. -==== - -You can now select the required chart and install it. diff --git a/modules/helm-installing-a-helm-chart-on-an-openshift-cluster.adoc b/modules/helm-installing-a-helm-chart-on-an-openshift-cluster.adoc deleted file mode 100644 index fe43cdb4dbae..000000000000 --- a/modules/helm-installing-a-helm-chart-on-an-openshift-cluster.adoc +++ /dev/null @@ -1,70 +0,0 @@ -:_content-type: PROCEDURE -[id="installing-a-helm-chart-on-an-openshift-cluster_{context}"] - -= Installing a Helm chart on an {product-title} cluster - -.Prerequisites -* You have a running {product-title} cluster and you have logged into it. -* You have installed Helm. - -.Procedure - -. Create a new project: -+ -[source,terminal] ----- -$ oc new-project vault ----- - -. Add a repository of Helm charts to your local Helm client: -+ -[source,terminal] ----- -$ helm repo add openshift-helm-charts https://charts.openshift.io/ ----- -+ -.Example output -[source,terminal] ----- -"openshift-helm-charts" has been added to your repositories ----- - -. Update the repository: -+ -[source,terminal] ----- -$ helm repo update ----- - -. Install an example HashiCorp Vault: -+ -[source,terminal] ----- -$ helm install example-vault openshift-helm-charts/hashicorp-vault ----- -+ -.Example output -[source,terminal] ----- -NAME: example-vault -LAST DEPLOYED: Fri Mar 11 12:02:12 2022 -NAMESPACE: vault -STATUS: deployed -REVISION: 1 -NOTES: -Thank you for installing HashiCorp Vault! ----- - -. 
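+
You can also watch the pods that the chart creates in the `vault` project. The exact pod names depend on the chart version, so treat this as an illustrative check only:
+
[source,terminal]
----
$ oc get pods -n vault
----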
Verify that the chart has installed successfully: -+ -[source,terminal] ----- -$ helm list ----- -+ -.Example output -[source,terminal] ----- -NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION -example-vault vault 1 2022-03-11 12:02:12.296226673 +0530 IST deployed vault-0.19.0 1.9.2 ----- diff --git a/modules/hosted-control-planes-concepts-personas.adoc b/modules/hosted-control-planes-concepts-personas.adoc deleted file mode 100644 index d10d8bd282b0..000000000000 --- a/modules/hosted-control-planes-concepts-personas.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/control-plane.adoc -// * hosted-control-planes/index.adoc - - -:_content-type: CONCEPT -[id="hosted-control-planes-concepts-personas_{context}"] -= Glossary of common concepts and personas for hosted control planes - -When you use hosted control planes for {product-title}, it is important to understand its key concepts and the personas that are involved. - -[id="hosted-control-planes-concepts_{context}"] -== Concepts - -hosted cluster:: An {product-title} API endpoint for the tenant cluster that is managed by the HyperShift Operator. - -hosted cluster infrastructure:: Network, compute, and storage resources that exist in the tenant or end-user cloud account. - -hosted control plane:: An {product-title} control plane that runs on the management cluster, which is exposed by the API endpoint of a hosted cluster. The components of a control plane include etcd, the Kubernetes API server, the Kubernetes controller manager, and a VPN. - -hosting cluster:: See _management cluster_. - -management cluster:: An {product-title} cluster where the HyperShift Operator is deployed and where the control planes for tenant clusters are hosted. The management cluster is synonymous with the _hosting cluster_. - -management cluster infrastructure:: Network, compute, and storage resources of the management cluster. - -[id="hosted-control-planes-personas_{context}"] -== Personas - -cluster instance administrator:: Users who assume this role are the equivalent of administrators in standalone {product-title}. This user has the `cluster-admin` role in the provisioned cluster, but might not have power over when or how the cluster is updated or configured. This user might have read-only access to see some configuration projected into the cluster. - -cluster instance user:: Users who assume this role are the equivalent of developers in standalone {product-title}. This user does not have a view into OperatorHub or machines. - -cluster service consumer:: Users who assume this role can request control planes and worker nodes, drive updates, or modify externalized configurations. Typically, this user does not manage or access cloud credentials or infrastructure encryption keys. The cluster service consumer persona can request hosted clusters and interact with node pools. Users who assume this role have RBAC to create, read, update, or delete hosted clusters and node pools within a logical boundary. - -cluster service provider:: Users who assume this role typically have the `cluster-admin` role on the management cluster and have RBAC to monitor and own the availability of the HyperShift Operator as well as the control planes for the tenant's hosted clusters. 
The cluster service provider persona is responsible for several activities, including the following examples: -** Owning service-level objects for control plane availability, uptime, and stability -** Configuring the cloud account for the management cluster to host control planes -** Configuring the user-provisioned infrastructure, which includes the host awareness of available compute resources - - - diff --git a/modules/hosted-control-planes-overview.adoc b/modules/hosted-control-planes-overview.adoc deleted file mode 100644 index 5393edd5cbd6..000000000000 --- a/modules/hosted-control-planes-overview.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/control-plane.adoc -// * hosted-control-planes/index.adoc - - -:_content-type: CONCEPT -[id="hosted-control-planes-overview_{context}"] -= Introduction to hosted control planes (Technology Preview) - -You can use hosted control planes for Red Hat {product-title} to reduce management costs, optimize cluster deployment time, and separate management and workload concerns so that you can focus on your applications. - -You can enable hosted control planes as a Technology Preview feature by using the link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#cluster_mce_overview[multicluster engine for Kubernetes operator version 2.0 or later] on Amazon Web Services (AWS), bare metal by using the Agent provider, or {VirtProductName}. - -:FeatureName: Hosted control planes -include::snippets/technology-preview.adoc[] - -[id="hosted-control-planes-architecture_{context}"] -== Architecture of hosted control planes - -{product-title} is often deployed in a coupled, or standalone, model, where a cluster consists of a control plane and a data plane. The control plane includes an API endpoint, a storage endpoint, a workload scheduler, and an actuator that ensures state. The data plane includes compute, storage, and networking where workloads and applications run. - -The standalone control plane is hosted by a dedicated group of nodes, which can be physical or virtual, with a minimum number to ensure quorum. The network stack is shared. Administrator access to a cluster offers visibility into the cluster's control plane, machine management APIs, and other components that contribute to the state of a cluster. - -Although the standalone model works well, some situations require an architecture where the control plane and data plane are decoupled. In those cases, the data plane is on a separate network domain with a dedicated physical hosting environment. The control plane is hosted by using high-level primitives such as deployments and stateful sets that are native to Kubernetes. The control plane is treated as any other workload. - -image::hosted-control-planes-diagram.png[Diagram that compares the hosted control plane model against OpenShift with a coupled control plane and workers] - -[id="hosted-control-planes-benefits_{context}"] -== Benefits of hosted control planes - -With hosted control planes for {product-title}, you can pave the way for a true hybrid-cloud approach and enjoy several other benefits. - -* The security boundaries between management and workloads are stronger because the control plane is decoupled and hosted on a dedicated hosting service cluster. As a result, you are less likely to leak credentials for clusters to other users. 
Because infrastructure secret account management is also decoupled, cluster infrastructure administrators cannot accidentally delete control plane infrastructure. - -* With hosted control planes, you can run many control planes on fewer nodes. As a result, clusters are more affordable. - -* Because the control planes consist of pods that are launched on {product-title}, control planes start quickly. The same principles apply to control planes and workloads, such as monitoring, logging, and auto-scaling. - -* From an infrastructure perspective, you can push registries, HAProxy, cluster monitoring, storage nodes, and other infrastructure components to the tenant's cloud provider account, isolating usage to the tenant. - -* From an operational perspective, multicluster management is more centralized, which results in fewer external factors that affect the cluster status and consistency. Site reliability engineers have a central place to debug issues and navigate to the cluster data plane, which can lead to shorter Time to Resolution (TTR) and greater productivity. \ No newline at end of file diff --git a/modules/hosted-control-planes-pause-reconciliation.adoc b/modules/hosted-control-planes-pause-reconciliation.adoc deleted file mode 100644 index 28ceab1f69c4..000000000000 --- a/modules/hosted-control-planes-pause-reconciliation.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * hosted_control_planes/hcp-managing.adoc - -:_content-type: PROCEDURE -[id="hosted-control-planes-pause-reconciliation_{context}"] -= Pausing the reconciliation of a hosted cluster and hosted control plane - -If you are a cluster instance administrator, you can pause the reconciliation of a hosted cluster and hosted control plane. You might want to pause reconciliation when you back up and restore an etcd database or when you need to debug problems with a hosted cluster or hosted control plane. - -.Procedure - -. To pause reconciliation for a hosted cluster and hosted control plane, populate the `pausedUntil` field of the `HostedCluster` resource, as shown in the following examples: -+ -** To pause the reconciliation until a specific time, specify an RFC339 timestamp: -+ -[source,terminal] ----- -PAUSED_UNTIL="2022-03-03T03:28:48Z" -kubectl patch -n hostedclusters/ -p '{"spec":{"pausedUntil":"'${PAUSED_UNTIL}'"}}' --type=merge ----- -+ -The reconciliation is paused until the specified time is passed. -+ -** To pause the reconciliation indefinitely, pass a Boolean value of `true`: -+ -[source,terminal] ----- -PAUSED_UNTIL="true" -kubectl patch -n hostedclusters/ -p '{"spec":{"pausedUntil":"'${PAUSED_UNTIL}'"}}' --type=merge ----- -+ -The reconciliation is paused until you remove the field from the `HostedCluster` resource. -+ -When the pause reconciliation field is populated for the `HostedCluster` resource, the field is automatically added to the associated `HostedControlPlane` resource. - -. 
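+
If you want to confirm the current value, you can read the field back from the `HostedCluster` resource. In this sketch, `<namespace>` and `<hosted-cluster-name>` are placeholders for your own resources:
+
[source,terminal]
----
$ kubectl get -n <namespace> hostedclusters/<hosted-cluster-name> -o jsonpath='{.spec.pausedUntil}'
----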
To remove the `pausedUntil` field, enter the following patch command: -+ -[source,terminal] ----- -kubectl patch -n hostedclusters/ -p '{"spec":{"pausedUntil":null}}' --type=merge ----- - - - - - diff --git a/modules/hosted-control-planes-version-support.adoc b/modules/hosted-control-planes-version-support.adoc deleted file mode 100644 index 754c9a5e0c1b..000000000000 --- a/modules/hosted-control-planes-version-support.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/control-plane.adoc -// * hosted-control-planes/index.adoc - - -:_content-type: CONCEPT -[id="hosted-control-planes-version-support_{context}"] -= Versioning for hosted control planes - -With each major, minor, or patch version release of {product-title}, two components of hosted control planes are released: - -* HyperShift Operator -* Command-line interface (CLI) - -The HyperShift Operator manages the lifecycle of hosted clusters that are represented by `HostedCluster` API resources. The HyperShift Operator is released with each {product-title} release. After the HyperShift Operator is installed, it creates a config map called `supported-versions` in the HyperShift namespace, as shown in the following example. The config map describes the HostedCluster versions that can be deployed. - -[source,yaml] ----- - apiVersion: v1 - data: - supported-versions: '{"versions":["4.13","4.12","4.11"]}' - kind: ConfigMap - metadata: - labels: - hypershift.openshift.io/supported-versions: "true" - name: supported-versions - namespace: hypershift ----- - -The CLI is a helper utility for development purposes. The CLI is released as part of any HyperShift Operator release. No compatibility policies are guaranteed. - -The API, `hypershift.openshift.io`, provides a way to create and manage lightweight, flexible, heterogeneous {product-title} clusters at scale. The API exposes two user-facing resources: `HostedCluster` and `NodePool`. A `HostedCluster` resource encapsulates the control plane and common data plane configuration. When you create a `HostedCluster` resource, you have a fully functional control plane with no attached nodes. A `NodePool` resource is a scalable set of worker nodes that is attached to a `HostedCluster` resource. - -The API version policy generally aligns with the policy for link:https://kubernetes.io/docs/reference/using-api/#api-versioning[Kubernetes API versioning]. - - - diff --git a/modules/how-huge-pages-are-consumed-by-apps.adoc b/modules/how-huge-pages-are-consumed-by-apps.adoc deleted file mode 100644 index 70670301b09d..000000000000 --- a/modules/how-huge-pages-are-consumed-by-apps.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.adoc -// * post_installation_configuration/node-tasks.adoc - -[id="how-huge-pages-are-consumed-by-apps_{context}"] -= How huge pages are consumed by apps - -Nodes must pre-allocate huge pages in order for the node to report its huge page -capacity. A node can only pre-allocate huge pages for a single size. - -Huge pages can be consumed through container-level resource requirements using the -resource name `hugepages-`, where size is the most compact binary -notation using integer values supported on a particular node. For example, if a -node supports 2048KiB page sizes, it exposes a schedulable resource -`hugepages-2Mi`. Unlike CPU or memory, huge pages do not support over-commitment. 
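For example, you can check what a node currently reports before you request huge pages in a pod. The following command is a quick check that assumes pre-allocated 2Mi pages and uses `<node_name>` as a placeholder:

[source,terminal]
----
$ oc get node <node_name> -o jsonpath='{.status.allocatable.hugepages-2Mi}'
----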
- -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - generateName: hugepages-volume- -spec: - containers: - - securityContext: - privileged: true - image: rhel7:latest - command: - - sleep - - inf - name: example - volumeMounts: - - mountPath: /dev/hugepages - name: hugepage - resources: - limits: - hugepages-2Mi: 100Mi <1> - memory: "1Gi" - cpu: "1" - volumes: - - name: hugepage - emptyDir: - medium: HugePages ----- -<1> Specify the amount of memory for `hugepages` as the exact amount to be -allocated. Do not specify this value as the amount of memory for `hugepages` -multiplied by the size of the page. For example, given a huge page size of 2MB, -if you want to use 100MB of huge-page-backed RAM for your application, then you -would allocate 50 huge pages. {product-title} handles the math for you. As in -the above example, you can specify `100MB` directly. - -*Allocating huge pages of a specific size* - -Some platforms support multiple huge page sizes. To allocate huge pages of a -specific size, precede the huge pages boot command parameters with a huge page -size selection parameter `hugepagesz=`. The `` value must be -specified in bytes with an optional scale suffix [`kKmMgG`]. The default huge -page size can be defined with the `default_hugepagesz=` boot parameter. - -*Huge page requirements* - -* Huge page requests must equal the limits. This is the default if limits are -specified, but requests are not. - -* Huge pages are isolated at a pod scope. Container isolation is planned in a -future iteration. - -* `EmptyDir` volumes backed by huge pages must not consume more huge page memory -than the pod request. - -* Applications that consume huge pages via `shmget()` with `SHM_HUGETLB` must run -with a supplemental group that matches *_proc/sys/vm/hugetlb_shm_group_*. diff --git a/modules/how-to-plan-your-environment-according-to-application-requirements.adoc b/modules/how-to-plan-your-environment-according-to-application-requirements.adoc deleted file mode 100644 index 67578dbdc63b..000000000000 --- a/modules/how-to-plan-your-environment-according-to-application-requirements.adoc +++ /dev/null @@ -1,183 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/planning-your-environment-according-to-object-maximums.adoc - -[id="how-to-plan-according-to-application-requirements_{context}"] -= How to plan your environment according to application requirements - -Consider an example application environment: - -[options="header",cols="5"] -|=== -|Pod type |Pod quantity |Max memory |CPU cores |Persistent storage - -|apache -|100 -|500 MB -|0.5 -|1 GB - -|node.js -|200 -|1 GB -|1 -|1 GB - -|postgresql -|100 -|1 GB -|2 -|10 GB - -|JBoss EAP -|100 -|1 GB -|1 -|1 GB -|=== - -Extrapolated requirements: 550 CPU cores, 450GB RAM, and 1.4TB storage. - -Instance size for nodes can be modulated up or down, depending on your -preference. Nodes are often resource overcommitted. In this deployment -scenario, you can choose to run additional smaller nodes or fewer larger nodes -to provide the same amount of resources. Factors such as operational agility and -cost-per-instance should be considered. - -[options="header",cols="4"] -|=== -|Node type |Quantity |CPUs |RAM (GB) - -|Nodes (option 1) -|100 -|4 -|16 - -|Nodes (option 2) -|50 -|8 -|32 - -|Nodes (option 3) -|25 -|16 -|64 -|=== - -Some applications lend themselves well to overcommitted environments, and some -do not. 
Most Java applications and applications that use huge pages are examples -of applications that would not allow for overcommitment. That memory can not be -used for other applications. In the example above, the environment would be -roughly 30 percent overcommitted, a common ratio. - -The application pods can access a service either by using environment variables or DNS. -If using environment variables, for each active service the variables are injected by the -kubelet when a pod is run on a node. A cluster-aware DNS server watches the Kubernetes API -for new services and creates a set of DNS records for each one. If DNS is enabled throughout -your cluster, then all pods should automatically be able to resolve services by their DNS name. -Service discovery using DNS can be used in case you must go beyond 5000 services. When using -environment variables for service discovery, the argument list exceeds the allowed length after -5000 services in a namespace, then the pods and deployments will start failing. Disable the service -links in the deployment's service specification file to overcome this: - -[source,yaml] ----- ---- -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - name: deployment-config-template - creationTimestamp: - annotations: - description: This template will create a deploymentConfig with 1 replica, 4 env vars and a service. - tags: '' -objects: -- apiVersion: apps.openshift.io/v1 - kind: DeploymentConfig - metadata: - name: deploymentconfig${IDENTIFIER} - spec: - template: - metadata: - labels: - name: replicationcontroller${IDENTIFIER} - spec: - enableServiceLinks: false - containers: - - name: pause${IDENTIFIER} - image: "${IMAGE}" - ports: - - containerPort: 8080 - protocol: TCP - env: - - name: ENVVAR1_${IDENTIFIER} - value: "${ENV_VALUE}" - - name: ENVVAR2_${IDENTIFIER} - value: "${ENV_VALUE}" - - name: ENVVAR3_${IDENTIFIER} - value: "${ENV_VALUE}" - - name: ENVVAR4_${IDENTIFIER} - value: "${ENV_VALUE}" - resources: {} - imagePullPolicy: IfNotPresent - capabilities: {} - securityContext: - capabilities: {} - privileged: false - restartPolicy: Always - serviceAccount: '' - replicas: 1 - selector: - name: replicationcontroller${IDENTIFIER} - triggers: - - type: ConfigChange - strategy: - type: Rolling -- apiVersion: v1 - kind: Service - metadata: - name: service${IDENTIFIER} - spec: - selector: - name: replicationcontroller${IDENTIFIER} - ports: - - name: serviceport${IDENTIFIER} - protocol: TCP - port: 80 - targetPort: 8080 - clusterIP: '' - type: ClusterIP - sessionAffinity: None - status: - loadBalancer: {} -parameters: -- name: IDENTIFIER - description: Number to append to the name of resources - value: '1' - required: true -- name: IMAGE - description: Image to use for deploymentConfig - value: gcr.io/google-containers/pause-amd64:3.0 - required: false -- name: ENV_VALUE - description: Value to use for environment variables - generate: expression - from: "[A-Za-z0-9]{255}" - required: false -labels: - template: deployment-config-template ----- - -The number of application pods that can run in a namespace is dependent on the number of services and the length of the service name when the environment variables are used for service discovery. `ARG_MAX` on the system defines the maximum argument length for a new process and it is set to 2097152 bytes (2 MiB) by default. 
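You can confirm this limit directly on a node. The following is an optional check that assumes a debug shell on the node and uses `<node_name>` as a placeholder:

[source,terminal]
----
$ oc debug node/<node_name> -- chroot /host getconf ARG_MAX
----

.Example output
[source,terminal]
----
2097152
----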
The Kubelet injects environment variables in to each pod scheduled to run in the namespace including: - -* `_SERVICE_HOST=` -* `_SERVICE_PORT=` -* `_PORT=tcp://:` -* `_PORT__TCP=tcp://:` -* `_PORT__TCP_PROTO=tcp` -* `_PORT__TCP_PORT=` -* `_PORT__TCP_ADDR=` - -The pods in the namespace will start to fail if the argument length exceeds the allowed value and the number of -characters in a service name impacts it. For example, in a namespace with 5000 services, the limit on the service name -is 33 characters, which enables you to run 5000 pods in the namespace. diff --git a/modules/how-to-plan-your-environment-according-to-cluster-maximums.adoc b/modules/how-to-plan-your-environment-according-to-cluster-maximums.adoc deleted file mode 100644 index b410d1fd9944..000000000000 --- a/modules/how-to-plan-your-environment-according-to-cluster-maximums.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/planning-your-environment-according-to-object-maximums.adoc - -[id="how-to-plan-according-to-cluster-maximums_{context}"] -= How to plan your environment according to tested cluster maximums - -[IMPORTANT] -==== -Oversubscribing the physical resources on a node affects resource guarantees the Kubernetes scheduler makes during pod placement. Learn what measures you can take to avoid memory swapping. - -Some of the tested maximums are stretched only in a single dimension. They will vary when many objects are running on the cluster. - -The numbers noted in this documentation are based on Red Hat's test methodology, setup, configuration, and tunings. These numbers can vary based on your own individual setup and environments. -==== - -While planning your environment, determine how many pods are expected to fit per node: - ----- -required pods per cluster / pods per node = total number of nodes needed ----- - -The default maximum number of pods per node is 250. However, the number of pods that fit on a node is dependent on the application itself. Consider the application's memory, CPU, and storage requirements, as described in "How to plan your environment according to application requirements". - -.Example scenario - -If you want to scope your cluster for 2200 pods per cluster, you would need at least five nodes, assuming that there are 500 maximum pods per node: - ----- -2200 / 500 = 4.4 ----- - -If you increase the number of nodes to 20, then the pod distribution changes to 110 pods per node: - ----- -2200 / 20 = 110 ----- - -Where: - ----- -required pods per cluster / total number of nodes = expected pods per node ----- - -{product-title} comes with several system pods, such as SDN, DNS, Operators, and others, which run across every worker node by default. Therefore, the result of the above formula can vary. diff --git a/modules/hw-installing-amq-interconnect-messaging-bus.adoc b/modules/hw-installing-amq-interconnect-messaging-bus.adoc deleted file mode 100644 index 914b93dbce69..000000000000 --- a/modules/hw-installing-amq-interconnect-messaging-bus.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/using-rfhe.adoc - -:_content-type: PROCEDURE -[id="hw-installing-amq-interconnect-messaging-bus_{context}"] -= Installing the AMQ messaging bus - -To pass Redfish bare-metal event notifications between publisher and subscriber on a node, you can install and configure an AMQ messaging bus to run locally on the node. 
You do this by installing the AMQ Interconnect Operator for use in the cluster. - -include::snippets/ptp-amq-interconnect-eol.adoc[] - -.Prerequisites - -* Install the {product-title} CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -* Install the AMQ Interconnect Operator to its own `amq-interconnect` namespace. See link:https://access.redhat.com/documentation/en-us/red_hat_amq/2021.q1/html/deploying_amq_interconnect_on_openshift/adding-operator-router-ocp[Installing the AMQ Interconnect Operator]. - -.Verification - -. Verify that the AMQ Interconnect Operator is available and the required pods are running: -+ -[source,terminal] ----- -$ oc get pods -n amq-interconnect ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -amq-interconnect-645db76c76-k8ghs 1/1 Running 0 23h -interconnect-operator-5cb5fc7cc-4v7qm 1/1 Running 0 23h ----- - -. Verify that the required `bare-metal-event-relay` bare-metal event producer pod is running in the `openshift-bare-metal-events` namespace: -+ -[source,terminal] ----- -$ oc get pods -n openshift-bare-metal-events ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -hw-event-proxy-operator-controller-manager-74d5649b7c-dzgtl 2/2 Running 0 25s ----- - - - diff --git a/modules/ibm-z-boost-networking-performance-with-rfs.adoc b/modules/ibm-z-boost-networking-performance-with-rfs.adoc deleted file mode 100644 index 61ed4c0b1e39..000000000000 --- a/modules/ibm-z-boost-networking-performance-with-rfs.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ibm-z-recommended-host-practices.adoc - -:_content-type: PROCEDURE -[id="ibm-z-boost-networking-performance-with-rfs_{context}"] -= Boost networking performance with Receive Flow Steering - -Receive Flow Steering (RFS) extends Receive Packet Steering (RPS) by further reducing network latency. RFS is technically based on RPS, and improves the efficiency of packet processing by increasing the CPU cache hit rate. RFS achieves this, and in addition considers queue length, by determining the most convenient CPU for computation so that cache hits are more likely to occur within the CPU. Thus, the CPU cache is invalidated less and requires fewer cycles to rebuild the cache. This can help reduce packet processing run time. - -[id="use-the-mco-to-activate-rfs_{context}"] -== Use the Machine Config Operator (MCO) to activate RFS - -.Procedure - -. Copy the following MCO sample profile into a YAML file. 
For example, `enable-rfs.yaml`: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker - name: 50-enable-rfs -spec: - config: - ignition: - version: 2.2.0 - storage: - files: - - contents: - source: data:text/plain;charset=US-ASCII,%23%20turn%20on%20Receive%20Flow%20Steering%20%28RFS%29%20for%20all%20network%20interfaces%0ASUBSYSTEM%3D%3D%22net%22%2C%20ACTION%3D%3D%22add%22%2C%20RUN%7Bprogram%7D%2B%3D%22/bin/bash%20-c%20%27for%20x%20in%20/sys/%24DEVPATH/queues/rx-%2A%3B%20do%20echo%208192%20%3E%20%24x/rps_flow_cnt%3B%20%20done%27%22%0A - filesystem: root - mode: 0644 - path: /etc/udev/rules.d/70-persistent-net.rules - - contents: - source: data:text/plain;charset=US-ASCII,%23%20define%20sock%20flow%20enbtried%20for%20%20Receive%20Flow%20Steering%20%28RFS%29%0Anet.core.rps_sock_flow_entries%3D8192%0A - filesystem: root - mode: 0644 - path: /etc/sysctl.d/95-enable-rps.conf ----- - -. Create the MCO profile: -+ -[source,terminal] ----- -$ oc create -f enable-rfs.yaml ----- - -. Verify that an entry named `50-enable-rfs` is listed: -+ -[source,terminal] ----- -$ oc get mc ----- - -. To deactivate, enter: -+ -[source,terminal] ----- -$ oc delete mc 50-enable-rfs ----- - diff --git a/modules/ibm-z-choose-networking-setup.adoc b/modules/ibm-z-choose-networking-setup.adoc deleted file mode 100644 index c6b72387e26d..000000000000 --- a/modules/ibm-z-choose-networking-setup.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ibm-z-recommended-host-practices.adoc - -:_content-type: CONCEPT -[id="ibm-z-choose-networking-setup_{context}"] -= Choose your networking setup - -The networking stack is one of the most important components for a Kubernetes-based product like {product-title}. For {ibmzProductName} setups, the networking setup depends on the hypervisor of your choice. Depending on the workload and the application, the best fit usually changes with the use case and the traffic pattern. - -Depending on your setup, consider these best practices: - -* Consider all options regarding networking devices to optimize your traffic pattern. Explore the advantages of OSA-Express, RoCE Express, HiperSockets, z/VM VSwitch, Linux Bridge (KVM), and others to decide which option leads to the greatest benefit for your setup. -* Always use the latest available NIC version. For example, OSA Express 7S 10 GbE shows great improvement compared to OSA Express 6S 10 GbE with transactional workload types, although both are 10 GbE adapters. -* Each virtual switch adds an additional layer of latency. -* The load balancer plays an important role for network communication outside the cluster. Consider using a production-grade hardware load balancer if this is critical for your application. -* {product-title} SDN introduces flows and rules, which impact the networking performance. Make sure to consider pod affinities and placements, to benefit from the locality of services where communication is critical. -* Balance the trade-off between performance and functionality. 
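As a starting point for these decisions, you can review the interfaces that a node currently uses. The following command is only a quick inspection sketch; it assumes `oc debug` access to the node and uses `<node_name>` as a placeholder:

[source,terminal]
----
$ oc debug node/<node_name> -- chroot /host ip -d link show <1>
----
<1> Lists the network interfaces that are configured on the node, with additional device details.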
diff --git a/modules/ibm-z-disable-thp.adoc b/modules/ibm-z-disable-thp.adoc deleted file mode 100644 index 7a706757ef98..000000000000 --- a/modules/ibm-z-disable-thp.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ibm-z-recommended-host-practices.adoc - -:_content-type: CONCEPT -[id="ibm-z-disable-thp_{context}"] -= Disable Transparent Huge Pages - -Transparent Huge Pages (THP) attempt to automate most aspects of creating, managing, and using huge pages. Since THP automatically manages the huge pages, this is not always handled optimally for all types of workloads. THP can lead to performance regressions, since many applications handle huge pages on their own. Therefore, consider disabling THP. diff --git a/modules/ibm-z-ensure-high-disk-performance-hyperpav.adoc b/modules/ibm-z-ensure-high-disk-performance-hyperpav.adoc deleted file mode 100644 index acdd66b32547..000000000000 --- a/modules/ibm-z-ensure-high-disk-performance-hyperpav.adoc +++ /dev/null @@ -1,88 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ibm-z-recommended-host-practices.adoc - -:_content-type: PROCEDURE -[id="ibm-z-ensure-high-disk-performance-hyperpav_{context}"] -= Ensure high disk performance with HyperPAV on z/VM - -DASD and ECKD devices are commonly used disk types in {ibmzProductName} environments. In a typical {product-title} setup in z/VM environments, DASD disks are commonly used to support the local storage for the nodes. You can set up HyperPAV alias devices to provide more throughput and overall better I/O performance for the DASD disks that support the z/VM guests. - -Using HyperPAV for the local storage devices leads to a significant performance benefit. However, you must be aware that there is a trade-off between throughput and CPU costs. - -[id="use-the-mco-to-activate-hyperpav-aliases-in-nodes-using-zvm-full-pack-minidisks_{context}"] -== Use the Machine Config Operator (MCO) to activate HyperPAV aliases in nodes using z/VM full-pack minidisks - -For z/VM-based {product-title} setups that use full-pack minidisks, you can leverage the advantage of MCO profiles by activating HyperPAV aliases in all of the nodes. You must add YAML configurations for both control plane and compute nodes. - -.Procedure - -. Copy the following MCO sample profile into a YAML file for the control plane node. For example, `05-master-kernelarg-hpav.yaml`: -+ -[source,terminal] ----- -$ cat 05-master-kernelarg-hpav.yaml -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: master - name: 05-master-kernelarg-hpav -spec: - config: - ignition: - version: 3.1.0 - kernelArguments: - - rd.dasd=800-805 ----- - -. Copy the following MCO sample profile into a YAML file for the compute node. For example, `05-worker-kernelarg-hpav.yaml`: -+ -[source,terminal] ----- -$ cat 05-worker-kernelarg-hpav.yaml -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker - name: 05-worker-kernelarg-hpav -spec: - config: - ignition: - version: 3.1.0 - kernelArguments: - - rd.dasd=800-805 ----- - -+ -[NOTE] -==== -You must modify the `rd.dasd` arguments to fit the device IDs. -==== - -. Create the MCO profiles: -+ -[source,terminal] ----- -$ oc create -f 05-master-kernelarg-hpav.yaml ----- - -+ -[source,terminal] ----- -$ oc create -f 05-worker-kernelarg-hpav.yaml ----- - -. 
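+
Optionally, verify that both entries are listed and watch the machine config pools roll the change out to the nodes. This check reuses the names from the previous step:
+
[source,terminal]
----
$ oc get mc | grep kernelarg-hpav
----
+
[source,terminal]
----
$ oc get mcp
----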
To deactivate, enter: -+ -[source,terminal] ----- -$ oc delete -f 05-master-kernelarg-hpav.yaml ----- - -+ -[source,terminal] ----- -$ oc delete -f 05-worker-kernelarg-hpav.yaml ----- \ No newline at end of file diff --git a/modules/ibm-z-managing-cpu-overcommitment.adoc b/modules/ibm-z-managing-cpu-overcommitment.adoc deleted file mode 100644 index 5c7c9dab1cc3..000000000000 --- a/modules/ibm-z-managing-cpu-overcommitment.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ibm-z-recommended-host-practices.adoc - -:_content-type: CONCEPT -[id="ibm-z-managing-cpu-overcommitment_{context}"] -= Managing CPU overcommitment - -In a highly virtualized {ibmzProductName} environment, you must carefully plan the infrastructure setup and sizing. One of the most important features of virtualization is the capability to do resource overcommitment, allocating more resources to the virtual machines than actually available at the hypervisor level. This is very workload dependent and there is no golden rule that can be applied to all setups. - -Depending on your setup, consider these best practices regarding CPU overcommitment: - -* At LPAR level (PR/SM hypervisor), avoid assigning all available physical cores (IFLs) to each LPAR. For example, with four physical IFLs available, you should not define three LPARs with four logical IFLs each. -* Check and understand LPAR shares and weights. -* An excessive number of virtual CPUs can adversely affect performance. Do not define more virtual processors to a guest than logical processors are defined to the LPAR. -* Configure the number of virtual processors per guest for peak workload, not more. -* Start small and monitor the workload. Increase the vCPU number incrementally if necessary. -* Not all workloads are suitable for high overcommitment ratios. If the workload is CPU intensive, you will probably not be able to achieve high ratios without performance problems. Workloads that are more I/O intensive can keep consistent performance even with high overcommitment ratios. \ No newline at end of file diff --git a/modules/ibm-z-rhel-kvm-host-recommendations.adoc b/modules/ibm-z-rhel-kvm-host-recommendations.adoc deleted file mode 100644 index 2ec915c7b189..000000000000 --- a/modules/ibm-z-rhel-kvm-host-recommendations.adoc +++ /dev/null @@ -1,187 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ibm-z-recommended-host-practices.adoc - -:_content-type: PROCEDURE -[id="ibm-z-rhel-kvm-host-recommendations_{context}"] -= {op-system-base} KVM on {ibmzProductName} host recommendations - -Optimizing a KVM virtual server environment strongly depends on the workloads of the virtual servers and on the available resources. The same action that enhances performance in one environment can have adverse effects in another. Finding the best balance for a particular setting can be a challenge and often involves experimentation. - -The following section introduces some best practices when using {product-title} with {op-system-base} KVM on {ibmzProductName} and {linuxoneProductName} environments. - -[id="use-multiple-queues-for-your-virtio-network-interfaces_{context}"] -== Use multiple queues for your VirtIO network interfaces - -With multiple virtual CPUs, you can transfer packages in parallel if you provide multiple queues for incoming and outgoing packets. Use the `queues` attribute of the `driver` element to configure multiple queues. 
Specify an integer of at least 2 that does not exceed the number of virtual CPUs of the virtual server. - -The following example specification configures two input and output queues for a network interface: - -[source,xml] ----- - - - - - ----- - -Multiple queues are designed to provide enhanced performance for a network interface, but they also use memory and CPU resources. Start with defining two queues for busy interfaces. Next, try two queues for interfaces with less traffic or more than two queues for busy interfaces. - -[id="use-io-threads-for-your-virtual-block-devices_{context}"] -== Use I/O threads for your virtual block devices - -To make virtual block devices use I/O threads, you must configure one or more I/O threads for the virtual server and each virtual block device to use one of these I/O threads. - -The following example specifies `3` to configure three I/O threads, with consecutive decimal thread IDs 1, 2, and 3. The `iothread="2"` parameter specifies the driver element of the disk device to use the I/O thread with ID 2. - - -.Sample I/O thread specification -[source,xml] ----- -... - - 3<1> - ... - - ... - <2> - - - ... - - ... - ----- -<1> The number of I/O threads. -<2> The driver element of the disk device. - -Threads can increase the performance of I/O operations for disk devices, but they also use memory and CPU resources. You can configure multiple devices to use the same thread. The best mapping of threads to devices depends on the available resources and the workload. - -Start with a small number of I/O threads. Often, a single I/O thread for all disk devices is sufficient. Do not configure more threads than the number of virtual CPUs, and do not configure idle threads. - -You can use the `virsh iothreadadd` command to add I/O threads with specific thread IDs to a running virtual server. - -[id="avoid-virtual-scsi-devices_{context}"] -== Avoid virtual SCSI devices - -Configure virtual SCSI devices only if you need to address the device through SCSI-specific interfaces. Configure disk space as virtual block devices rather than virtual SCSI devices, regardless of the backing on the host. - -However, you might need SCSI-specific interfaces for: - -* A LUN for a SCSI-attached tape drive on the host. - -* A DVD ISO file on the host file system that is mounted on a virtual DVD drive. - -[id="configure-guest-caching-for-disk_{context}"] -== Configure guest caching for disk - -Configure your disk devices to do caching by the guest and not by the host. - -Ensure that the driver element of the disk device includes the `cache="none"` and `io="native"` parameters. - -[source,xml] ----- - - -... - ----- - -[id="exclude-the-memory-ballon-device_{context}"] -== Exclude the memory balloon device - -Unless you need a dynamic memory size, do not define a memory balloon device and ensure that libvirt does not create one for you. Include the `memballoon` parameter as a child of the devices element in your domain configuration XML file. - -* Check the list of active profiles: -+ -[source,xml] ----- - ----- - -[id="tune-the-cpu-migration-algorithm-of-the-host-scheduler_{context}"] -== Tune the CPU migration algorithm of the host scheduler - -[IMPORTANT] -==== -Do not change the scheduler settings unless you are an expert who understands the implications. Do not apply changes to production systems without testing them and confirming that they have the intended effect. -==== - -The `kernel.sched_migration_cost_ns` parameter specifies a time interval in nanoseconds. 
After the last execution of a task, the CPU cache is considered to have useful content until this interval expires. Increasing this interval results in fewer task migrations. The default value is 500000 ns. - -If the CPU idle time is higher than expected when there are runnable processes, try reducing this interval. If tasks bounce between CPUs or nodes too often, try increasing it. - -To dynamically set the interval to 60000 ns, enter the following command: - -[source,terminal] ----- -# sysctl kernel.sched_migration_cost_ns=60000 ----- - -To persistently change the value to 60000 ns, add the following entry to `/etc/sysctl.conf`: - -[source,config] ----- -kernel.sched_migration_cost_ns=60000 ----- - -[id="disable-the-cpuset-cgroup-controller_{context}"] -== Disable the cpuset cgroup controller - -[NOTE] -==== -This setting applies only to KVM hosts with cgroups version 1. To enable CPU hotplug on the host, disable the cgroup controller. -==== - -.Procedure - -. Open `/etc/libvirt/qemu.conf` with an editor of your choice. - -. Go to the `cgroup_controllers` line. - -. Duplicate the entire line and remove the leading number sign (#) from the copy. - -. Remove the `cpuset` entry, as follows: -+ -[source,config] ----- -cgroup_controllers = [ "cpu", "devices", "memory", "blkio", "cpuacct" ] ----- - -. For the new setting to take effect, you must restart the libvirtd daemon: - -.. Stop all virtual machines. - -.. Run the following command: -+ -[source,terminal] ----- -# systemctl restart libvirtd ----- - -.. Restart the virtual machines. - -This setting persists across host reboots. - -[id="tune-the-polling-period-for-idle-virtual-cpus_{context}"] -== Tune the polling period for idle virtual CPUs - -When a virtual CPU becomes idle, KVM polls for wakeup conditions for the virtual CPU before allocating the host resource. You can specify the time interval, during which polling takes place in sysfs at `/sys/module/kvm/parameters/halt_poll_ns`. During the specified time, polling reduces the wakeup latency for the virtual CPU at the expense of resource usage. Depending on the workload, a longer or shorter time for polling can be beneficial. The time interval is specified in nanoseconds. The default is 50000 ns. - -* To optimize for low CPU consumption, enter a small value or write 0 to disable polling: - -+ -[source,terminal] ----- -# echo 0 > /sys/module/kvm/parameters/halt_poll_ns ----- - -* To optimize for low latency, for example for transactional workloads, enter a large value: - -+ -[source,terminal] ----- -# echo 80000 > /sys/module/kvm/parameters/halt_poll_ns ----- - diff --git a/modules/ibm-z-secure-execution.adoc b/modules/ibm-z-secure-execution.adoc deleted file mode 100644 index c0c65cdb4ef2..000000000000 --- a/modules/ibm-z-secure-execution.adoc +++ /dev/null @@ -1,146 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc - -:_content-type: PROCEDURE -[id="installing-rhcos-using-ibm-secure-execution_{context}"] -= Installing {op-system} using IBM Secure Execution - -Before you install {op-system} using IBM Secure Execution, you must prepare the underlying infrastructure. - -.Prerequisites - -* IBM z15 or later, or {linuxoneProductName} III or later. -* {op-system-base-full} 8 or later. -* You have a bootstrap Ignition file. The file is not protected, enabling others to view and edit it. 
-* You have verified that the boot image has not been altered after installation. -* You must run all your nodes as IBM Secure Execution guests. - -.Procedure - -. Prepare your {op-system-base} KVM host to support IBM Secure Execution. - -** By default, KVM hosts do not support guests in IBM Secure Execution mode. To support guests in IBM Secure Execution mode, KVM hosts must boot in LPAR mode with the kernel parameter specification `prot_virt=1`. To enable `prot_virt=1` on {op-system-base} 8, follow these steps: - -.. Navigate to `/boot/loader/entries/` to modify your bootloader configuration file `*.conf`. -.. Add the kernel command line parameter `prot_virt=1`. -.. Run the `zipl` command and reboot your system. -+ -KVM hosts that successfully start with support for IBM Secure Execution for Linux issue the following kernel message: -+ -[source,terminal] ----- -prot_virt: Reserving MB as ultravisor base storage. ----- -.. To verify that the KVM host now supports IBM Secure Execution, run the following command: -+ -[source,terminal] ----- -# cat /sys/firmware/uv/prot_virt_host ----- -+ -.Example output -+ -[source,terminal] ----- -1 ----- -The value of this attribute is 1 for Linux instances that detect their environment as consistent with that of a secure host. For other instances, the value is 0. - -. Add your host keys to the KVM guest via Ignition. -+ -During the first boot, {op-system} looks for your host keys to re-encrypt itself with them. {op-system} searches for files starting with `ibm-z-hostkey-` in the `/etc/se-hostkeys` directory. All host keys, for each machine the cluster is running on, must be loaded into the directory by the administrator. After first boot, you cannot run the VM on any other machines. -+ -[NOTE] -==== -You need to prepare your Ignition file on a safe system. For example, another IBM Secure Execution guest. -==== -+ -For example: -+ -[source,terminal] ----- -{ - "ignition": { "version": "3.0.0" }, - "storage": { - "files": [ - { - "path": "/etc/se-hostkeys/ibm-z-hostkey-.crt", - "contents": { - "source": "data:;base64," - }, - "mode": 420 - }, - { - "path": "/etc/se-hostkeys/ibm-z-hostkey-.crt", - "contents": { - "source": "data:;base64," - }, - "mode": 420 - } - ] - } -} -``` ----- -+ -[NOTE] -==== -You can add as many host keys as required if you want your node to be able to run on multiple {ibmzProductName} machines. -==== -. To generate the Base64 encoded string, run the following command: -+ -[source,terminal] ----- -base64 .crt ----- -+ -Compared to guests not running IBM Secure Execution, the first boot of the machine is longer because the entire image is encrypted with a randomly generated LUKS passphrase before the Ignition phase. - -. Add Ignition protection -+ -To protect the secrets that are stored in the Ignition config file from being read or even modified, you must encrypt the Ignition config file. -+ -[NOTE] -==== -To achieve the desired security, Ignition logging and local login are disabled by default when running IBM Secure Execution. -==== -.. Fetch the public GPG key for the `secex-qemu.qcow2` image and encrypt the Ignition config with the key by running the following command: -+ -[source,terminal] ----- -gpg --recipient-file /path/to/ignition.gpg.pub --yes --output /path/to/config.ign.gpg --verbose --armor --encrypt /path/to/config.ign ----- -+ -[NOTE] -==== -Before starting the VM, replace `serial=ignition` with `serial=ignition_crypted` when mounting the Ignition file. 
-==== -+ -When Ignition runs on the first boot, and the decryption is successful, you will see an output like the following example: -+ -.Example output -[source,terminal] ----- -[ 2.801433] systemd[1]: Starting coreos-ignition-setup-user.service - CoreOS Ignition User Config Setup... - -[ 2.803959] coreos-secex-ignition-decrypt[731]: gpg: key : public key "Secure Execution (secex) 38.20230323.dev.0" imported -[ 2.808874] coreos-secex-ignition-decrypt[740]: gpg: encrypted with rsa4096 key, ID , created -[ OK ] Finished coreos-secex-igni…S Secex Ignition Config Decryptor. ----- -+ -If the decryption fails, you will see an output like the following example: -+ -.Example output -[source,terminal] ----- -Starting coreos-ignition-s…reOS Ignition User Config Setup... -[ 2.863675] coreos-secex-ignition-decrypt[729]: gpg: key : public key "Secure Execution (secex) 38.20230323.dev.0" imported -[ 2.869178] coreos-secex-ignition-decrypt[738]: gpg: encrypted with RSA key, ID -[ 2.870347] coreos-secex-ignition-decrypt[738]: gpg: public key decryption failed: No secret key -[ 2.870371] coreos-secex-ignition-decrypt[738]: gpg: decryption failed: No secret key ----- -+ - -. Follow the fast-track installation procedure to install nodes using the IBM Secure Exection QCOW image. \ No newline at end of file diff --git a/modules/ibmz-configure-devices-manually.adoc b/modules/ibmz-configure-devices-manually.adoc deleted file mode 100644 index 951a6f4823b4..000000000000 --- a/modules/ibmz-configure-devices-manually.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * post-installation-configuration/ibmz-post-install.adoc - -:_content-type: PROCEDURE -[id="configure-additional-devices-manually_{context}"] -= Configuring additional devices manually - -Tasks in this section describe how to manually configure additional devices in an {ibmzProductName} or {linuxoneProductName} environment. This configuration method is persistent over node restarts but not {product-title} native and you need to redo the steps if you replace the node. - -.Prerequisites - -* You are logged in to the cluster as a user with administrative privileges. -* The device must be available to the node. -* In a z/VM environment, the device must be attached to the z/VM guest. - -.Procedure - -. Connect to the node via SSH by running the following command: -+ -[source,terminal] ----- -$ ssh @ ----- -+ -You can also start a debug session to the node by running the following command: -+ -[source,terminal] ----- -$ oc debug node/ ----- - -. To enable the devices with the `chzdev` command, enter the following command: -+ -[source,terminal] ----- -$ sudo chzdev -e 0.0.8000 - sudo chzdev -e 1000-1002 - sude chzdev -e 4444 - sudo chzdev -e 0.0.8000:0x500507680d760026:0x00bc000000000000 ----- \ No newline at end of file diff --git a/modules/ibmz-configure-devices-mco.adoc b/modules/ibmz-configure-devices-mco.adoc deleted file mode 100644 index 84789c7732bb..000000000000 --- a/modules/ibmz-configure-devices-mco.adoc +++ /dev/null @@ -1,263 +0,0 @@ -// Module included in the following assemblies: -// -// * post-installation-configuration/ibmz-post-install.adoc - -:_content-type: PROCEDURE -[id="configure-additional-devices-using-mco_{context}"] -= Configuring additional devices using the Machine Config Operator (MCO) - -Tasks in this section describe how to use features of the Machine Config Operator (MCO) to configure additional devices in an {ibmzProductName} or {linuxoneProductName} environment. 
Configuring devices with the MCO is persistent but only allows specific configurations for compute nodes. MCO does not allow control plane nodes to have different configurations. - -.Prerequisites - -* You are logged in to the cluster as a user with administrative privileges. -* The device must be available to the z/VM guest. -* The device is already attached. -* The device is not included in the `cio_ignore` list, which can be set in the kernel parameters. -* You have created a `MachineConfig` object file with the following YAML: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - name: worker0 -spec: - machineConfigSelector: - matchExpressions: - - {key: machineconfiguration.openshift.io/role, operator: In, values: [worker,worker0]} - nodeSelector: - matchLabels: - node-role.kubernetes.io/worker0: "" ----- - -[id="configuring-fcp-host"] -== Configuring a Fibre Channel Protocol (FCP) host - -The following is an example of how to configure an FCP host adapter with N_Port Identifier Virtualization (NPIV) by adding a udev rule. - -.Procedure - -. Take the following sample udev rule `441-zfcp-host-0.0.8000.rules`: -+ -[source,terminal] ----- -ACTION=="add", SUBSYSTEM=="ccw", KERNEL=="0.0.8000", DRIVER=="zfcp", GOTO="cfg_zfcp_host_0.0.8000" -ACTION=="add", SUBSYSTEM=="drivers", KERNEL=="zfcp", TEST=="[ccw/0.0.8000]", GOTO="cfg_zfcp_host_0.0.8000" -GOTO="end_zfcp_host_0.0.8000" - -LABEL="cfg_zfcp_host_0.0.8000" -ATTR{[ccw/0.0.8000]online}="1" - -LABEL="end_zfcp_host_0.0.8000" ----- - -. Convert the rule to Base64 encoded by running the following command: -+ -[source,terminal] ----- -$ base64 /path/to/file/ ----- - -. Copy the following MCO sample profile into a YAML file: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker0 <1> - name: 99-worker0-devices -spec: - config: - ignition: - version: 3.2.0 - storage: - files: - - contents: - source: data:text/plain;base64, <2> - filesystem: root - mode: 420 - path: /etc/udev/rules.d/41-zfcp-host-0.0.8000.rules <3> ----- -<1> The role you have defined in the machine config file. -<2> The Base64 encoded string that you have generated in the previous step. -<3> The path where the udev rule is located. - -[id="configuring-fcp-lun"] -== Configuring an FCP LUN -The following is an example of how to configure an FCP LUN by adding a udev rule. You can add new FCP LUNs or add additional paths to LUNs that are already configured with multipathing. - -.Procedure - -. Take the following sample udev rule `41-zfcp-lun-0.0.8000:0x500507680d760026:0x00bc000000000000.rules`: -+ -[source,terminal] ----- -ACTION=="add", SUBSYSTEMS=="ccw", KERNELS=="0.0.8000", GOTO="start_zfcp_lun_0.0.8207" -GOTO="end_zfcp_lun_0.0.8000" - -LABEL="start_zfcp_lun_0.0.8000" -SUBSYSTEM=="fc_remote_ports", ATTR{port_name}=="0x500507680d760026", GOTO="cfg_fc_0.0.8000_0x500507680d760026" -GOTO="end_zfcp_lun_0.0.8000" - -LABEL="cfg_fc_0.0.8000_0x500507680d760026" -ATTR{[ccw/0.0.8000]0x500507680d760026/unit_add}="0x00bc000000000000" -GOTO="end_zfcp_lun_0.0.8000" - -LABEL="end_zfcp_lun_0.0.8000" ----- - -. Convert the rule to Base64 encoded by running the following command: -+ -[source,terminal] ----- -$ base64 /path/to/file/ ----- - -. 
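+
[NOTE]
====
If your `base64` command wraps its output across multiple lines, the encoded string is harder to embed in the `source` field of the machine config. With GNU coreutils, for example, you can disable wrapping:

[source,terminal]
----
$ base64 -w0 /path/to/file/
----
====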
Copy the following MCO sample profile into a YAML file: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker0 <1> - name: 99-worker0-devices -spec: - config: - ignition: - version: 3.2.0 - storage: - files: - - contents: - source: data:text/plain;base64, <2> - filesystem: root - mode: 420 - path: /etc/udev/rules.d/41-zfcp-lun-0.0.8000:0x500507680d760026:0x00bc000000000000.rules <3> ----- -<1> The role you have defined in the machine config file. -<2> The Base64 encoded string that you have generated in the previous step. -<3> The path where the udev rule is located. - -[id="configuring-dasd"] -== Configuring DASD - -The following is an example of how to configure a DASD device by adding a udev rule. - -.Procedure - -. Take the following sample udev rule `41-dasd-eckd-0.0.4444.rules`: -+ -[source,terminal] ----- -ACTION=="add", SUBSYSTEM=="ccw", KERNEL=="0.0.4444", DRIVER=="dasd-eckd", GOTO="cfg_dasd_eckd_0.0.4444" -ACTION=="add", SUBSYSTEM=="drivers", KERNEL=="dasd-eckd", TEST=="[ccw/0.0.4444]", GOTO="cfg_dasd_eckd_0.0.4444" -GOTO="end_dasd_eckd_0.0.4444" - -LABEL="cfg_dasd_eckd_0.0.4444" -ATTR{[ccw/0.0.4444]online}="1" - -LABEL="end_dasd_eckd_0.0.4444" ----- - -. Convert the rule to Base64 encoded by running the following command: -+ -[source,terminal] ----- -$ base64 /path/to/file/ ----- - -. Copy the following MCO sample profile into a YAML file: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker0 <1> - name: 99-worker0-devices -spec: - config: - ignition: - version: 3.2.0 - storage: - files: - - contents: - source: data:text/plain;base64, <2> - filesystem: root - mode: 420 - path: /etc/udev/rules.d/41-dasd-eckd-0.0.4444.rules <3> ----- -<1> The role you have defined in the machine config file. -<2> The Base64 encoded string that you have generated in the previous step. -<3> The path where the udev rule is located. - -[id="configuring-qeth"] -== Configuring qeth - -The following is an example of how to configure a qeth device by adding a udev rule. - -.Procedure - -. Take the following sample udev rule `41-qeth-0.0.1000.rules`: -+ -[source,terminal] ----- -ACTION=="add", SUBSYSTEM=="drivers", KERNEL=="qeth", GOTO="group_qeth_0.0.1000" -ACTION=="add", SUBSYSTEM=="ccw", KERNEL=="0.0.1000", DRIVER=="qeth", GOTO="group_qeth_0.0.1000" -ACTION=="add", SUBSYSTEM=="ccw", KERNEL=="0.0.1001", DRIVER=="qeth", GOTO="group_qeth_0.0.1000" -ACTION=="add", SUBSYSTEM=="ccw", KERNEL=="0.0.1002", DRIVER=="qeth", GOTO="group_qeth_0.0.1000" -ACTION=="add", SUBSYSTEM=="ccwgroup", KERNEL=="0.0.1000", DRIVER=="qeth", GOTO="cfg_qeth_0.0.1000" -GOTO="end_qeth_0.0.1000" - -LABEL="group_qeth_0.0.1000" -TEST=="[ccwgroup/0.0.1000]", GOTO="end_qeth_0.0.1000" -TEST!="[ccw/0.0.1000]", GOTO="end_qeth_0.0.1000" -TEST!="[ccw/0.0.1001]", GOTO="end_qeth_0.0.1000" -TEST!="[ccw/0.0.1002]", GOTO="end_qeth_0.0.1000" -ATTR{[drivers/ccwgroup:qeth]group}="0.0.1000,0.0.1001,0.0.1002" -GOTO="end_qeth_0.0.1000" - -LABEL="cfg_qeth_0.0.1000" -ATTR{[ccwgroup/0.0.1000]online}="1" - -LABEL="end_qeth_0.0.1000" ----- - -. Convert the rule to Base64 encoded by running the following command: -+ -[source,terminal] ----- -$ base64 /path/to/file/ ----- - -. 
Copy the following MCO sample profile into a YAML file: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker0 <1> - name: 99-worker0-devices -spec: - config: - ignition: - version: 3.2.0 - storage: - files: - - contents: - source: data:text/plain;base64, <2> - filesystem: root - mode: 420 - path: /etc/udev/rules.d/41-qeth-0.0.1000.rules <3> ----- -<1> The role you have defined in the machine config file. -<2> The Base64 encoded string that you have generated in the previous step. -<3> The path where the udev rule is located. - diff --git a/modules/ibmz-configure-nbde-with-static-ip.adoc b/modules/ibmz-configure-nbde-with-static-ip.adoc deleted file mode 100644 index d023b50b5666..000000000000 --- a/modules/ibmz-configure-nbde-with-static-ip.adoc +++ /dev/null @@ -1,162 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc - -ifeval::["{context}" == "installing-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:ibm-z-kvm: -endif::[] - -:_content-type: PROCEDURE -[id="configuring-nbde-static-ip-ibmz-linuxone-environment_{context}"] -= Configuring NBDE with static IP in an {ibmzProductName} or {linuxoneProductName} environment - -Enabling NBDE disk encryption in an {ibmzProductName} or {linuxoneProductName} environment requires additional steps, which are described in detail in this section. - -.Prerequisites - -* You have set up the External Tang Server. See link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/security_hardening/configuring-automated-unlocking-of-encrypted-volumes-using-policy-based-decryption_security-hardening#network-bound-disk-encryption_configuring-automated-unlocking-of-encrypted-volumes-using-policy-based-decryption[Network-bound disk encryption] for instructions. -* You have installed the `butane` utility. -* You have reviewed the instructions for how to create machine configs with Butane. - -.Procedure - -. Create Butane configuration files for the control plane and compute nodes.
-+ -The following example of a Butane configuration for a control plane node creates a file named `master-storage.bu` for disk encryption: -+ -[source,yaml,subs="attributes+"] ----- -variant: openshift -version: {product-version}.0 -metadata: - name: master-storage - labels: - machineconfiguration.openshift.io/role: master -storage: - luks: - - clevis: - tang: - - thumbprint: QcPr_NHFJammnRCA3fFMVdNBwjs - url: http://clevis.example.com:7500 - options: <1> - - --cipher - - aes-cbc-essiv:sha256 -ifndef::ibm-z-kvm[] - device: /dev/disk/by-partlabel/root <2> -endif::ibm-z-kvm[] -ifdef::ibm-z-kvm[] - device: /dev/disk/by-partlabel/root -endif::ibm-z-kvm[] - label: luks-root - name: root - wipe_volume: true - filesystems: - - device: /dev/mapper/root - format: xfs - label: root - wipe_filesystem: true -openshift: -ifndef::ibm-z-kvm[] - fips: true <3> -endif::ibm-z-kvm[] -ifdef::ibm-z-kvm[] - fips: true <2> -endif::ibm-z-kvm[] ----- -ifdef::ibm-z-kvm[] -<1> The cipher option is only required if FIPS mode is enabled. Omit the entry if FIPS is disabled. -<2> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -endif::ibm-z-kvm[] -ifndef::ibm-z-kvm[] -<1> The cipher option is only required if FIPS mode is enabled. Omit the entry if FIPS is disabled. -<2> For installations on DASD-type disks, replace with `device: /dev/disk/by-label/root`. -<3> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -endif::ibm-z-kvm[] - -. Create a customized initramfs file to boot the machine, by running the following command: -+ -[source,terminal] ----- -$ coreos-installer pxe customize \ - /root/rhcos-bootfiles/rhcos--live-initramfs.s390x.img \ - --dest-device /dev/sda --dest-karg-append \ - ip=::::::none \ - --dest-karg-append nameserver= \ - --dest-karg-append rd.neednet=1 -o \ - /root/rhcos-bootfiles/-initramfs.s390x.img ----- -+ -[NOTE] -==== -Before first boot, you must customize the initramfs for each node in the cluster, and add PXE kernel parameters. -==== - -. Create a parameter file that includes `ignition.platform.id=metal` and `ignition.firstboot`. -+ -.Example kernel parameter file for the control plane machine: -+ -ifndef::ibm-z-kvm[] -[source,terminal] ----- -rd.neednet=1 \ -console=ttysclp0 \ -coreos.inst.install_dev=/dev/dasda \ <1> -ignition.firstboot ignition.platform.id=metal \ -coreos.live.rootfs_url=http://10.19.17.25/redhat/ocp/rhcos-413.86.202302201445-0/rhcos-413.86.202302201445-0-live-rootfs.s390x.img \ -coreos.inst.ignition_url=http://bastion.ocp-cluster1.example.com:8080/ignition/master.ign \ -ip=10.19.17.2::10.19.17.1:255.255.255.0::enbdd0:none nameserver=10.19.17.1 \ -zfcp.allow_lun_scan=0 \ <2> -rd.znet=qeth,0.0.bdd0,0.0.bdd1,0.0.bdd2,layer2=1 \ -rd.zfcp=0.0.5677,0x600606680g7f0056,0x034F000000000000 <3> ----- -<1> For installations on DASD-type disks, add `coreos.inst.install_dev=/dev/dasda`. Omit this value for FCP-type disks.
-<2> For installations on FCP-type disks, add `zfcp.allow_lun_scan=0`. Omit this value for DASD-type disks. -<3> For installations on DASD-type disks, replace with `rd.dasd=0.0.3490` to specify the DASD device. -endif::ibm-z-kvm[] -ifdef::ibm-z-kvm[] -[source,terminal] ----- -rd.neednet=1 \ -console=ttysclp0 \ -ignition.firstboot ignition.platform.id=metal \ -coreos.live.rootfs_url=http://10.19.17.25/redhat/ocp/rhcos-413.86.202302201445-0/rhcos-413.86.202302201445-0-live-rootfs.s390x.img \ -coreos.inst.ignition_url=http://bastion.ocp-cluster1.example.com:8080/ignition/master.ign \ -ip=10.19.17.2::10.19.17.1:255.255.255.0::enbdd0:none nameserver=10.19.17.1 \ -zfcp.allow_lun_scan=0 \ -rd.znet=qeth,0.0.bdd0,0.0.bdd1,0.0.bdd2,layer2=1 \ -rd.zfcp=0.0.5677,0x600606680g7f0056,0x034F000000000000 ----- -endif::ibm-z-kvm[] -+ -[NOTE] -==== -Write all options in the parameter file as a single line and make sure you have no newline characters. -==== - -ifeval::["{context}" == "installing-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:!ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:!ibm-z-kvm: -endif::[] \ No newline at end of file diff --git a/modules/ibmz-enable-multipathing-fcp-luns.adoc b/modules/ibmz-enable-multipathing-fcp-luns.adoc deleted file mode 100644 index 44e1bc4cd578..000000000000 --- a/modules/ibmz-enable-multipathing-fcp-luns.adoc +++ /dev/null @@ -1,78 +0,0 @@ -// Module included in the following assemblies: -// -// * post-installation-configuration/ibmz-post-install.adoc - -:_content-type: PROCEDURE -[id="enabling-multipathing-fcp-luns_{context}"] -= Enabling multipathing for FCP LUNs - -Tasks in this section describe how to manually configure additional devices in an {ibmzProductName} or {linuxoneProductName} environment. This configuration method is persistent over node restarts but not {product-title} native and you need to redo the steps if you replace the node. - -[IMPORTANT] -==== -On {ibmzProductName} and {linuxoneProductName}, you can enable multipathing only if you configured your cluster for it during installation. For more information, see "Installing {op-system} and starting the {product-title} bootstrap process" in _Installing a cluster with z/VM on {ibmzProductName} and {linuxoneProductName}_. -==== - -.Prerequisites - -* You are logged in to the cluster as a user with administrative privileges. -* You have configured multiple paths to a LUN with either method explained above. - -.Procedure - -. Connect to the node via SSH by running the following command: -+ -[source,terminal] ----- -$ ssh @ ----- -+ -You can also start a debug session to the node by running the following command: -+ -[source,terminal] ----- -$ oc debug node/ ----- - -. To enable multipathing, run the following command: -+ -[source,terminal] ----- -$ sudo /sbin/mpathconf --enable ----- - -. To start the `multipathd` daemon, run the following command: -+ -[source,terminal] ----- -$ sudo multipath ----- - -. 
Optional: To format your multipath device with fdisk, run the following command: -+ -[source,terminal] ----- -$ sudo fdisk /dev/mapper/mpatha ----- - -.Verification - -* To verify that the devices have been grouped, run the following command: -+ -[source,terminal] ----- -$ sudo multipath -ll ----- -+ -.Example output -+ -[source,terminal] ----- -mpatha (20017380030290197) dm-1 IBM,2810XIV - size=512G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw - -+- policy='service-time 0' prio=50 status=enabled - |- 1:0:0:6 sde 68:16 active ready running - |- 1:0:1:6 sdf 69:24 active ready running - |- 0:0:0:6 sdg 8:80 active ready running - `- 0:0:1:6 sdh 66:48 active ready running ----- \ No newline at end of file diff --git a/modules/identity-provider-about-basic-authentication.adoc b/modules/identity-provider-about-basic-authentication.adoc deleted file mode 100644 index aa7370657006..000000000000 --- a/modules/identity-provider-about-basic-authentication.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc - -:_content-type: CONCEPT -[id="identity-provider-about-basic-authentication_{context}"] -= About basic authentication - -Basic authentication is a generic back-end integration mechanism that allows -users to log in to {product-title} with credentials validated against a remote -identity provider. - -Because basic authentication is generic, you can use this identity -provider for advanced authentication configurations. - -[IMPORTANT] -==== -Basic authentication must use an HTTPS connection to the remote server to -prevent potential snooping of the user ID and password and man-in-the-middle -attacks. -==== - -With basic authentication configured, users send their user name -and password to {product-title}, which then validates those credentials against -a remote server by making a server-to-server request, passing the credentials as -a basic authentication header. This requires users to send their credentials to -{product-title} during login. - -[NOTE] -==== -This only works for user name/password login mechanisms, and {product-title} must -be able to make network requests to the remote authentication server. -==== - -User names and passwords are validated against a remote URL that is protected -by basic authentication and returns JSON. - -A `401` response indicates failed authentication. - -A non-`200` status, or the presence of a non-empty "error" key, indicates an -error: - -[source,terminal] ----- -{"error":"Error message"} ----- - -A `200` status with a `sub` (subject) key indicates success: - -[source,terminal] ----- -{"sub":"userid"} <1> ----- -<1> The subject must be unique to the authenticated user and must not be able to -be modified. - -A successful response can optionally provide additional data, such as: - -* A display name using the `name` key. For example: -+ -[source,terminal] ----- -{"sub":"userid", "name": "User Name", ...} ----- -+ -* An email address using the `email` key. For example: -+ -[source,terminal] ----- -{"sub":"userid", "email":"user@example.com", ...} ----- -+ -* A preferred user name using the `preferred_username` key. This is useful when -the unique, unchangeable subject is a database key or UID, and a more -human-readable name exists. This is used as a hint when provisioning the -{product-title} user for the authenticated identity.
For example: -+ -[source,terminal] ----- -{"sub":"014fbff9a07c", "preferred_username":"bob", ...} ----- diff --git a/modules/identity-provider-about-ldap.adoc b/modules/identity-provider-about-ldap.adoc deleted file mode 100644 index 32b452b4ba4d..000000000000 --- a/modules/identity-provider-about-ldap.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-ldap-identity-provider.adoc - -:_content-type: CONCEPT -[id="identity-provider-about-ldap_{context}"] -= About LDAP authentication - -During authentication, the LDAP directory is searched for an entry that matches -the provided user name. If a single unique match is found, a simple bind is -attempted using the distinguished name (DN) of the entry plus the provided -password. - -These are the steps taken: - -. Generate a search filter by combining the attribute and filter in the -configured `url` with the user-provided user name. -. Search the directory using the generated filter. If the search does not return -exactly one entry, deny access. -. Attempt to bind to the LDAP server using the DN of the entry retrieved from -the search, and the user-provided password. -. If the bind is unsuccessful, deny access. -. If the bind is successful, build an identity using the configured attributes -as the identity, email address, display name, and preferred user name. - -The configured `url` is an RFC 2255 URL, which specifies the LDAP host and -search parameters to use. The syntax of the URL is: - ----- -ldap://host:port/basedn?attribute?scope?filter ----- - -For this URL: - -[cols="2a,8a",options="header"] -|=== -|URL component | Description -.^|`ldap` | For regular LDAP, use the string `ldap`. For secure LDAP -(LDAPS), use `ldaps` instead. -.^|`host:port` | The name and port of the LDAP server. Defaults to -`localhost:389` for ldap and `localhost:636` for LDAPS. -.^|`basedn` | The DN of the branch of the directory where all searches should -start from. At the very least, this must be the top of your directory tree, but -it could also specify a subtree in the directory. -.^|`attribute` | The attribute to search for. Although RFC 2255 allows a -comma-separated list of attributes, only the first attribute will be used, no -matter how many are provided. If no attributes are provided, the default is to -use `uid`. It is recommended to choose an attribute that will be unique across -all entries in the subtree you will be using. -.^|`scope` | The scope of the search. Can be either `one` or `sub`. -If the scope is not provided, the default is to use a scope of `sub`. -.^|`filter` | A valid LDAP search filter. If not provided, defaults to -`(objectClass=*)` -|=== - -When doing searches, the attribute, filter, and provided user name are combined -to create a search filter that looks like: - ----- -(&(<filter>)(<attribute>=<username>)) ----- - -For example, consider a URL of: - ----- -ldap://ldap.example.com/o=Acme?cn?sub?(enabled=true) ----- - -When a client attempts to connect using a user name of `bob`, the resulting -search filter will be `(&(enabled=true)(cn=bob))`. - -If the LDAP directory requires authentication to search, specify a `bindDN` and -`bindPassword` to use to perform the entry search.
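If you want to confirm what the generated filter matches before configuring the identity provider, one informal option is to reproduce the search with the OpenLDAP `ldapsearch` client. This is only a sketch: it assumes the `openldap-clients` package is installed and reuses the example URL above; the bind DN `cn=search-user,o=Acme` is a hypothetical service account and `bob` is a hypothetical user name.

[source,terminal]
----
$ ldapsearch -x -H ldap://ldap.example.com \
    -D "cn=search-user,o=Acme" -W \
    -b "o=Acme" -s sub "(&(enabled=true)(cn=bob))" dn
----

For the login to succeed, the search must return exactly one entry; zero or multiple matches result in denied access.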
diff --git a/modules/identity-provider-about-request-header.adoc b/modules/identity-provider-about-request-header.adoc deleted file mode 100644 index 17f0d8dd902a..000000000000 --- a/modules/identity-provider-about-request-header.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-request-header-identity-provider.adoc - -:_content-type: CONCEPT -[id="identity-provider-about-request-header_{context}"] -= About request header authentication - -A request header identity provider identifies users from request -header values, such as `X-Remote-User`. It is typically used in combination with -an authenticating proxy, which sets the request header value. The -request header identity provider cannot be combined with other identity providers -that use direct password logins, such as htpasswd, Keystone, LDAP or basic authentication. - -[NOTE] -==== -You can also use the request header identity provider for advanced configurations -such as the community-supported link:https://github.com/openshift/request-header-saml-service-provider[SAML authentication]. -Note that this solution is not supported by Red Hat. -==== - -For users to authenticate using this identity provider, they must access -`https://__/oauth/authorize` (and subpaths) via an authenticating proxy. -To accomplish this, configure the OAuth server to redirect unauthenticated -requests for OAuth tokens to the proxy endpoint that proxies to -`https://__/oauth/authorize`. - -To redirect unauthenticated requests from clients expecting browser-based login flows: - -* Set the `provider.loginURL` parameter to the authenticating proxy URL that -will authenticate interactive clients and then proxy the request to -`https://__/oauth/authorize`. - -To redirect unauthenticated requests from clients expecting `WWW-Authenticate` challenges: - -* Set the `provider.challengeURL` parameter to the authenticating proxy URL that -will authenticate clients expecting `WWW-Authenticate` challenges and then proxy -the request to `https://__/oauth/authorize`. - -The `provider.challengeURL` and `provider.loginURL` parameters can include -the following tokens in the query portion of the URL: - -* `${url}` is replaced with the current URL, escaped to be safe in a query parameter. -+ -For example: [x-]`https://www.example.com/sso-login?then=${url}` - -* `${query}` is replaced with the current query string, unescaped. -+ -For example: [x-]`https://www.example.com/auth-proxy/oauth/authorize?${query}` - -[IMPORTANT] -==== -As of {product-title} 4.1, your proxy must support mutual TLS. -==== - -[id="sspi-windows_{context}"] -== SSPI connection support on Microsoft Windows - -ifdef::openshift-enterprise,openshift-webscale[] - -:FeatureName: Using SSPI connection support on Microsoft Windows -include::snippets/technology-preview.adoc[leveloffset=+1] - -endif::[] - -The OpenShift CLI (`oc`) supports the Security Support Provider Interface (SSPI) to allow for SSO -flows on Microsoft Windows. If you use the request header identity provider with a -GSSAPI-enabled proxy to connect an Active Directory server to {product-title}, -users can automatically authenticate to {product-title} by using the `oc` command -line interface from a domain-joined Microsoft Windows computer.
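As an illustrative sketch only, not a required step: on a domain-joined Microsoft Windows computer that already holds a Kerberos ticket, logging in with the OpenShift CLI against such a setup typically prompts for no password, because SSPI handles the negotiate exchange with the GSSAPI-enabled proxy. The API server URL below is a placeholder.

[source,terminal]
----
> oc login https://api.cluster.example.com:6443
----

If SSO is working, `oc whoami` afterwards reports the user name that the proxy supplied in the `X-Remote-User` header.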
diff --git a/modules/identity-provider-add.adoc b/modules/identity-provider-add.adoc deleted file mode 100644 index 78a9612ec1fd..000000000000 --- a/modules/identity-provider-add.adoc +++ /dev/null @@ -1,116 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-allow-all-identity-provider.adoc -// * authentication/identity_providers/configuring-deny-all-identity-provider.adoc -// * authentication/identity_providers/configuring-htpasswd-identity-provider.adoc -// * authentication/identity_providers/configuring-keystone-identity-provider.adoc -// * authentication/identity_providers/configuring-ldap-identity-provider.adoc -// * authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc -// * authentication/identity_providers/configuring-request-header-identity-provider.adoc -// * authentication/identity_providers/configuring-github-identity-provider.adoc -// * authentication/identity_providers/configuring-gitlab-identity-provider.adoc -// * authentication/identity_providers/configuring-google-identity-provider.adoc -// * authentication/identity_providers/configuring-oidc-identity-provider.adoc - -// GitHub and Google IDPs do not support username/password login commands -ifeval::["{context}" == "configuring-github-identity-provider"] -:no-username-password-login: -endif::[] -ifeval::["{context}" == "configuring-google-identity-provider"] -:no-username-password-login: -endif::[] -// Only some OIDC IDPs support username/password login commands -ifeval::["{context}" == "configuring-oidc-identity-provider"] -:no-username-password-login: -:oidc: -endif::[] - -:_content-type: PROCEDURE -[id="add-identity-provider_{context}"] -= Adding an identity provider to your cluster - -After you install your cluster, add an identity provider to it so your -users can authenticate. - -.Prerequisites - -* Create an {product-title} cluster. -* Create the custom resource (CR) for your identity providers. -* You must be logged in as an administrator. - -.Procedure - -. Apply the defined CR: -+ -[source,terminal] ----- -$ oc apply -f ----- -+ -[NOTE] -==== -If a CR does not exist, `oc apply` creates a new CR and might trigger the following warning: `Warning: oc apply should be used on resources created by either oc create --save-config or oc apply`. In this case you can safely ignore this warning. -==== - -ifndef::no-username-password-login[] -. Log in to the cluster as a user from your identity provider, entering the -password when prompted. -+ -[source,terminal] ----- -$ oc login -u ----- -endif::no-username-password-login[] - -ifdef::no-username-password-login[] - -. Obtain a token from the OAuth server. -+ -As long as the `kubeadmin` user has been removed, the `oc login` command provides instructions on how to access a web page where you can retrieve the token. -+ -You can also access this page from the web console by navigating to *(?) Help* -> *Command Line Tools* -> *Copy Login Command*. - -. Log in to the cluster, passing in the token to authenticate. -+ -[source,terminal] ----- -$ oc login --token= ----- -+ -[NOTE] -==== -ifdef::oidc[] -If your OpenID Connect identity provider supports the resource owner password credentials (ROPC) grant flow, you can log in with a user name and password. You might need to take steps to enable the ROPC grant flow for your identity provider. 
- -After the OIDC identity provider is configured in {product-title}, you can log in by using the following command, which prompts for your user name and password: - -[source,terminal] ----- -$ oc login -u --server= ----- -endif::oidc[] - -ifndef::oidc[] -This identity provider does not support logging in with a user name and password. -endif::oidc[] -==== -endif::no-username-password-login[] - -. Confirm that the user logged in successfully, and display the user name. -+ -[source,terminal] ----- -$ oc whoami ----- - -// Undefining attributes -ifeval::["{context}" == "configuring-google-identity-provider"] -:!no-username-password-login: -endif::[] -ifeval::["{context}" == "configuring-oidc-identity-provider"] -:!no-username-password-login: -:!oidc: -endif::[] -ifeval::["{context}" == "configuring-github-identity-provider"] -:!no-username-password-login: -endif::[] diff --git a/modules/identity-provider-apache-custom-proxy-configuration.adoc b/modules/identity-provider-apache-custom-proxy-configuration.adoc deleted file mode 100644 index 843f50171681..000000000000 --- a/modules/identity-provider-apache-custom-proxy-configuration.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-request-header-identity-provider.adoc - -[id="identity-provider-apache-custom-proxy-configuration_{context}"] -= Custom proxy configuration - -Using the `mod_auth_gssapi` module is a popular way to configure the Apache -authentication proxy using the request header identity provider; however, it is -not required. Other proxies can easily be used if the following requirements are -met: - -* Block the `X-Remote-User` header from client requests to prevent spoofing. -* Enforce client certificate authentication in the `RequestHeaderIdentityProvider` -configuration. -* Require the `X-Csrf-Token` header be set for all authentication requests using -the challenge flow. -* Make sure only the `/oauth/authorize` endpoint and its subpaths are proxied; -redirects must be rewritten to allow the backend server to send the client to -the correct location. -* The URL that proxies to `\https:///oauth/authorize` must end -with `/authorize` with no trailing slash. For example, `\https://proxy.example.com/login-proxy/authorize?...` -must proxy to `\https:///oauth/authorize?...`. -+ -* Subpaths of the URL that proxies to `\https:///oauth/authorize` -must proxy to subpaths of `\https:///oauth/authorize`. For -example, `\https://proxy.example.com/login-proxy/authorize/approve?...` must -proxy to `\https:///oauth/authorize/approve?...`. - -[NOTE] -==== -The `\https://` address is the route to the OAuth server and -can be obtained by running `oc get route -n openshift-authentication`. -==== diff --git a/modules/identity-provider-basic-authentication-CR.adoc b/modules/identity-provider-basic-authentication-CR.adoc deleted file mode 100644 index dd184c921558..000000000000 --- a/modules/identity-provider-basic-authentication-CR.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc - -[id="identity-provider-basic-authentication-CR_{context}"] -= Sample basic authentication CR - -The following custom resource (CR) shows the parameters and acceptable values for a -basic authentication identity provider. 
- -.Basic authentication CR - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: basicidp <1> - mappingMethod: claim <2> - type: BasicAuth - basicAuth: - url: https://www.example.com/remote-idp <3> - ca: <4> - name: ca-config-map - tlsClientCert: <5> - name: client-cert-secret - tlsClientKey: <6> - name: client-key-secret ----- -<1> This provider name is prefixed to the returned user ID to form an identity -name. -<2> Controls how mappings are established between this provider's identities and `User` objects. -<3> URL accepting credentials in Basic authentication headers. -<4> Optional: Reference to an {product-title} `ConfigMap` object containing the -PEM-encoded certificate authority bundle to use in validating server -certificates for the configured URL. -<5> Optional: Reference to an {product-title} `Secret` object containing the client -certificate to present when making requests to the configured URL. -<6> Reference to an {product-title} `Secret` object containing the key for the -client certificate. Required if `tlsClientCert` is specified. diff --git a/modules/identity-provider-basic-authentication-troubleshooting.adoc b/modules/identity-provider-basic-authentication-troubleshooting.adoc deleted file mode 100644 index b8b3856043d0..000000000000 --- a/modules/identity-provider-basic-authentication-troubleshooting.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc - -[id="identity-provider-basic-authentication-troubleshooting_{context}"] -= Basic authentication troubleshooting - -The most common issue relates to network connectivity to the backend server. For -simple debugging, run `curl` commands on the master. To test for a successful -login, replace the `` and `` in the following example command -with valid credentials. To test an invalid login, replace them with false -credentials. - -[source,terminal] ----- -$ curl --cacert /path/to/ca.crt --cert /path/to/client.crt --key /path/to/client.key -u : -v https://www.example.com/remote-idp ----- - -*Successful responses* - -A `200` status with a `sub` (subject) key indicates success: - -[source,terminal] ----- -{"sub":"userid"} ----- -The subject must be unique to the authenticated user, and must not be able to -be modified. - -A successful response can optionally provide additional data, such as: - -* A display name using the `name` key: -+ -[source,terminal] ----- -{"sub":"userid", "name": "User Name", ...} ----- -* An email address using the `email` key: -+ -[source,terminal] ----- -{"sub":"userid", "email":"user@example.com", ...} ----- -* A preferred user name using the `preferred_username` key: -+ -[source,terminal] ----- -{"sub":"014fbff9a07c", "preferred_username":"bob", ...} ----- -+ -The `preferred_username` key is useful when -the unique, unchangeable subject is a database key or UID, and a more -human-readable name exists. This is used as a hint when provisioning the -{product-title} user for the authenticated identity. - -*Failed responses* - -- A `401` response indicates failed authentication. 
-- A non-`200` status or the presence of a non-empty "error" key indicates an -error: `{"error":"Error message"}` diff --git a/modules/identity-provider-config-map.adoc b/modules/identity-provider-config-map.adoc deleted file mode 100644 index 4932e8d886e6..000000000000 --- a/modules/identity-provider-config-map.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc -// * authentication/identity_providers/configuring-github-identity-provider.adoc -// * authentication/identity_providers/configuring-gitlab-identity-provider.adoc -// * authentication/identity_providers/configuring-ldap-identity-provider.adoc -// * authentication/identity_providers/configuring-oidc-identity-provider.adoc -// * authentication/identity_providers/configuring-request-header-identity-provider.adoc - -ifeval::["{context}" == "configuring-github-identity-provider"] -:github: -endif::[] - -:_content-type: PROCEDURE -[id="identity-provider-creating-configmap_{context}"] -= Creating a config map - -Identity providers use {product-title} `ConfigMap` objects in the `openshift-config` -namespace to contain the certificate authority bundle. These are primarily -used to contain certificate bundles needed by the identity provider. - -ifdef::github[] -[NOTE] -==== -This procedure is only required for GitHub Enterprise. -==== -endif::github[] - -.Procedure - -* Define an {product-title} `ConfigMap` object containing the -certificate authority by using the following command. The certificate -authority must be stored in the `ca.crt` key of the `ConfigMap` object. -+ -[source,terminal] ----- -$ oc create configmap ca-config-map --from-file=ca.crt=/path/to/ca -n openshift-config ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to create the config map: - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: ca-config-map - namespace: openshift-config -data: - ca.crt: | - ----- -==== - -// Undefining attributes -ifeval::["{context}" == "configuring-google-identity-provider"] -:!github: -endif::[] diff --git a/modules/identity-provider-configuring-apache-request-header.adoc b/modules/identity-provider-configuring-apache-request-header.adoc deleted file mode 100644 index 88515151d9a1..000000000000 --- a/modules/identity-provider-configuring-apache-request-header.adoc +++ /dev/null @@ -1,261 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-request-header-identity-provider.adoc - -:_content-type: PROCEDURE -[id="identity-provider-configuring-apache-request-header_{context}"] -= Configuring Apache authentication using request header - -This example uses the `mod_auth_gssapi` module to configure an Apache -authentication proxy using the request header identity provider. - -.Prerequisites - -* Obtain the `mod_auth_gssapi` module from the -link:https://access.redhat.com/solutions/392003[Optional channel]. -You must have the following packages installed on your local machine: -+ -** `httpd` -** `mod_ssl` -** `mod_session` -** `apr-util-openssl` -** `mod_auth_gssapi` - -* Generate a CA for validating requests that submit the trusted header. Define -an {product-title} `ConfigMap` object containing the CA. 
This is done by running: -+ -[source,terminal] ----- -$ oc create configmap ca-config-map --from-file=ca.crt=/path/to/ca -n openshift-config <1> ----- -<1> The CA must be stored in the `ca.crt` key of the `ConfigMap` object. -+ -[TIP] -==== -You can alternatively apply the following YAML to create the config map: - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: ca-config-map - namespace: openshift-config -data: - ca.crt: | - ----- -==== - -* Generate a client certificate for the proxy. You can generate this certificate -by using any x509 certificate tooling. The client certificate must be signed by -the CA you generated for validating requests that submit the trusted header. - -* Create the custom resource (CR) for your identity providers. - -.Procedure - -This proxy uses a client certificate to connect to the OAuth server, which -is configured to trust the `X-Remote-User` header. - -. Create the certificate for the Apache configuration. The certificate that you -specify as the `SSLProxyMachineCertificateFile` parameter value is the proxy's -client certificate that is used to authenticate the proxy to the server. It must -use `TLS Web Client Authentication` as the extended key type. - -. Create the Apache configuration. Use the following template to provide your -required settings and values: -+ -[IMPORTANT] -==== -Carefully review the template and customize its contents to fit your -environment. -==== -+ ----- -LoadModule request_module modules/mod_request.so -LoadModule auth_gssapi_module modules/mod_auth_gssapi.so -# Some Apache configurations might require these modules. -# LoadModule auth_form_module modules/mod_auth_form.so -# LoadModule session_module modules/mod_session.so - -# Nothing needs to be served over HTTP. This virtual host simply redirects to -# HTTPS. - - DocumentRoot /var/www/html - RewriteEngine On - RewriteRule ^(.*)$ https://%{HTTP_HOST}$1 [R,L] - - - - # This needs to match the certificates you generated. See the CN and X509v3 - # Subject Alternative Name in the output of: - # openssl x509 -text -in /etc/pki/tls/certs/localhost.crt - ServerName www.example.com - - DocumentRoot /var/www/html - SSLEngine on - SSLCertificateFile /etc/pki/tls/certs/localhost.crt - SSLCertificateKeyFile /etc/pki/tls/private/localhost.key - SSLCACertificateFile /etc/pki/CA/certs/ca.crt - - SSLProxyEngine on - SSLProxyCACertificateFile /etc/pki/CA/certs/ca.crt - # It is critical to enforce client certificates. Otherwise, requests can - # spoof the X-Remote-User header by accessing the /oauth/authorize endpoint - # directly. - SSLProxyMachineCertificateFile /etc/pki/tls/certs/authproxy.pem - - # To use the challenging-proxy, an X-Csrf-Token must be present. - RewriteCond %{REQUEST_URI} ^/challenging-proxy - RewriteCond %{HTTP:X-Csrf-Token} ^$ [NC] - RewriteRule ^.* - [F,L] - - - # Insert your backend server name/ip here. - ProxyPass https:///oauth/authorize - AuthName "SSO Login" - # For Kerberos - AuthType GSSAPI - Require valid-user - RequestHeader set X-Remote-User %{REMOTE_USER}s - - GssapiCredStore keytab:/etc/httpd/protected/auth-proxy.keytab - # Enable the following if you want to allow users to fallback - # to password based authentication when they do not have a client - # configured to perform kerberos authentication. - GssapiBasicAuth On - - # For ldap: - # AuthBasicProvider ldap - # AuthLDAPURL "ldap://ldap.example.com:389/ou=People,dc=my-domain,dc=com?uid?sub?(objectClass=*)" - - - - # Insert your backend server name/ip here. 
- ProxyPass https:///oauth/authorize - - AuthName "SSO Login" - AuthType GSSAPI - Require valid-user - RequestHeader set X-Remote-User %{REMOTE_USER}s env=REMOTE_USER - - GssapiCredStore keytab:/etc/httpd/protected/auth-proxy.keytab - # Enable the following if you want to allow users to fallback - # to password based authentication when they do not have a client - # configured to perform kerberos authentication. - GssapiBasicAuth On - - ErrorDocument 401 /login.html - - - - -RequestHeader unset X-Remote-User ----- -+ -[NOTE] -==== -The `\https://` address is the route to the OAuth server and -can be obtained by running `oc get route -n openshift-authentication`. -==== - -. Update the `identityProviders` stanza in the custom resource (CR): -+ -[source,yaml] ----- -identityProviders: - - name: requestheaderidp - type: RequestHeader - requestHeader: - challengeURL: "https:///challenging-proxy/oauth/authorize?${query}" - loginURL: "https:///login-proxy/oauth/authorize?${query}" - ca: - name: ca-config-map - clientCommonNames: - - my-auth-proxy - headers: - - X-Remote-User ----- - -. Verify the configuration. - -.. Confirm that you can bypass the proxy by requesting a token by supplying the -correct client certificate and header: -+ -[source,terminal] ----- -# curl -L -k -H "X-Remote-User: joe" \ - --cert /etc/pki/tls/certs/authproxy.pem \ - https:///oauth/token/request ----- - -.. Confirm that requests that do not supply the client certificate fail by -requesting a token without the certificate: -+ -[source,terminal] ----- -# curl -L -k -H "X-Remote-User: joe" \ - https:///oauth/token/request ----- - -.. Confirm that the `challengeURL` redirect is active: -+ -[source,terminal] ----- -# curl -k -v -H 'X-Csrf-Token: 1' \ - https:///oauth/authorize?client_id=openshift-challenging-client&response_type=token ----- -+ -Copy the `challengeURL` redirect to use in the next step. - -.. Run this command to show a `401` response with a `WWW-Authenticate` basic -challenge, a negotiate challenge, or both challenges: -+ -[source,terminal] ----- -# curl -k -v -H 'X-Csrf-Token: 1' \ - ----- - -.. Test logging in to the OpenShift CLI (`oc`) with and without using a Kerberos -ticket: -... If you generated a Kerberos ticket by using `kinit`, destroy it: -+ -[source,terminal] ----- -# kdestroy -c cache_name <1> ----- -+ -<1> Make sure to provide the name of your Kerberos cache. -... Log in to the `oc` tool by using your Kerberos credentials: -+ -[source,terminal] ----- -# oc login -u ----- -+ -Enter your Kerberos password at the prompt. -... Log out of the `oc` tool: -+ -[source,terminal] ----- -# oc logout ----- -... Use your Kerberos credentials to get a ticket: -+ -[source,terminal] ----- -# kinit ----- -+ -Enter your Kerberos user name and password at the prompt. -... Confirm that you can log in to the `oc` tool: -+ -[source,terminal] ----- -# oc login ----- -+ -If your configuration is correct, you are logged in without entering separate -credentials. 
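Both the Apache `ProxyPass` targets and the `identityProviders` stanza above need the host name of the OAuth server route. As a convenience, the following sketch narrows the `oc get route` command from the notes above so that it prints only the host; it assumes the route keeps its default name, `oauth-openshift`.

[source,terminal]
----
$ oc get route oauth-openshift -n openshift-authentication -o jsonpath='{.spec.host}'
----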
diff --git a/modules/identity-provider-configuring-using-web-console.adoc b/modules/identity-provider-configuring-using-web-console.adoc deleted file mode 100644 index 883ec4e64652..000000000000 --- a/modules/identity-provider-configuring-using-web-console.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -//* authentication/identity_providers/configuring-htpasswd-identity-provider.adoc -//* authentication/identity_providers/configuring-oidc-identity-provider.adoc - -:_content-type: PROCEDURE -[id="identity-provider-configuring-using-the-web-console_{context}"] -= Configuring identity providers using the web console - -Configure your identity provider (IDP) through the web console instead of the CLI. - -.Prerequisites - -* You must be logged in to the web console as a cluster administrator. - -.Procedure - -. Navigate to *Administration* -> *Cluster Settings*. -. Under the *Configuration* tab, click *OAuth*. -. Under the *Identity Providers* section, select your identity provider from the -*Add* drop-down menu. - -[NOTE] -==== -You can specify multiple IDPs through the web console without overwriting -existing IDPs. -==== diff --git a/modules/identity-provider-creating-htpasswd-file-linux.adoc b/modules/identity-provider-creating-htpasswd-file-linux.adoc deleted file mode 100644 index bb56e6701439..000000000000 --- a/modules/identity-provider-creating-htpasswd-file-linux.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-htpasswd-identity-provider.adoc - -:_content-type: PROCEDURE -[id="identity-provider-creating-htpasswd-file-linux_{context}"] -= Creating an htpasswd file using Linux - -To use the htpasswd identity provider, you must generate a flat file that -contains the user names and passwords for your cluster by using -link:http://httpd.apache.org/docs/2.4/programs/htpasswd.html[`htpasswd`]. - -.Prerequisites - -* Have access to the `htpasswd` utility. On Red Hat Enterprise Linux -this is available by installing the `httpd-tools` package. - -.Procedure - -. Create or update your flat file with a user name and hashed password: -+ -[source,terminal] ----- -$ htpasswd -c -B -b ----- -+ -The command generates a hashed version of the password. -+ -For example: -+ -[source,terminal] ----- -$ htpasswd -c -B -b users.htpasswd ----- -+ -.Example output -[source,terminal] ----- -Adding password for user user1 ----- - -. Continue to add or update credentials to the file: -+ -[source,terminal] ----- -$ htpasswd -B -b ----- diff --git a/modules/identity-provider-creating-htpasswd-file-windows.adoc b/modules/identity-provider-creating-htpasswd-file-windows.adoc deleted file mode 100644 index ac38feb7311d..000000000000 --- a/modules/identity-provider-creating-htpasswd-file-windows.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-htpasswd-identity-provider.adoc - -:_content-type: PROCEDURE -[id="identity-provider-creating-htpasswd-file-windows_{context}"] -= Creating an htpasswd file using Windows - -To use the htpasswd identity provider, you must generate a flat file that -contains the user names and passwords for your cluster by using -link:http://httpd.apache.org/docs/2.4/programs/htpasswd.html[`htpasswd`]. - -.Prerequisites - -* Have access to `htpasswd.exe`. This file is included in the `\bin` -directory of many Apache httpd distributions. - -.Procedure - -. 
Create or update your flat file with a user name and hashed password: -+ -[source,terminal] ----- -> htpasswd.exe -c -B -b <\path\to\users.htpasswd> ----- -+ -The command generates a hashed version of the password. -+ -For example: -+ -[source,terminal] ----- -> htpasswd.exe -c -B -b users.htpasswd ----- -+ -.Example output -[source,terminal] ----- -Adding password for user user1 ----- - -. Continue to add or update credentials to the file: -+ -[source,terminal] ----- -> htpasswd.exe -b <\path\to\users.htpasswd> ----- diff --git a/modules/identity-provider-default-CR.adoc b/modules/identity-provider-default-CR.adoc deleted file mode 100644 index e6b081ca123b..000000000000 --- a/modules/identity-provider-default-CR.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/understanding-identity-provider.adoc -// * post_installation_configuration/preparing-for-users.adoc - -[id="identity-provider-default-CR_{context}"] -= Sample identity provider CR - -The following custom resource (CR) shows the parameters and default -values that you use to configure an identity provider. This example -uses the htpasswd identity provider. - -.Sample identity provider CR - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: my_identity_provider <1> - mappingMethod: claim <2> - type: HTPasswd - htpasswd: - fileData: - name: htpass-secret <3> ----- -<1> This provider name is prefixed to provider user names to form an -identity name. -<2> Controls how mappings are established between this provider's -identities and `User` objects. -<3> An existing secret containing a file generated using -link:http://httpd.apache.org/docs/2.4/programs/htpasswd.html[`htpasswd`]. diff --git a/modules/identity-provider-github-CR.adoc b/modules/identity-provider-github-CR.adoc deleted file mode 100644 index aba23a0b8066..000000000000 --- a/modules/identity-provider-github-CR.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-github-identity-provider.adoc - -[id="identity-provider-github-CR_{context}"] -= Sample GitHub CR - -The following custom resource (CR) shows the parameters and acceptable values for a -GitHub identity provider. - -.GitHub CR - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: githubidp <1> - mappingMethod: claim <2> - type: GitHub - github: - ca: <3> - name: ca-config-map - clientID: {...} <4> - clientSecret: <5> - name: github-secret - hostname: ... <6> - organizations: <7> - - myorganization1 - - myorganization2 - teams: <8> - - myorganization1/team-a - - myorganization2/team-b ----- -<1> This provider name is prefixed to the GitHub numeric user ID to form an -identity name. It is also used to build the callback URL. -<2> Controls how mappings are established between this provider's identities and `User` objects. -<3> Optional: Reference to an {product-title} `ConfigMap` object containing the -PEM-encoded certificate authority bundle to use in validating server -certificates for the configured URL. Only for use in GitHub Enterprise -with a non-publicly trusted root certificate. -<4> The client ID of a -link:https://github.com/settings/applications/new[registered GitHub OAuth -application]. The application must be configured with a callback URL of -`\https://oauth-openshift.apps../oauth2callback/`. 
-<5> Reference to an {product-title} `Secret` object containing the client secret -issued by GitHub. -<6> For GitHub Enterprise, you must provide the hostname of your instance, such as -`example.com`. This value must match the GitHub Enterprise `hostname` value in -the `/setup/settings` file and cannot include a port number. If this -value is not set, then either `teams` or `organizations` must be defined. -For GitHub, omit this parameter. -<7> The list of organizations. Either the `organizations` or `teams` field must be set unless the `hostname` field is set, or if `mappingMethod` is set to `lookup`. Cannot be used in combination with the `teams` field. -<8> The list of teams. Either the `teams` or `organizations` field must be set unless the `hostname` field is set, or if `mappingMethod` is set to `lookup`. Cannot be used in combination with the `organizations` field. - -[NOTE] -==== -If `organizations` or `teams` is specified, only GitHub users that are members of -at least one of the listed organizations will be allowed to log in. If the GitHub OAuth -application configured in `clientID` is not owned by the organization, an organization -owner must grant third-party access to use this option. This can be done during -the first GitHub login by the organization's administrator, or from the GitHub organization settings. -==== diff --git a/modules/identity-provider-github-about.adoc b/modules/identity-provider-github-about.adoc deleted file mode 100644 index ea36c362006d..000000000000 --- a/modules/identity-provider-github-about.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-github-identity-provider.adoc - -[id="identity-provider-github-about_{context}"] -= About GitHub authentication - -Configuring link:https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/authorizing-oauth-apps[GitHub authentication] allows users to log in to {product-title} with their GitHub credentials. To prevent anyone with any GitHub user ID from logging in to your {product-title} cluster, you can restrict access to only those in specific GitHub organizations. diff --git a/modules/identity-provider-gitlab-CR.adoc b/modules/identity-provider-gitlab-CR.adoc deleted file mode 100644 index 7c4b59b5b2f5..000000000000 --- a/modules/identity-provider-gitlab-CR.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-gitlab-identity-provider.adoc - -[id="identity-provider-gitlab-CR_{context}"] -= Sample GitLab CR - -The following custom resource (CR) shows the parameters and acceptable values for a -GitLab identity provider. - -.GitLab CR - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: gitlabidp <1> - mappingMethod: claim <2> - type: GitLab - gitlab: - clientID: {...} <3> - clientSecret: <4> - name: gitlab-secret - url: https://gitlab.com <5> - ca: <6> - name: ca-config-map ----- -<1> This provider name is prefixed to the GitLab numeric user ID to form an -identity name. It is also used to build the callback URL. -<2> Controls how mappings are established between this provider's identities and `User` objects. -<3> The client ID of a -link:https://docs.gitlab.com/ce/api/oauth2.html[registered GitLab OAuth application]. -The application must be configured with a callback URL of -`\https://oauth-openshift.apps../oauth2callback/`.
-<4> Reference to an {product-title} `Secret` object containing the client secret -issued by GitLab. -<5> The host URL of a GitLab provider. This could either be `\https://gitlab.com/` -or any other self hosted instance of GitLab. -<6> Optional: Reference to an {product-title} `ConfigMap` object containing the -PEM-encoded certificate authority bundle to use in validating server -certificates for the configured URL. diff --git a/modules/identity-provider-gitlab-about.adoc b/modules/identity-provider-gitlab-about.adoc deleted file mode 100644 index 5895124d044d..000000000000 --- a/modules/identity-provider-gitlab-about.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-gitlab-identity-provider.adoc - -[id="identity-provider-gitlab-about_{context}"] -= About GitLab authentication - -Configuring GitLab authentication allows users to log in to {product-title} with their GitLab credentials. - -If you use GitLab version 7.7.0 to 11.0, you connect using the link:http://doc.gitlab.com/ce/integration/oauth_provider.html[OAuth integration]. If you use GitLab version 11.1 or later, you can use link:https://docs.gitlab.com/ce/integration/openid_connect_provider.html[OpenID Connect] (OIDC) to connect instead of OAuth. diff --git a/modules/identity-provider-google-CR.adoc b/modules/identity-provider-google-CR.adoc deleted file mode 100644 index 273c4e332c74..000000000000 --- a/modules/identity-provider-google-CR.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-google-identity-provider.adoc - -[id="identity-provider-google-CR_{context}"] -= Sample Google CR - -The following custom resource (CR) shows the parameters and acceptable -values for a Google identity provider. - -.Google CR - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: googleidp <1> - mappingMethod: claim <2> - type: Google - google: - clientID: {...} <3> - clientSecret: <4> - name: google-secret - hostedDomain: "example.com" <5> ----- -<1> This provider name is prefixed to the Google numeric user ID to form an -identity name. It is also used to build the redirect URL. -<2> Controls how mappings are established between this provider's identities and `User` objects. -<3> The client ID of a link:https://console.developers.google.com/[registered -Google project]. The project must be configured with a redirect URI of -`\https://oauth-openshift.apps../oauth2callback/`. -<4> Reference to an {product-title} `Secret` object containing the client secret -issued by Google. -<5> A -link:https://developers.google.com/identity/protocols/OpenIDConnect#hd-param[hosted domain] -used to restrict sign-in accounts. Optional if the `lookup` `mappingMethod` -is used. If empty, any Google account is allowed to authenticate. diff --git a/modules/identity-provider-google-about.adoc b/modules/identity-provider-google-about.adoc deleted file mode 100644 index 097cb79ff1fe..000000000000 --- a/modules/identity-provider-google-about.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-google-identity-provider.adoc - -[id="identity-provider-google-about_{context}"] -= About Google authentication - -Using Google as an identity provider allows any Google user to authenticate to your server. 
You can limit authentication to members of a specific hosted domain with the `hostedDomain` configuration attribute. - -[NOTE] -==== -Using Google as an identity provider requires users to get a token using `/oauth/token/request` to use with command-line tools. -==== diff --git a/modules/identity-provider-htpasswd-CR.adoc b/modules/identity-provider-htpasswd-CR.adoc deleted file mode 100644 index 14b9cd75cd3b..000000000000 --- a/modules/identity-provider-htpasswd-CR.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-htpasswd-identity-provider.adoc - -[id="identity-provider-htpasswd-CR_{context}"] -= Sample htpasswd CR - -The following custom resource (CR) shows the parameters and acceptable values for an -htpasswd identity provider. - -.htpasswd CR - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: my_htpasswd_provider <1> - mappingMethod: claim <2> - type: HTPasswd - htpasswd: - fileData: - name: htpass-secret <3> ----- -<1> This provider name is prefixed to provider user names to form an identity -name. -<2> Controls how mappings are established between this provider's identities and `User` objects. -<3> An existing secret containing a file generated using -link:http://httpd.apache.org/docs/2.4/programs/htpasswd.html[`htpasswd`]. diff --git a/modules/identity-provider-htpasswd-about.adoc b/modules/identity-provider-htpasswd-about.adoc deleted file mode 100644 index bd84363eaea4..000000000000 --- a/modules/identity-provider-htpasswd-about.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-htpasswd-identity-provider.adoc - -[id="identity-provider-htpasswd-about_{context}"] -= About htpasswd authentication - -Using htpasswd authentication in {product-title} allows you to identify users based on an htpasswd file. An htpasswd file is a flat file that contains the user name and hashed password for each user. You can use the `htpasswd` utility to create this file. diff --git a/modules/identity-provider-htpasswd-secret.adoc b/modules/identity-provider-htpasswd-secret.adoc deleted file mode 100644 index e43d172923a5..000000000000 --- a/modules/identity-provider-htpasswd-secret.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-htpasswd-identity-provider.adoc - -:_content-type: PROCEDURE -[id="identity-provider-creating-htpasswd-secret_{context}"] -= Creating the htpasswd secret - -To use the htpasswd identity provider, you must define a secret that -contains the htpasswd user file. - -.Prerequisites - -* Create an htpasswd file. - -.Procedure - -* Create a `Secret` object that contains the htpasswd users file: -+ -[source,terminal] ----- -$ oc create secret generic htpass-secret --from-file=htpasswd= -n openshift-config <1> ----- -<1> The secret key containing the users file for the `--from-file` argument must be named `htpasswd`, as shown in the above command. 
-+ -[TIP] -==== -You can alternatively apply the following YAML to create the secret: - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: htpass-secret - namespace: openshift-config -type: Opaque -data: - htpasswd: ----- -==== diff --git a/modules/identity-provider-htpasswd-update-users.adoc b/modules/identity-provider-htpasswd-update-users.adoc deleted file mode 100644 index 5a2b2d128c11..000000000000 --- a/modules/identity-provider-htpasswd-update-users.adoc +++ /dev/null @@ -1,107 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-htpasswd-identity-provider.adoc - -:_content-type: PROCEDURE -[id="identity-provider-htpasswd-update-users_{context}"] -= Updating users for an htpasswd identity provider - -You can add or remove users from an existing htpasswd identity provider. - -.Prerequisites - -* You have created a `Secret` object that contains the htpasswd user file. This procedure assumes that it is named `htpass-secret`. -* You have configured an htpasswd identity provider. This procedure assumes that it is named `my_htpasswd_provider`. -* You have access to the `htpasswd` utility. On Red Hat Enterprise Linux this is available by installing the `httpd-tools` package. -* You have cluster administrator privileges. - -.Procedure - -. Retrieve the htpasswd file from the `htpass-secret` `Secret` object and save the file to your file system: -+ -[source,terminal] ----- -$ oc get secret htpass-secret -ojsonpath={.data.htpasswd} -n openshift-config | base64 --decode > users.htpasswd ----- - -. Add or remove users from the `users.htpasswd` file. - -** To add a new user: -+ -[source,terminal] ----- -$ htpasswd -bB users.htpasswd ----- -+ -.Example output -[source,terminal] ----- -Adding password for user ----- - -** To remove an existing user: -+ -[source,terminal] ----- -$ htpasswd -D users.htpasswd ----- -+ -.Example output -[source,terminal] ----- -Deleting password for user ----- - -. Replace the `htpass-secret` `Secret` object with the updated users in the `users.htpasswd` file: -+ -[source,terminal] ----- -$ oc create secret generic htpass-secret --from-file=htpasswd=users.htpasswd --dry-run=client -o yaml -n openshift-config | oc replace -f - ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to replace the secret: - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: htpass-secret - namespace: openshift-config -type: Opaque -data: - htpasswd: ----- -==== - -. If you removed one or more users, you must additionally remove existing resources for each user. - -.. Delete the `User` object: -+ -[source,terminal] ----- -$ oc delete user ----- -+ -.Example output -[source,terminal] ----- -user.user.openshift.io "" deleted ----- -+ -Be sure to remove the user, otherwise the user can continue using their token as long as it has not expired. - -.. 
Delete the `Identity` object for the user: -+ -[source,terminal] ----- -$ oc delete identity my_htpasswd_provider: ----- -+ -.Example output -[source,terminal] ----- -identity.user.openshift.io "my_htpasswd_provider:" deleted ----- diff --git a/modules/identity-provider-keystone-CR.adoc b/modules/identity-provider-keystone-CR.adoc deleted file mode 100644 index 09cb3c7c63de..000000000000 --- a/modules/identity-provider-keystone-CR.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-keystone-identity-provider.adoc - -[id="identity-provider-keystone-CR_{context}"] -= Sample Keystone CR - -The following custom resource (CR) shows the parameters and acceptable values for a -Keystone identity provider. - -.Keystone CR - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: keystoneidp <1> - mappingMethod: claim <2> - type: Keystone - keystone: - domainName: default <3> - url: https://keystone.example.com:5000 <4> - ca: <5> - name: ca-config-map - tlsClientCert: <6> - name: client-cert-secret - tlsClientKey: <7> - name: client-key-secret ----- -<1> This provider name is prefixed to provider user names to form an identity name. -<2> Controls how mappings are established between this provider's identities and `User` objects. -<3> Keystone domain name. In Keystone, usernames are domain-specific. Only a single domain is supported. -<4> The URL to use to connect to the Keystone server (required). This must -use https. -<5> Optional: Reference to an {product-title} `ConfigMap` object containing the -PEM-encoded certificate authority bundle to use in validating server -certificates for the configured URL. -<6> Optional: Reference to an {product-title} `Secret` object containing the client -certificate to present when making requests to the configured URL. -<7> Reference to an {product-title} `Secret` object containing the key for the -client certificate. Required if `tlsClientCert` is specified. diff --git a/modules/identity-provider-keystone-about.adoc b/modules/identity-provider-keystone-about.adoc deleted file mode 100644 index e86bc8c196d3..000000000000 --- a/modules/identity-provider-keystone-about.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-keystone-identity-provider.adoc - -[id="identity-provider-keystone-about_{context}"] -= About Keystone authentication - -link:http://docs.openstack.org/developer/keystone/[Keystone] is an OpenStack project that provides identity, token, catalog, and policy services. - -You can configure the integration with Keystone so that the new {product-title} users are based on either the Keystone user names or unique Keystone IDs. With both methods, users log in by entering their Keystone user name and password. Basing the {product-title} users on the Keystone ID is more secure because if you delete a Keystone user and create a new Keystone user with that user name, the new user might have access to the old user's resources. 
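A minimal sketch of the supporting steps for the sample Keystone CR above, assuming the PEM-encoded CA bundle is saved locally as `/path/to/ca.crt` and the CR is saved as `keystone-cr.yaml`; both file names are placeholders rather than part of the original module.

[source,terminal]
----
$ oc create configmap ca-config-map --from-file=ca.crt=/path/to/ca.crt -n openshift-config <1>

$ oc apply -f keystone-cr.yaml <2>
----
<1> The `ca-config-map` config map referenced by the CR must exist in the `openshift-config` namespace before the CR is applied, with the certificate stored under the `ca.crt` key.
<2> Applies the `OAuth` CR to the cluster.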
diff --git a/modules/identity-provider-ldap-CR.adoc b/modules/identity-provider-ldap-CR.adoc deleted file mode 100644 index e78a8c9080d9..000000000000 --- a/modules/identity-provider-ldap-CR.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-ldap-identity-provider.adoc - -[id="identity-provider-ldap-CR_{context}"] -= Sample LDAP CR - -The following custom resource (CR) shows the parameters and acceptable values for an -LDAP identity provider. - -.LDAP CR - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: ldapidp <1> - mappingMethod: claim <2> - type: LDAP - ldap: - attributes: - id: <3> - - dn - email: <4> - - mail - name: <5> - - cn - preferredUsername: <6> - - uid - bindDN: "" <7> - bindPassword: <8> - name: ldap-secret - ca: <9> - name: ca-config-map - insecure: false <10> - url: "ldaps://ldaps.example.com/ou=users,dc=acme,dc=com?uid" <11> ----- -<1> This provider name is prefixed to the returned user ID to form an identity -name. -<2> Controls how mappings are established between this provider's identities and `User` objects. -<3> List of attributes to use as the identity. First non-empty attribute is -used. At least one attribute is required. If none of the listed attribute -have a value, authentication fails. Defined attributes are retrieved as raw, -allowing for binary values to be used. -<4> List of attributes to use as the email address. First non-empty -attribute is used. -<5> List of attributes to use as the display name. First non-empty -attribute is used. -<6> List of attributes to use as the preferred user name when provisioning a -user for this identity. First non-empty attribute is used. -<7> Optional DN to use to bind during the search phase. Must be set if -`bindPassword` is defined. -<8> Optional reference to an {product-title} `Secret` object containing the bind -password. Must be set if `bindDN` is defined. -<9> Optional: Reference to an {product-title} `ConfigMap` object containing the -PEM-encoded certificate authority bundle to use in validating server -certificates for the configured URL. Only used when `insecure` is `false`. -<10> When `true`, no TLS connection is made to the server. When `false`, -`ldaps://` URLs connect using TLS, and `ldap://` URLs are upgraded to TLS. -This must be set to `false` when `ldaps://` URLs are in use, as these -URLs always attempt to connect using TLS. -<11> An RFC 2255 URL which specifies the LDAP host and search parameters to use. - -[NOTE] -==== -To whitelist users for an LDAP integration, use the `lookup` mapping method. -Before a login from LDAP would be allowed, a cluster administrator must create -an `Identity` object and a `User` object for each LDAP user. -==== diff --git a/modules/identity-provider-ldap-secret.adoc b/modules/identity-provider-ldap-secret.adoc deleted file mode 100644 index 124eed26a24d..000000000000 --- a/modules/identity-provider-ldap-secret.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-ldap-identity-provider.adoc - -:_content-type: PROCEDURE -[id="identity-provider-creating-ldap-secret_{context}"] -= Creating the LDAP secret - -To use the identity provider, you must define an {product-title} `Secret` object that contains the `bindPassword` field. 
- -.Procedure - -* Create a `Secret` object that contains the `bindPassword` field: -+ -[source,terminal] ----- -$ oc create secret generic ldap-secret --from-literal=bindPassword= -n openshift-config <1> ----- -<1> The secret key containing the bindPassword for the `--from-literal` argument must be called `bindPassword`. -+ -[TIP] -==== -You can alternatively apply the following YAML to create the secret: - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: ldap-secret - namespace: openshift-config -type: Opaque -data: - bindPassword: ----- -==== diff --git a/modules/identity-provider-oidc-CR.adoc b/modules/identity-provider-oidc-CR.adoc deleted file mode 100644 index 940b9293329c..000000000000 --- a/modules/identity-provider-oidc-CR.adoc +++ /dev/null @@ -1,91 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-oidc-identity-provider.adoc - -[id="identity-provider-oidc-CR_{context}"] -= Sample OpenID Connect CRs - -The following custom resources (CRs) show the parameters and acceptable values for an OpenID Connect identity provider. - -If you must specify a custom certificate bundle, extra scopes, extra authorization request parameters, or a `userInfo` URL, use the full OpenID Connect CR. - -.Standard OpenID Connect CR -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: oidcidp <1> - mappingMethod: claim <2> - type: OpenID - openID: - clientID: ... <3> - clientSecret: <4> - name: idp-secret - claims: <5> - preferredUsername: - - preferred_username - name: - - name - email: - - email - groups: - - groups - issuer: https://www.idp-issuer.com <6> ----- -<1> This provider name is prefixed to the value of the identity claim to form an identity name. It is also used to build the redirect URL. -<2> Controls how mappings are established between this provider's identities and `User` objects. -<3> The client ID of a client registered with the OpenID provider. The client must be allowed to redirect to `\https://oauth-openshift.apps../oauth2callback/`. -<4> A reference to an {product-title} `Secret` object containing the client secret. -<5> The list of claims to use as the identity. The first non-empty claim is used. -<6> The link:https://openid.net/specs/openid-connect-core-1_0.html#IssuerIdentifier[Issuer Identifier] described in the OpenID spec. Must use `https` without query or fragment component. - -.Full OpenID Connect CR -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: oidcidp - mappingMethod: claim - type: OpenID - openID: - clientID: ... - clientSecret: - name: idp-secret - ca: <1> - name: ca-config-map - extraScopes: <2> - - email - - profile - extraAuthorizeParameters: <3> - include_granted_scopes: "true" - claims: - preferredUsername: <4> - - preferred_username - - email - name: <5> - - nickname - - given_name - - name - email: <6> - - custom_email_claim - - email - groups: <7> - - groups - issuer: https://www.idp-issuer.com ----- -<1> Optional: Reference to an {product-title} config map containing the PEM-encoded certificate authority bundle to use in validating server certificates for the configured URL. -<2> Optional: The list of scopes to request, in addition to the `openid` scope, during the authorization token request. -<3> Optional: A map of extra parameters to add to the authorization token request. 
-<4> The list of claims to use as the preferred user name when provisioning a user -for this identity. The first non-empty claim is used. -<5> The list of claims to use as the display name. The first non-empty claim is used. -<6> The list of claims to use as the email address. The first non-empty claim is used. -<7> The list of claims to use to synchronize groups from the OpenID Connect provider to {product-title} upon user login. The first non-empty claim is used. diff --git a/modules/identity-provider-oidc-about.adoc b/modules/identity-provider-oidc-about.adoc deleted file mode 100644 index 9c4db575d893..000000000000 --- a/modules/identity-provider-oidc-about.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-oidc-identity-provider.adoc - -[id="identity-provider-oidc-about_{context}"] -= About OpenID Connect authentication - -The Authentication Operator in {product-title} requires that the configured OpenID Connect identity provider implements the link:https://openid.net/specs/openid-connect-discovery-1_0.html[OpenID Connect Discovery] specification. - -ifdef::openshift-origin[] -You can link:https://www.keycloak.org/docs/latest/server_admin/index.html#openshift[configure a Keycloak] server as an OpenID Connect identity provider for {product-title}. -endif::[] - -[NOTE] -==== -`ID Token` and `UserInfo` decryptions are not supported. -==== - -By default, the `openid` scope is requested. If required, extra scopes can be specified in the `extraScopes` field. - -Claims are read from the JWT `id_token` returned from the OpenID identity provider and, if specified, from the JSON returned by the `UserInfo` URL. - -At least one claim must be configured to use as the user's identity. The standard identity claim is `sub`. - -You can also indicate which claims to use as the user's preferred user name, display name, and email address. If multiple claims are specified, the first one with a non-empty value is used. The following table lists the standard claims: - -[cols="1,2",options="header"] -|=== - -|Claim -|Description - -|`sub` -|Short for "subject identifier." The remote identity for the user at the -issuer. - -|`preferred_username` -|The preferred user name when provisioning a user. A shorthand name that the user wants to be referred to as, such as `janedoe`. Typically a value that corresponding to the user's login or username in the authentication system, such as username or email. - -|`email` -|Email address. - -|`name` -|Display name. -|=== - -See the link:http://openid.net/specs/openid-connect-core-1_0.html#StandardClaims[OpenID claims documentation] for more information. - -[NOTE] -==== -Unless your OpenID Connect identity provider supports the resource owner password credentials (ROPC) grant flow, users must get a token from `/oauth/token/request` to use with command-line tools. -==== diff --git a/modules/identity-provider-oidc-supported.adoc b/modules/identity-provider-oidc-supported.adoc deleted file mode 100644 index d2b12014c727..000000000000 --- a/modules/identity-provider-oidc-supported.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-oidc-identity-provider.adoc - -[id="identity-provider-oidc-supported_{context}"] -= Supported OIDC providers - -Red Hat tests and supports specific OpenID Connect (OIDC) providers with {product-title}. 
The following OpenID Connect (OIDC) providers are tested and supported with {product-title}. Using an OIDC provider that is not on the following list might work with {product-title}, but the provider was not tested by Red Hat and therefore is not supported by Red Hat. - -* Active Directory Federation Services for Windows Server -+ -[NOTE] -==== -Currently, it is not supported to use Active Directory Federation Services for Windows Server with {product-title} when custom claims are used. -==== -* GitLab -* Google -* Keycloak -* Microsoft identity platform (Azure Active Directory v2.0) -+ -[NOTE] -==== -Currently, it is not supported to use Microsoft identity platform when group names are required to be synced. -==== -* Okta -* Ping Identity -* Red Hat Single Sign-On diff --git a/modules/identity-provider-overview.adoc b/modules/identity-provider-overview.adoc deleted file mode 100644 index d650bc4df595..000000000000 --- a/modules/identity-provider-overview.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-identity-provider.adoc -// * authentication/identity_providers/configuring-allow-all-identity-provider.adoc -// * authentication/identity_providers/configuring-deny-all-identity-provider.adoc -// * authentication/identity_providers/configuring-htpasswd-identity-provider.adoc -// * authentication/identity_providers/configuring-keystone-identity-provider.adoc -// * authentication/identity_providers/configuring-ldap-identity-provider.adoc -// * authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc -// * authentication/identity_providers/configuring-request-header-identity-provider.adoc -// * authentication/identity_providers/configuring-github-identity-provider.adoc -// * authentication/identity_providers/configuring-gitlab-identity-provider.adoc -// * authentication/identity_providers/configuring-google-identity-provider.adoc -// * authentication/identity_providers/configuring-oidc-identity-provider.adoc -// * post_installation_configuration/preparing-for-users.adoc - -:_content-type: CONCEPT -[id="identity-provider-overview_{context}"] -= About identity providers in {product-title} - -By default, only a `kubeadmin` user exists on your cluster. To specify an -identity provider, you must create a custom resource (CR) that describes -that identity provider and add it to the cluster. - -[NOTE] -==== -{product-title} user names containing `/`, `:`, and `%` are not supported. -==== diff --git a/modules/identity-provider-parameters.adoc b/modules/identity-provider-parameters.adoc deleted file mode 100644 index 98365bf21d21..000000000000 --- a/modules/identity-provider-parameters.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/understanding-identity-provider.adoc -// * post_installation_configuration/preparing-for-users.adoc - -[id="identity-provider-parameters_{context}"] -= Identity provider parameters - -The following parameters are common to all identity providers: - -[cols="2a,8a",options="header"] -|=== -|Parameter | Description -|`name` | The provider name is prefixed to provider user names to form an -identity name. - -|`mappingMethod` | Defines how new identities are mapped to users when they log in. -Enter one of the following values: - -claim:: The default value. Provisions a user with the identity's preferred -user name. Fails if a user with that user name is already mapped to another -identity. 
-lookup:: Looks up an existing identity, user identity mapping, and user, -but does not automatically provision users or identities. This allows cluster -administrators to set up identities and users manually, or using an external -process. Using this method requires you to manually provision users. -generate:: Provisions a user with the identity's preferred user name. If a -user with the preferred user name is already mapped to an existing identity, a -unique user name is generated. For example, `myuser2`. This method should not be -used in combination with external processes that require exact matches between -{product-title} user names and identity provider user names, such as LDAP group -sync. -add:: Provisions a user with the identity's preferred user name. If a user -with that user name already exists, the identity is mapped to the existing user, -adding to any existing identity mappings for the user. Required when multiple -identity providers are configured that identify the same set of users and map to -the same user names. -|=== - -[NOTE] -When adding or changing identity providers, you can map identities from the new -provider to existing users by setting the `mappingMethod` parameter to -`add`. diff --git a/modules/identity-provider-registering-github.adoc b/modules/identity-provider-registering-github.adoc deleted file mode 100644 index a23099cfe6c6..000000000000 --- a/modules/identity-provider-registering-github.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-github-identity-provider.adoc - -:_content-type: PROCEDURE -[id="identity-provider-registering-github_{context}"] -= Registering a GitHub application - -To use GitHub or GitHub Enterprise as an identity provider, you must register -an application to use. - -.Procedure - -. Register an application on GitHub: -** For GitHub, click https://github.com/settings/profile[*Settings*] -> -https://github.com/settings/apps[*Developer settings*] -> -https://github.com/settings/developers[*OAuth Apps*] -> -https://github.com/settings/applications/new[*Register a new OAuth application*]. -** For GitHub Enterprise, go to your GitHub Enterprise home page and then click -*Settings -> Developer settings -> Register a new application*. -. Enter an application name, for example `My OpenShift Install`. -. Enter a homepage URL, such as -`\https://oauth-openshift.apps..`. -. Optional: Enter an application description. -. Enter the authorization callback URL, where the end of the URL contains the -identity provider `name`: -+ ----- -https://oauth-openshift.apps../oauth2callback/ ----- -+ -For example: -+ ----- -https://oauth-openshift.apps.openshift-cluster.example.com/oauth2callback/github ----- -. Click *Register application*. GitHub provides a client ID and a client secret. -You need these values to complete the identity provider configuration. diff --git a/modules/identity-provider-request-header-CR.adoc b/modules/identity-provider-request-header-CR.adoc deleted file mode 100644 index 447b3285e143..000000000000 --- a/modules/identity-provider-request-header-CR.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-request-header-identity-provider.adoc - -[id="identity-provider-request-header-CR_{context}"] -= Sample request header CR - -The following custom resource (CR) shows the parameters and -acceptable values for a request header identity provider. 
- -.Request header CR - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: requestheaderidp <1> - mappingMethod: claim <2> - type: RequestHeader - requestHeader: - challengeURL: "https://www.example.com/challenging-proxy/oauth/authorize?${query}" <3> - loginURL: "https://www.example.com/login-proxy/oauth/authorize?${query}" <4> - ca: <5> - name: ca-config-map - clientCommonNames: <6> - - my-auth-proxy - headers: <7> - - X-Remote-User - - SSO-User - emailHeaders: <8> - - X-Remote-User-Email - nameHeaders: <9> - - X-Remote-User-Display-Name - preferredUsernameHeaders: <10> - - X-Remote-User-Login ----- -<1> This provider name is prefixed to the user name in the request header to -form an identity name. -<2> Controls how mappings are established between this provider's identities and `User` objects. -<3> Optional: URL to redirect unauthenticated `/oauth/authorize` requests to, -that will authenticate browser-based clients and then proxy their request to -`https://__/oauth/authorize`. -The URL that proxies to `https://__/oauth/authorize` must end with `/authorize` (with no trailing slash), -and also proxy subpaths, in order for OAuth approval flows to work properly. -`${url}` is replaced with the current URL, escaped to be safe in a query parameter. -`${query}` is replaced with the current query string. -If this attribute is not defined, then `loginURL` must be used. -<4> Optional: URL to redirect unauthenticated `/oauth/authorize` requests to, -that will authenticate clients which expect `WWW-Authenticate` challenges, and -then proxy them to `https://__/oauth/authorize`. -`${url}` is replaced with the current URL, escaped to be safe in a query parameter. -`${query}` is replaced with the current query string. -If this attribute is not defined, then `challengeURL` must be used. -<5> Reference to an {product-title} `ConfigMap` object containing a PEM-encoded -certificate bundle. Used as a trust anchor to validate the TLS -certificates presented by the remote server. -+ -[IMPORTANT] -==== -As of {product-title} 4.1, the `ca` field is required for this identity -provider. This means that your proxy must support mutual TLS. -==== -<6> Optional: list of common names (`cn`). If set, a valid client certificate with -a Common Name (`cn`) in the specified list must be presented before the request headers -are checked for user names. If empty, any Common Name is allowed. Can only be used in combination -with `ca`. -<7> Header names to check, in order, for the user identity. The first header containing -a value is used as the identity. Required, case-insensitive. -<8> Header names to check, in order, for an email address. The first header containing -a value is used as the email address. Optional, case-insensitive. -<9> Header names to check, in order, for a display name. The first header containing -a value is used as the display name. Optional, case-insensitive. -<10> Header names to check, in order, for a preferred user name, if different than the immutable -identity determined from the headers specified in `headers`. The first header containing -a value is used as the preferred user name when provisioning. Optional, case-insensitive. 
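As a rough, optional check of the trust relationship this CR describes, you could impersonate the proxy directly against the OAuth route. This sketch assumes you hold a client certificate and key signed by the CA in `ca-config-map`; the user name, file paths, and route host are placeholders.

[source,terminal]
----
$ curl -L -k -H "X-Remote-User: testuser" \
    --cert /path/to/authproxy.pem --key /path/to/authproxy-key.pem \
    "https://oauth-openshift.apps.<cluster_domain>/oauth/token/request"
----

If the client certificate is trusted, and its Common Name is listed when `clientCommonNames` is set, the response should offer a token request page for `testuser`; otherwise the request is rejected.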
diff --git a/modules/identity-provider-secret-tls.adoc b/modules/identity-provider-secret-tls.adoc deleted file mode 100644 index 6b22d24e83ff..000000000000 --- a/modules/identity-provider-secret-tls.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc -// * authentication/identity_providers/configuring-keystone-identity-provider.adoc - -:_content-type: PROCEDURE -[id="identity-provider-creating-secret-tls_{context}"] -= Creating the secret - -Identity providers use {product-title} `Secret` objects in the `openshift-config` namespace to contain the client secret, client certificates, and keys. - -.Procedure - -* Create a `Secret` object that contains the key and certificate by using the following command: -+ -[source,terminal] ----- -$ oc create secret tls --key=key.pem --cert=cert.pem -n openshift-config ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to create the secret: - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: - namespace: openshift-config -type: kubernetes.io/tls -data: - tls.crt: - tls.key: ----- -==== diff --git a/modules/identity-provider-secret.adoc b/modules/identity-provider-secret.adoc deleted file mode 100644 index 5b8554b453ef..000000000000 --- a/modules/identity-provider-secret.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-github-identity-provider.adoc -// * authentication/identity_providers/configuring-gitlab-identity-provider.adoc -// * authentication/identity_providers/configuring-google-identity-provider.adoc -// * authentication/identity_providers/configuring-oidc-identity-provider.adoc - -:_content-type: PROCEDURE -[id="identity-provider-creating-secret_{context}"] -= Creating the secret - -Identity providers use {product-title} `Secret` objects in the `openshift-config` namespace to contain the client secret, client certificates, and keys. - -.Procedure - -* Create a `Secret` object containing a string by using the following command: -+ -[source,terminal] ----- -$ oc create secret generic --from-literal=clientSecret= -n openshift-config ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to create the secret: - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: - namespace: openshift-config -type: Opaque -data: - clientSecret: ----- -==== - -* You can define a `Secret` object containing the contents of a file, such as a certificate file, by using the following command: -+ -[source,terminal] ----- -$ oc create secret generic --from-file= -n openshift-config ----- diff --git a/modules/idle-idling-applications.adoc b/modules/idle-idling-applications.adoc deleted file mode 100644 index 8620ccee24a2..000000000000 --- a/modules/idle-idling-applications.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/idling-applications.adoc - -:_content-type: PROCEDURE -[id="idle-idling-applications_{context}"] -= Idling applications - -Idling an application involves finding the scalable resources (deployment -configurations, replication controllers, and others) associated with a service. -Idling an application finds the service and marks it as idled, scaling down the -resources to zero replicas. 
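Concretely, the idled state is recorded as annotations on the service's endpoints and on the associated scalable resources. The following sketch shows one way to inspect that state after idling; it assumes a service named `frontend` and the `idling.alpha.openshift.io` annotation prefix.

[source,terminal]
----
$ oc get endpoints frontend -o jsonpath='{.metadata.annotations}'
----

Annotations such as `idling.alpha.openshift.io/idled-at` and `idling.alpha.openshift.io/unidle-targets`, if present, record when the service was idled and which resources to scale back up when traffic returns.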
- -You can use the `oc idle` command to idle a single service, or use the -`--resource-names-file` option to idle multiple services. - -[id="idle-idling-applications-single_{context}"] -== Idling a single service - -.Procedure - -. To idle a single service, run: -+ -[source,terminal] ----- -$ oc idle ----- - -[id="idle-idling-applications-multiple_{context}"] -== Idling multiple services - -Idling multiple services is helpful if an application spans across a set of -services within a project, or when idling multiple services in conjunction with -a script to idle multiple applications in bulk within the same project. - -.Procedure - -. Create a file containing a list of the services, each on their own line. - -. Idle the services using the `--resource-names-file` option: -+ -[source,terminal] ----- -$ oc idle --resource-names-file ----- - -[NOTE] -==== -The `idle` command is limited to a single project. For idling applications across -a cluster, run the `idle` command for each project individually. -==== diff --git a/modules/idle-unidling-applications.adoc b/modules/idle-unidling-applications.adoc deleted file mode 100644 index fbe6b94c2c7a..000000000000 --- a/modules/idle-unidling-applications.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/idling-applications.adoc - -:_content-type: PROCEDURE -[id="idle-unidling-applications_{context}"] -= Unidling applications - -Application services become active again when they receive network traffic and -are scaled back up their previous state. This includes both traffic to the -services and traffic passing through routes. - -Applications can also be manually unidled by scaling up the resources. - -.Procedure - -. To scale up a DeploymentConfig, run: -+ -[source,terminal] ----- -$ oc scale --replicas=1 dc ----- - -[NOTE] -==== -Automatic unidling by a router is currently only supported by the default -HAProxy router. -==== -[NOTE] -==== -Services do not support automatic unidling if you configure Kuryr-Kubernetes as an SDN. -==== diff --git a/modules/ignition-config-viewing.adoc b/modules/ignition-config-viewing.adoc deleted file mode 100644 index a7588e3f44f9..000000000000 --- a/modules/ignition-config-viewing.adoc +++ /dev/null @@ -1,116 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/architecture_rhcos.adoc - -[id="ignition-config-viewing_{context}"] -= Viewing Ignition configuration files - -To see the Ignition config file used to deploy the bootstrap machine, run the -following command: - -[source,terminal] ----- -$ openshift-install create ignition-configs --dir $HOME/testconfig ----- - -After you answer a few questions, the `bootstrap.ign`, `master.ign`, and -`worker.ign` files appear in the directory you entered. - -To see the contents of the `bootstrap.ign` file, pipe it through the `jq` filter. -Here's a snippet from that file: - -[source,terminal] ----- -$ cat $HOME/testconfig/bootstrap.ign | jq -{ - "ignition": { - "version": "3.2.0" - }, - "passwd": { - "users": [ - { - "name": "core", - "sshAuthorizedKeys": [ - "ssh-rsa AAAAB3NzaC1yc...." 
- ] - } - ] - }, - "storage": { - "files": [ - { - "overwrite": false, - "path": "/etc/motd", - "user": { - "name": "root" - }, - "append": [ - { - "source": "data:text/plain;charset=utf-8;base64,VGhpcyBpcyB0aGUgYm9vdHN0cmFwIG5vZGU7IGl0IHdpbGwgYmUgZGVzdHJveWVkIHdoZW4gdGhlIG1hc3RlciBpcyBmdWxseSB1cC4KClRoZSBwcmltYXJ5IHNlcnZpY2VzIGFyZSByZWxlYXNlLWltYWdlLnNlcnZpY2UgZm9sbG93ZWQgYnkgYm9vdGt1YmUuc2VydmljZS4gVG8gd2F0Y2ggdGhlaXIgc3RhdHVzLCBydW4gZS5nLgoKICBqb3VybmFsY3RsIC1iIC1mIC11IHJlbGVhc2UtaW1hZ2Uuc2VydmljZSAtdSBib290a3ViZS5zZXJ2aWNlCg==" - } - ], - "mode": 420 - }, -... ----- - -To decode the contents of a file listed in the `bootstrap.ign` file, pipe the -base64-encoded data string representing the contents of that file to the `base64 --d` command. Here's an example using the contents of the `/etc/motd` file added to -the bootstrap machine from the output shown above: - -[source,terminal] ----- -$ echo VGhpcyBpcyB0aGUgYm9vdHN0cmFwIG5vZGU7IGl0IHdpbGwgYmUgZGVzdHJveWVkIHdoZW4gdGhlIG1hc3RlciBpcyBmdWxseSB1cC4KClRoZSBwcmltYXJ5IHNlcnZpY2VzIGFyZSByZWxlYXNlLWltYWdlLnNlcnZpY2UgZm9sbG93ZWQgYnkgYm9vdGt1YmUuc2VydmljZS4gVG8gd2F0Y2ggdGhlaXIgc3RhdHVzLCBydW4gZS5nLgoKICBqb3VybmFsY3RsIC1iIC1mIC11IHJlbGVhc2UtaW1hZ2Uuc2VydmljZSAtdSBib290a3ViZS5zZXJ2aWNlCg== | base64 --decode ----- - -.Example output -[source,terminal] ----- -This is the bootstrap node; it will be destroyed when the master is fully up. - -The primary services are release-image.service followed by bootkube.service. To watch their status, run e.g. - - journalctl -b -f -u release-image.service -u bootkube.service ----- - -Repeat those commands on the `master.ign` and `worker.ign` files to see the source -of Ignition config files for each of those machine types.  You should see a line -like the following for the `worker.ign`, identifying how it gets its Ignition -config from the bootstrap machine: - -[source,terminal] ----- -"source": "https://api.myign.develcluster.example.com:22623/config/worker", ----- - -Here are a few things you can learn from the `bootstrap.ign` file: + - -* Format: The format of the file is defined in the -https://coreos.github.io/ignition/configuration-v3_2/[Ignition config spec]. -Files of the same format are used later by the MCO to merge changes into a -machine's configuration. -* Contents: Because the bootstrap machine serves the Ignition configs for other -machines, both master and worker machine Ignition config information is stored in the -`bootstrap.ign`, along with the bootstrap machine's configuration. -* Size: The file is more than 1300 lines long, with path to various types of resources. -* The content of each file that will be copied to the machine is actually encoded -into data URLs, which tends to make the content a bit clumsy to read. (Use the - `jq` and `base64` commands shown previously to make the content more readable.) -* Configuration: The different sections of the Ignition config file are generally - meant to contain files that are just dropped into a machine's file system, rather - than commands to modify existing files. For example, instead of having a section - on NFS that configures that service, you would just add an NFS configuration - file, which would then be started by the init process when the system comes up. -* users: A user named `core` is created, with your SSH key assigned to that user. -This allows you to log in to the cluster with that user name and your -credentials. -* storage: The storage section identifies files that are added to each machine. 
A -few notable files include `/root/.docker/config.json` (which provides credentials - your cluster needs to pull from container image registries) and a bunch of - manifest files in `/opt/openshift/manifests` that are used to configure your cluster. -* systemd: The `systemd` section holds content used to create `systemd` unit files. -Those files are used to start up services at boot time, as well as manage those -services on running systems. -* Primitives: Ignition also exposes low-level primitives that other tools can -build on. diff --git a/modules/images-about.adoc b/modules/images-about.adoc deleted file mode 100644 index c7626887d077..000000000000 --- a/modules/images-about.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/images-understand.aodc - -[id="images-about_{context}"] -= Images - -Containers in {product-title} are based on OCI- or Docker-formatted container _images_. An image is a binary that includes all of the requirements for running a single container, as well as metadata describing its needs and capabilities. - -You can think of it as a packaging technology. Containers only have access to resources defined in the image unless you give the container additional access when creating it. By deploying the same image in multiple containers across multiple hosts and load balancing between them, {product-title} can provide redundancy and horizontal scaling for a service packaged into an image. - -You can use the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/managing_containers/#using_podman_to_work_with_containers[podman] or `docker` CLI directly to build images, but {product-title} also supplies builder images that assist with creating new images by adding your code or configuration to existing images. - -Because applications develop over time, a single image name can actually refer to many different versions of the same image. Each different image is referred to uniquely by its hash, a long hexadecimal number such as `fd44297e2ddb050ec4f...`, which is usually shortened to 12 characters, such as `fd44297e2ddb`. diff --git a/modules/images-add-tags-to-imagestreams.adoc b/modules/images-add-tags-to-imagestreams.adoc deleted file mode 100644 index 1c1c3ca0d5f3..000000000000 --- a/modules/images-add-tags-to-imagestreams.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/tagging-images - -:_content-type: PROCEDURE -[id="images-add-tags-to-imagestreams_{context}"] -= Adding tags to image streams - -An image stream in {product-title} comprises zero or more container images identified by tags. - -There are different types of tags available. The default behavior uses a `permanent` tag, which points to a specific image in time. If the `permanent` tag is in use and the source changes, the tag does not change for the destination. - -A `tracking` tag means the destination tag's metadata is updated during the import of the source tag. - -.Procedure - -* You can add tags to an image stream using the `oc tag` command: -+ -[source,terminal] ----- -$ oc tag ----- -+ -For example, to configure the `ruby` image stream `static-2.0` tag to always refer to the current image for the `ruby` image stream `2.0` tag: -+ -[source,terminal] ----- -$ oc tag ruby:2.0 ruby:static-2.0 ----- -+ -This creates a new image stream tag named `static-2.0` in the `ruby` image stream. 
The new tag directly references the image id that the `ruby:2.0` image stream tag pointed to at the time `oc tag` was run, and the image it points to never changes. - -* To ensure the destination tag is updated when the source tag changes, use the `--alias=true` flag: -+ -[source,terminal] ----- -$ oc tag --alias=true ----- - -[NOTE] -==== -Use a tracking tag for creating permanent aliases, for example, `latest` or `stable`. The tag only works correctly within a single image stream. Trying to create a cross-image stream alias produces an error. -==== - -* You can also add the `--scheduled=true` flag to have the destination tag be -refreshed, or re-imported, periodically. The period is configured globally at -the system level. - -* The `--reference` flag creates an image stream tag that is not imported. The tag points to the source location, permanently. -+ -If you want to instruct {product-title} to always fetch the tagged image from the integrated registry, use `--reference-policy=local`. The registry uses the pull-through feature to serve the image to the client. By default, the image blobs are mirrored locally by the registry. As a result, they can be pulled more quickly the next time they are needed. The flag also allows for pulling from insecure registries without a need to supply `--insecure-registry` to the container runtime as long as the image stream has an insecure annotation or the tag has an insecure import policy. diff --git a/modules/images-allow-pods-to-reference-images-across-projects.adoc b/modules/images-allow-pods-to-reference-images-across-projects.adoc deleted file mode 100644 index 258a61227950..000000000000 --- a/modules/images-allow-pods-to-reference-images-across-projects.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/using-image-pull-secrets - -:_content-type: PROCEDURE -[id="images-allow-pods-to-reference-images-across-projects_{context}"] -= Allowing pods to reference images across projects - -When using the {product-registry}, to allow pods in `project-a` to reference images in `project-b`, a service account in `project-a` must be bound to the `system:image-puller` role in `project-b`. - -[NOTE] -==== -When you create a pod service account or a namespace, wait until the service account is provisioned with a docker pull secret; if you create a pod before its service account is fully provisioned, the pod fails to access the {product-registry}. -==== - -.Procedure - -. To allow pods in `project-a` to reference images in `project-b`, bind a service account in `project-a` to the `system:image-puller` role in `project-b`: -+ -[source,terminal] ----- -$ oc policy add-role-to-user \ - system:image-puller system:serviceaccount:project-a:default \ - --namespace=project-b ----- -+ -After adding that role, the pods in `project-a` that reference the default service account are able to pull images from `project-b`. - -. 
To allow access for any service account in `project-a`, use the group: -+ -[source,terminal] ----- -$ oc policy add-role-to-group \ - system:image-puller system:serviceaccounts:project-a \ - --namespace=project-b ----- diff --git a/modules/images-allow-pods-to-reference-images-from-secure-registries.adoc b/modules/images-allow-pods-to-reference-images-from-secure-registries.adoc deleted file mode 100644 index 8b148c76bf92..000000000000 --- a/modules/images-allow-pods-to-reference-images-from-secure-registries.adoc +++ /dev/null @@ -1,88 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/using-image-pull-secrets -// * openshift_images/managing-image-streams.adoc - -:_content-type: PROCEDURE -[id="images-allow-pods-to-reference-images-from-secure-registries_{context}"] -= Allowing pods to reference images from other secured registries - -The `.dockercfg` `$HOME/.docker/config.json` file for Docker clients is a Docker credentials file that stores your authentication information if you have previously logged into a secured or insecure registry. - -To pull a secured container image that is not from {product-registry}, you must create a pull secret from your Docker credentials and add it to your service account. - -The Docker credentials file and the associated pull secret can contain multiple references to the same registry, each with its own set of credentials. - -.Example `config.json` file -[source,json] ----- -{ - "auths":{ - "cloud.openshift.com":{ - "auth":"b3Blb=", - "email":"you@example.com" - }, - "quay.io":{ - "auth":"b3Blb=", - "email":"you@example.com" - }, - "quay.io/repository-main":{ - "auth":"b3Blb=", - "email":"you@example.com" - } - } -} ----- - -.Example pull secret -[source,yaml] ----- -apiVersion: v1 -data: - .dockerconfigjson: ewogICAiYXV0aHMiOnsKICAgICAgIm0iOnsKICAgICAgIsKICAgICAgICAgImF1dGgiOiJiM0JsYj0iLAogICAgICAgICAiZW1haWwiOiJ5b3VAZXhhbXBsZS5jb20iCiAgICAgIH0KICAgfQp9Cg== -kind: Secret -metadata: - creationTimestamp: "2021-09-09T19:10:11Z" - name: pull-secret - namespace: default - resourceVersion: "37676" - uid: e2851531-01bc-48ba-878c-de96cfe31020 -type: Opaque ----- - -.Procedure - -* If you already have a `.dockercfg` file for the secured registry, you can create a secret from that file by running: -+ -[source,terminal] ----- -$ oc create secret generic \ - --from-file=.dockercfg= \ - --type=kubernetes.io/dockercfg ----- - -* Or if you have a `$HOME/.docker/config.json` file: -+ -[source,terminal] ----- -$ oc create secret generic \ - --from-file=.dockerconfigjson= \ - --type=kubernetes.io/dockerconfigjson ----- - -* If you do not already have a Docker credentials file for the secured registry, you can create a secret by running: -+ -[source,terminal] ----- -$ oc create secret docker-registry \ - --docker-server= \ - --docker-username= \ - --docker-password= \ - --docker-email= ----- - -* To use a secret for pulling images for pods, you must add the secret to your service account. The name of the service account in this example should match the name of the service account the pod uses. 
The default service account is `default`: -+ -[source,terminal] ----- -$ oc secrets link default --for=pull ----- diff --git a/modules/images-cluster-sample-imagestream-import.adoc b/modules/images-cluster-sample-imagestream-import.adoc deleted file mode 100644 index 14277214d97a..000000000000 --- a/modules/images-cluster-sample-imagestream-import.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/cluster-tasks.adoc - -:_content-type: PROCEDURE -[id="images-cluster-sample-imagestream-import_{context}"] -= Configuring periodic importing of Cluster Sample Operator image stream tags - -You can ensure that you always have access to the latest versions of the Cluster Sample Operator images by periodically importing the image stream tags when new versions become available. - -.Procedure - -. Fetch all the imagestreams in the `openshift` namespace by running the following command: -+ -[source,terminal] ----- -oc get imagestreams -nopenshift ----- - -. Fetch the tags for every imagestream in the `openshift` namespace by running the following command: -+ -[source, terminal] ----- -$ oc get is -o jsonpath="{range .spec.tags[*]}{.name}{'\t'}{.from.name}{'\n'}{end}" -nopenshift ----- -+ -For example: -+ -[source, terminal] ----- -$ oc get is ubi8-openjdk-17 -o jsonpath="{range .spec.tags[*]}{.name}{'\t'}{.from.name}{'\n'}{end}" -nopenshift ----- -+ -.Example output -[source, terminal] ----- -1.11 registry.access.redhat.com/ubi8/openjdk-17:1.11 -1.12 registry.access.redhat.com/ubi8/openjdk-17:1.12 ----- - -. Schedule periodic importing of images for each tag present in the image stream by running the following command: -+ -[source,terminal] ----- -$ oc tag --scheduled -nopenshift ----- -+ -For example: -+ -[source,terminal] ----- -$ oc tag registry.access.redhat.com/ubi8/openjdk-17:1.11 ubi8-openjdk-17:1.11 --scheduled -nopenshift -$ oc tag registry.access.redhat.com/ubi8/openjdk-17:1.12 ubi8-openjdk-17:1.12 --scheduled -nopenshift ----- -+ -This command causes {product-title} to periodically update this particular image stream tag. This period is a cluster-wide setting set to 15 minutes by default. - -. Verify the scheduling status of the periodic import by running the following command: -+ -[source,terminal] ----- -oc get imagestream -o jsonpath="{range .spec.tags[*]}Tag: {.name}{'\t'}Scheduled: {.importPolicy.scheduled}{'\n'}{end}" -nopenshift ----- -+ -For example: -+ -[source,terminal] ----- -oc get imagestream ubi8-openjdk-17 -o jsonpath="{range .spec.tags[*]}Tag: {.name}{'\t'}Scheduled: {.importPolicy.scheduled}{'\n'}{end}" -nopenshift ----- -+ -.Example output -[source,terminal] ----- -Tag: 1.11 Scheduled: true -Tag: 1.12 Scheduled: true ----- \ No newline at end of file diff --git a/modules/images-configuration-allowed.adoc b/modules/images-configuration-allowed.adoc deleted file mode 100644 index 97d12ea64aec..000000000000 --- a/modules/images-configuration-allowed.adoc +++ /dev/null @@ -1,180 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/image-configuration.adoc -// * post_installation_configuration/preparing-for-users.adoc - -:_content-type: PROCEDURE -[id="images-configuration-allowed_{context}"] -= Adding specific registries - -You can add a list of registries, and optionally an individual repository within a registry, that are permitted for image pull and push actions by editing the `image.config.openshift.io/cluster` custom resource (CR). 
{product-title} applies the changes to this CR to all nodes in the cluster. - -When pulling or pushing images, the container runtime searches the registries listed under the `registrySources` parameter in the `image.config.openshift.io/cluster` CR. If you created a list of registries under the `allowedRegistries` parameter, the container runtime searches only those registries. Registries not in the list are blocked. - -[WARNING] -==== -When the `allowedRegistries` parameter is defined, all registries, including the `registry.redhat.io` and `quay.io` registries and the default {product-registry}, are blocked unless explicitly listed. If you use the parameter, to prevent pod failure, add the `registry.redhat.io` and `quay.io` registries and the `internalRegistryHostname` to the `allowedRegistries` list, as they are required by payload images within your environment. For disconnected clusters, mirror registries should also be added. -==== - -.Procedure - -. Edit the `image.config.openshift.io/cluster` CR: -+ -[source,terminal] ----- -$ oc edit image.config.openshift.io/cluster ----- -+ -The following is an example `image.config.openshift.io/cluster` CR with an allowed list: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Image -metadata: - annotations: - release.openshift.io/create-only: "true" - creationTimestamp: "2019-05-17T13:44:26Z" - generation: 1 - name: cluster - resourceVersion: "8302" - selfLink: /apis/config.openshift.io/v1/images/cluster - uid: e34555da-78a9-11e9-b92b-06d6c7da38dc -spec: - registrySources: <1> - allowedRegistries: <2> - - example.com - - quay.io - - registry.redhat.io - - reg1.io/myrepo/myapp:latest - - image-registry.openshift-image-registry.svc:5000 -status: - internalRegistryHostname: image-registry.openshift-image-registry.svc:5000 ----- -<1> Contains configurations that determine how the container runtime should treat individual registries when accessing images for builds and pods. It does not contain configuration for the internal cluster registry. -<2> Specify registries, and optionally a repository in that registry, to use for image pull and push actions. All other registries are blocked. -+ -[NOTE] -==== -Either the `allowedRegistries` parameter or the `blockedRegistries` parameter can be set, but not both. -==== -+ -The Machine Config Operator (MCO) watches the `image.config.openshift.io/cluster` resource for any changes to the registries. When the MCO detects a change, it drains the nodes, applies the change, and uncordons the nodes. After the nodes return to the `Ready` state, the allowed registries list is used to update the image signature policy in the `/host/etc/containers/policy.json` file on each node. - -. 
To check that the registries have been added to the policy file, use the following command on a node: -+ -[source,terminal] ----- -$ cat /host/etc/containers/policy.json ----- -+ -The following policy indicates that only images from the example.com, quay.io, and registry.redhat.io registries are permitted for image pulls and pushes: -+ -.Example image signature policy file -[%collapsible] -==== -[source,terminal] ----- -{ - "default":[ - { - "type":"reject" - } - ], - "transports":{ - "atomic":{ - "example.com":[ - { - "type":"insecureAcceptAnything" - } - ], - "image-registry.openshift-image-registry.svc:5000":[ - { - "type":"insecureAcceptAnything" - } - ], - "insecure.com":[ - { - "type":"insecureAcceptAnything" - } - ], - "quay.io":[ - { - "type":"insecureAcceptAnything" - } - ], - "reg4.io/myrepo/myapp:latest":[ - { - "type":"insecureAcceptAnything" - } - ], - "registry.redhat.io":[ - { - "type":"insecureAcceptAnything" - } - ] - }, - "docker":{ - "example.com":[ - { - "type":"insecureAcceptAnything" - } - ], - "image-registry.openshift-image-registry.svc:5000":[ - { - "type":"insecureAcceptAnything" - } - ], - "insecure.com":[ - { - "type":"insecureAcceptAnything" - } - ], - "quay.io":[ - { - "type":"insecureAcceptAnything" - } - ], - "reg4.io/myrepo/myapp:latest":[ - { - "type":"insecureAcceptAnything" - } - ], - "registry.redhat.io":[ - { - "type":"insecureAcceptAnything" - } - ] - }, - "docker-daemon":{ - "":[ - { - "type":"insecureAcceptAnything" - } - ] - } - } -} ----- -==== - -[NOTE] -==== -If your cluster uses the `registrySources.insecureRegistries` parameter, ensure that any insecure registries are included in the allowed list. - -For example: - -[source,yml] ----- -spec: - registrySources: - insecureRegistries: - - insecure.com - allowedRegistries: - - example.com - - quay.io - - registry.redhat.io - - insecure.com - - image-registry.openshift-image-registry.svc:5000 ----- -==== diff --git a/modules/images-configuration-blocked-payload.adoc b/modules/images-configuration-blocked-payload.adoc deleted file mode 100644 index e1e231c089d5..000000000000 --- a/modules/images-configuration-blocked-payload.adoc +++ /dev/null @@ -1,70 +0,0 @@ -//Modules included in the following assemblies -// -// * openshift_images/image-configuration.adoc - -:_content-type: PROCEDURE -[id="images-configuration-blocked-payload"] - -= Blocking a payload registry - -In a mirroring configuration, you can block upstream payload registries in a disconnected environment using a `ImageContentSourcePolicy` (ICSP) object. The following example procedure demonstrates how to block the `quay.io/openshift-payload` payload registry. - -.Procedure -. Create the mirror configuration using an `ImageContentSourcePolicy` (ICSP) object to mirror the payload to a registry in your instance. The following example ICSP file mirrors the payload `internal-mirror.io/openshift-payload`: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1alpha1 -kind: ImageContentSourcePolicy -metadata: - name: my-icsp -spec: - repositoryDigestMirrors: - - mirrors: - - internal-mirror.io/openshift-payload - source: quay.io/openshift-payload ----- -. After the object deploys onto your nodes, verify that the mirror configuration is set by checking the `/etc/containers/registries.conf` file: -+ -.Example output -[source,terminal] ----- -[[registry]] - prefix = "" - location = "quay.io/openshift-payload" - mirror-by-digest-only = true - -[[registry.mirror]] - location = "internal-mirror.io/openshift-payload" ----- -. 
Use the following command to edit the `image.config.openshift.io` custom resource file: -+ -[source,terminal] ----- -$ oc edit image.config.openshift.io cluster ----- -. To block the payload registry, add the following configuration to the `image.config.openshift.io` custom resource file: -+ -[source,yaml] ----- -spec: - registrySource: - blockedRegistries: - - quay.io/openshift-payload ----- - -.Verification -* Verify that the upstream payload registry is blocked by checking the `/etc/containers/registries.conf` file on the node. -+ -.Example output -[source,terminal] ----- -[[registry]] - prefix = "" - location = "quay.io/openshift-payload" - blocked = true - mirror-by-digest-only = true - -[[registry.mirror]] - location = "internal-mirror.io/openshift-payload" ----- diff --git a/modules/images-configuration-blocked.adoc b/modules/images-configuration-blocked.adoc deleted file mode 100644 index dfe4a3699464..000000000000 --- a/modules/images-configuration-blocked.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/image-configuration.adoc -// * post_installation_configuration/preparing-for-users.adoc - -:_content-type: PROCEDURE -[id="images-configuration-blocked_{context}"] -= Blocking specific registries - -You can block any registry, and optionally an individual repository within a registry, by editing the `image.config.openshift.io/cluster` custom resource (CR). {product-title} applies the changes to this CR to all nodes in the cluster. - -When pulling or pushing images, the container runtime searches the registries listed under the `registrySources` parameter in the `image.config.openshift.io/cluster` CR. If you created a list of registries under the `blockedRegistries` parameter, the container runtime does not search those registries. All other registries are allowed. - -[WARNING] -==== -To prevent pod failure, do not add the `registry.redhat.io` and `quay.io` registries to the `blockedRegistries` list, as they are required by payload images within your environment. -==== - -.Procedure - -. Edit the `image.config.openshift.io/cluster` CR: -+ -[source,terminal] ----- -$ oc edit image.config.openshift.io/cluster ----- -+ -The following is an example `image.config.openshift.io/cluster` CR with a blocked list: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Image -metadata: - annotations: - release.openshift.io/create-only: "true" - creationTimestamp: "2019-05-17T13:44:26Z" - generation: 1 - name: cluster - resourceVersion: "8302" - selfLink: /apis/config.openshift.io/v1/images/cluster - uid: e34555da-78a9-11e9-b92b-06d6c7da38dc -spec: - registrySources: <1> - blockedRegistries: <2> - - untrusted.com - - reg1.io/myrepo/myapp:latest -status: - internalRegistryHostname: image-registry.openshift-image-registry.svc:5000 ----- -<1> Contains configurations that determine how the container runtime should treat individual registries when accessing images for builds and pods. It does not contain configuration for the internal cluster registry. -<2> Specify registries, and optionally a repository in that registry, that should not be used for image pull and push actions. All other registries are allowed. -+ -[NOTE] -==== -Either the `blockedRegistries` registry or the `allowedRegistries` registry can be set, but not both. -==== -+ -The Machine Config Operator (MCO) watches the `image.config.openshift.io/cluster` resource for any changes to the registries. 
When the MCO detects a change, it drains the nodes, applies the change, and uncordons the nodes. After the nodes return to the `Ready` state, changes to the blocked registries appear in the `/etc/containers/registries.conf` file on each node. - -. To check that the registries have been added to the policy file, use the following command on a node: -+ -[source,terminal] ----- -$ cat /host/etc/containers/registries.conf ----- -+ -The following example indicates that images from the `untrusted.com` registry are prevented for image pulls and pushes: -+ -.Example output -[source,terminal] ----- -unqualified-search-registries = ["registry.access.redhat.com", "docker.io"] - -[[registry]] - prefix = "" - location = "untrusted.com" - blocked = true ----- diff --git a/modules/images-configuration-cas.adoc b/modules/images-configuration-cas.adoc deleted file mode 100644 index ff93fd4a9b9e..000000000000 --- a/modules/images-configuration-cas.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * registry/configuring-registry-operator.adoc -// * openshift_images/image-configuration.adoc -// * post_installation_configuration/preparing-for-users.adoc - -:_content-type: PROCEDURE -[id="images-configuration-cas_{context}"] -= Configuring additional trust stores for image registry access - -The `image.config.openshift.io/cluster` custom resource can contain a reference to a config map that contains additional certificate authorities to be trusted during image registry access. - -.Prerequisites -* The certificate authorities (CA) must be PEM-encoded. - -.Procedure - -You can create a config map in the `openshift-config` namespace and use its name in `AdditionalTrustedCA` in the `image.config.openshift.io` custom resource to provide additional CAs that should be trusted when contacting external registries. - -The config map key is the hostname of a registry with the port for which this CA is to be trusted, and the PEM certificate content is the value, for each additional registry CA to trust. - -.Image registry CA config map example -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: my-registry-ca -data: - registry.example.com: | - -----BEGIN CERTIFICATE----- - ... - -----END CERTIFICATE----- - registry-with-port.example.com..5000: | <1> - -----BEGIN CERTIFICATE----- - ... - -----END CERTIFICATE----- ----- -<1> If the registry has the port, such as `registry-with-port.example.com:5000`, `:` should be replaced with `..`. - -You can configure additional CAs with the following procedure. - -. To configure an additional CA: -+ -[source,terminal] ----- -$ oc create configmap registry-config --from-file==ca.crt -n openshift-config ----- -+ -[source,terminal] ----- -$ oc edit image.config.openshift.io cluster ----- -+ -[source,yaml] ----- -spec: - additionalTrustedCA: - name: registry-config ----- diff --git a/modules/images-configuration-file.adoc b/modules/images-configuration-file.adoc deleted file mode 100644 index 97713601699c..000000000000 --- a/modules/images-configuration-file.adoc +++ /dev/null @@ -1,95 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/image-configuration.adoc -// * post_installation_configuration/preparing-for-users.adoc - -:_content-type: PROCEDURE -[id="images-configuration-file_{context}"] -= Configuring image registry settings - -You can configure image registry settings by editing the `image.config.openshift.io/cluster` custom resource (CR). 
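If you want to review the current settings before you make changes, you can print the CR first:

[source,terminal]
----
$ oc get image.config.openshift.io/cluster -o yaml
----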
-When changes to the registry are applied to the `image.config.openshift.io/cluster` CR, the Machine Config Operator (MCO) performs the following sequential actions: - -. Cordons the node -. Applies changes by restarting CRI-O -. Uncordons the node -+ -[NOTE] -==== -The MCO does not restart nodes when it detects changes. -==== - -.Procedure - -. Edit the `image.config.openshift.io/cluster` custom resource: -+ -[source,terminal] ----- -$ oc edit image.config.openshift.io/cluster ----- -+ -The following is an example `image.config.openshift.io/cluster` CR: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Image <1> -metadata: - annotations: - release.openshift.io/create-only: "true" - creationTimestamp: "2019-05-17T13:44:26Z" - generation: 1 - name: cluster - resourceVersion: "8302" - selfLink: /apis/config.openshift.io/v1/images/cluster - uid: e34555da-78a9-11e9-b92b-06d6c7da38dc -spec: - allowedRegistriesForImport: <2> - - domainName: quay.io - insecure: false - additionalTrustedCA: <3> - name: myconfigmap - registrySources: <4> - allowedRegistries: - - example.com - - quay.io - - registry.redhat.io - - image-registry.openshift-image-registry.svc:5000 - - reg1.io/myrepo/myapp:latest - insecureRegistries: - - insecure.com -status: - internalRegistryHostname: image-registry.openshift-image-registry.svc:5000 ----- -<1> `Image`: Holds cluster-wide information about how to handle images. The canonical, and only valid name is `cluster`. -<2> `allowedRegistriesForImport`: Limits the container image registries from which normal users may import images. Set this list to the registries that you trust to contain valid images, and that you want applications to be able to import from. Users with permission to create images or `ImageStreamMappings` from the API are not affected by this policy. Typically only cluster administrators have the appropriate permissions. -<3> `additionalTrustedCA`: A reference to a config map containing additional certificate authorities (CA) that are trusted during image stream import, pod image pull, `openshift-image-registry` pullthrough, and builds. The namespace for this config map is `openshift-config`. The format of the config map is to use the registry hostname as the key, and the PEM certificate as the value, for each additional registry CA to trust. -<4> `registrySources`: Contains configuration that determines whether the container runtime allows or blocks individual registries when accessing images for builds and pods. Either the `allowedRegistries` parameter or the `blockedRegistries` parameter can be set, but not both. You can also define whether or not to allow access to insecure registries or registries that allow registries that use image short names. This example uses the `allowedRegistries` parameter, which defines the registries that are allowed to be used. The insecure registry `insecure.com` is also allowed. The `registrySources` parameter does not contain configuration for the internal cluster registry. -+ -[NOTE] -==== -When the `allowedRegistries` parameter is defined, all registries, including the registry.redhat.io and quay.io registries and the default {product-registry}, are blocked unless explicitly listed. If you use the parameter, to prevent pod failure, you must add the `registry.redhat.io` and `quay.io` registries and the `internalRegistryHostname` to the `allowedRegistries` list, as they are required by payload images within your environment. 
Do not add the `registry.redhat.io` and `quay.io` registries to the `blockedRegistries` list. - -When using the `allowedRegistries`, `blockedRegistries`, or `insecureRegistries` parameter, you can specify an individual repository within a registry. For example: `reg1.io/myrepo/myapp:latest`. - -Insecure external registries should be avoided to reduce possible security risks. -==== - -. To check that the changes are applied, list your nodes: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-137-182.us-east-2.compute.internal Ready,SchedulingDisabled worker 65m v1.27.3 -ip-10-0-139-120.us-east-2.compute.internal Ready,SchedulingDisabled control-plane 74m v1.27.3 -ip-10-0-176-102.us-east-2.compute.internal Ready control-plane 75m v1.27.3 -ip-10-0-188-96.us-east-2.compute.internal Ready worker 65m v1.27.3 -ip-10-0-200-59.us-east-2.compute.internal Ready worker 63m v1.27.3 -ip-10-0-223-123.us-east-2.compute.internal Ready control-plane 73m v1.27.3 ----- diff --git a/modules/images-configuration-insecure.adoc b/modules/images-configuration-insecure.adoc deleted file mode 100644 index a8d07d0c62f5..000000000000 --- a/modules/images-configuration-insecure.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/image-configuration.adoc -// * post_installation_configuration/preparing-for-users.adoc - -:_content-type: PROCEDURE -[id="images-configuration-insecure_{context}"] -= Allowing insecure registries - -You can add insecure registries, and optionally an individual repository within a registry, by editing the `image.config.openshift.io/cluster` custom resource (CR). {product-title} applies the changes to this CR to all nodes in the cluster. - -Registries that do not use valid SSL certificates or do not require HTTPS connections are considered insecure. - -[WARNING] -==== -Insecure external registries should be avoided to reduce possible security risks. -==== - -.Procedure - -. Edit the `image.config.openshift.io/cluster` CR: -+ -[source,terminal] ----- -$ oc edit image.config.openshift.io/cluster ----- -+ -The following is an example `image.config.openshift.io/cluster` CR with an insecure registries list: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Image -metadata: - annotations: - release.openshift.io/create-only: "true" - creationTimestamp: "2019-05-17T13:44:26Z" - generation: 1 - name: cluster - resourceVersion: "8302" - selfLink: /apis/config.openshift.io/v1/images/cluster - uid: e34555da-78a9-11e9-b92b-06d6c7da38dc -spec: - registrySources: <1> - insecureRegistries: <2> - - insecure.com - - reg4.io/myrepo/myapp:latest - allowedRegistries: - - example.com - - quay.io - - registry.redhat.io - - insecure.com <3> - - reg4.io/myrepo/myapp:latest - - image-registry.openshift-image-registry.svc:5000 -status: - internalRegistryHostname: image-registry.openshift-image-registry.svc:5000 ----- -<1> Contains configurations that determine how the container runtime should treat individual registries when accessing images for builds and pods. It does not contain configuration for the internal cluster registry. -<2> Specify an insecure registry. You can specify a repository in that registry. -<3> Ensure that any insecure registries are included in the `allowedRegistries` list. 
-+ -[NOTE] -==== -When the `allowedRegistries` parameter is defined, all registries, including the registry.redhat.io and quay.io registries and the default {product-registry}, are blocked unless explicitly listed. If you use the parameter, to prevent pod failure, add all registries including the `registry.redhat.io` and `quay.io` registries and the `internalRegistryHostname` to the `allowedRegistries` list, as they are required by payload images within your environment. For disconnected clusters, mirror registries should also be added. -==== -+ -The Machine Config Operator (MCO) watches the `image.config.openshift.io/cluster` CR for any changes to the registries, then drains and uncordons the nodes when it detects changes. After the nodes return to the `Ready` state, changes to the insecure and blocked registries appear in the `/etc/containers/registries.conf` file on each node. - -. To check that the registries have been added to the policy file, use the following command on a node: -+ -[source,terminal] ----- -$ cat /host/etc/containers/registries.conf ----- -+ -The following example indicates that images from the `insecure.com` registry is insecure and is allowed for image pulls and pushes. -+ -.Example output -[source,terminal] ----- -unqualified-search-registries = ["registry.access.redhat.com", "docker.io"] - -[[registry]] - prefix = "" - location = "insecure.com" - insecure = true ----- diff --git a/modules/images-configuration-parameters.adoc b/modules/images-configuration-parameters.adoc deleted file mode 100644 index d0354231367c..000000000000 --- a/modules/images-configuration-parameters.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/image-configuration.adoc -// * post_installation_configuration/preparing-for-users.adoc - -[id="images-configuration-parameters_{context}"] -= Image controller configuration parameters - -The `image.config.openshift.io/cluster` resource holds cluster-wide information about how to handle images. The canonical, and only valid name is `cluster`. Its `spec` offers the following configuration parameters. - -[NOTE] -==== -Parameters such as `DisableScheduledImport`, `MaxImagesBulkImportedPerRepository`, `MaxScheduledImportsPerMinute`, `ScheduledImageImportMinimumIntervalSeconds`, `InternalRegistryHostname` are not configurable. -==== - -[cols="3a,8a",options="header"] -|=== -|Parameter |Description - -|`allowedRegistriesForImport` -|Limits the container image registries from which normal users can import images. Set this list to the registries that you trust to contain valid images, and that you want applications to be able to import from. Users with permission to create images or `ImageStreamMappings` from the API are not affected by this policy. Typically only cluster administrators have the appropriate permissions. - -Every element of this list contains a location of the registry specified by the registry domain name. - -`domainName`: Specifies a domain name for the registry. If the registry uses a non-standard `80` or `443` port, the port should be included in the domain name as well. - -`insecure`: Insecure indicates whether the registry is secure or insecure. By default, if not otherwise specified, the registry is assumed to be secure. - -|`additionalTrustedCA` -|A reference to a config map containing additional CAs that should be trusted during `image stream import`, `pod image pull`, `openshift-image-registry pullthrough`, and builds. 
- -The namespace for this config map is `openshift-config`. The format of the config map is to use the registry hostname as the key, and the PEM-encoded certificate as the value, for each additional registry CA to trust. - -|`externalRegistryHostnames` -|Provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in `publicDockerImageRepository` field in image streams. The value must be in `hostname[:port]` format. - -|`registrySources` -|Contains configuration that determines how the container runtime should treat individual registries when accessing images for builds and -pods. For instance, whether or not to allow insecure access. It does not contain configuration for the internal cluster registry. - -`insecureRegistries`: Registries which do not have a valid TLS certificate or only support HTTP connections. To specify all subdomains, add the asterisk (`\*`) wildcard character as a prefix to the domain name. For example, `*.example.com`. You can specify an individual repository within a registry. For example: `reg1.io/myrepo/myapp:latest`. - -`blockedRegistries`: Registries for which image pull and push actions are denied. To specify all subdomains, add the asterisk (`\*`) wildcard character as a prefix to the domain name. For example, `*.example.com`. You can specify an individual repository within a registry. For example: `reg1.io/myrepo/myapp:latest`. All other registries are allowed. - -`allowedRegistries`: Registries for which image pull and push actions are allowed. To specify all subdomains, add the asterisk (`\*`) wildcard character as a prefix to the domain name. For example, `*.example.com`. You can specify an individual repository within a registry. For example: `reg1.io/myrepo/myapp:latest`. All other registries are blocked. - -`containerRuntimeSearchRegistries`: Registries for which image pull and push actions are allowed using image short names. All other registries are blocked. - -Either `blockedRegistries` or `allowedRegistries` can be set, but not both. - -|=== - -[WARNING] -==== -When the `allowedRegistries` parameter is defined, all registries, including `registry.redhat.io` and `quay.io` registries and the default {product-registry}, are blocked unless explicitly listed. When using the parameter, to prevent pod failure, add all registries including the `registry.redhat.io` and `quay.io` registries and the `internalRegistryHostname` to the `allowedRegistries` list, as they are required by payload images within your environment. For disconnected clusters, mirror registries should also be added. -==== - -The `status` field of the `image.config.openshift.io/cluster` resource holds observed values from the cluster. - -[cols="3a,8a",options="header"] -|=== -|Parameter |Description - -|`internalRegistryHostname` -|Set by the Image Registry Operator, which controls the `internalRegistryHostname`. It sets the hostname for the default {product-registry}. The value must be in `hostname[:port]` format. For backward compatibility, you can still use the `OPENSHIFT_DEFAULT_REGISTRY` environment variable, but this setting overrides the environment variable. - -|`externalRegistryHostnames` -|Set by the Image Registry Operator, provides the external hostnames for the image registry when it is exposed externally. The first value is used in `publicDockerImageRepository` field in image streams. The values must be in `hostname[:port]` format. 
- -|=== diff --git a/modules/images-configuration-registry-mirror-convert.adoc b/modules/images-configuration-registry-mirror-convert.adoc deleted file mode 100644 index f38e2d11aa91..000000000000 --- a/modules/images-configuration-registry-mirror-convert.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/image-configuration.adoc -// * post_installation_configuration/preparing-for-users.adoc -// * updating/updating_a_cluster/updating_disconnected_cluster/disconnected-update.adoc - -:_content-type: PROCEDURE -[id="images-configuration-registry-mirror-convert_{context}"] -= Converting ImageContentSourcePolicy (ICSP) files for image registry repository mirroring - -Using an `ImageContentSourcePolicy` (ICSP) object to configure repository mirroring is a deprecated feature. This functionality is still included in {product-title} and continues to be supported; however, it will be removed in a future release of this product and is not recommended for new deployments. - -ICSP objects are being replaced by `ImageDigestMirrorSet` and `ImageTagMirrorSet` objects to configure repository mirroring. If you have existing YAML files that you used to create `ImageContentSourcePolicy` objects, you can use the `oc adm migrate icsp` command to convert those files to an `ImageDigestMirrorSet` YAML file. The command updates the API to the current version, changes the `kind` value to `ImageDigestMirrorSet`, and changes `spec.repositoryDigestMirrors` to `spec.imageDigestMirrors`. The rest of the file is not changed. - -For more information about `ImageDigestMirrorSet` or `ImageTagMirrorSet` objects, see "Configuring image registry repository mirroring" in the previous section. - -.Prerequisites - -* Ensure that you have access to the cluster as a user with the `cluster-admin` role. - -* Ensure that you have `ImageContentSourcePolicy` objects on your cluster. - -.Procedure - -. Use the following command to convert one or more `ImageContentSourcePolicy` YAML files to an `ImageDigestMirrorSet` YAML file: -+ -[source,terminal] ----- -$ oc adm migrate icsp .yaml .yaml .yaml --dest-dir ----- -+ --- -where: - -``:: Specifies the name of the source `ImageContentSourcePolicy` YAML. You can list multiple file names. -`--dest-dir`:: Optional: Specifies a directory for the output `ImageDigestMirrorSet` YAML. If unset, the file is written to the current directory. --- -+ -For example, the following command converts the `icsp.yaml` and `icsp-2.yaml` file and saves the new YAML files to the `idms-files` directory. -+ -[source,terminal] ----- -$ oc adm migrate icsp icsp.yaml icsp-2.yaml --dest-dir idms-files ----- -+ -.Example output -[source,terminal] ----- -wrote ImageDigestMirrorSet to idms-files/imagedigestmirrorset_ubi8repo.5911620242173376087.yaml -wrote ImageDigestMirrorSet to idms-files/imagedigestmirrorset_ubi9repo.6456931852378115011.yaml ----- - -. Create the CR object by running the following command: -+ -[source,terminal] ----- -$ oc create -f /.yaml ----- -+ --- -where: - -``:: Specifies the path to the directory, if you used the `--dest-dir` flag. -``:: Specifies the name of the `ImageDigestMirrorSet` YAML. 
--- - diff --git a/modules/images-configuration-registry-mirror.adoc b/modules/images-configuration-registry-mirror.adoc deleted file mode 100644 index 9f5d767a6e1c..000000000000 --- a/modules/images-configuration-registry-mirror.adoc +++ /dev/null @@ -1,299 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/image-configuration.adoc -// * post_installation_configuration/preparing-for-users.adoc -// * updating/updating_a_cluster/updating_disconnected_cluster/disconnected-update.adoc - -:_content-type: PROCEDURE -[id="images-configuration-registry-mirror_{context}"] -= Configuring image registry repository mirroring - -Setting up container registry repository mirroring enables you to perform the following tasks: - -* Configure your {product-title} cluster to redirect requests to pull images from a repository on a source image registry and have it resolved by a repository on a mirrored image registry. -* Identify multiple mirrored repositories for each target repository, to make sure that if one mirror is down, another can be used. - -Repository mirroring in {product-title} includes the following attributes: - -* Image pulls are resilient to registry downtimes. -* Clusters in disconnected environments can pull images from critical locations, such as quay.io, and have registries behind a company firewall provide the requested images. -* A particular order of registries is tried when an image pull request is made, with the permanent registry typically being the last one tried. -* The mirror information you enter is added to the `/etc/containers/registries.conf` file on every node in the {product-title} cluster. -* When a node makes a request for an image from the source repository, it tries each mirrored repository in turn until it finds the requested content. If all mirrors fail, the cluster tries the source repository. If successful, the image is pulled to the node. - -Setting up repository mirroring can be done in the following ways: - -* At {product-title} installation: -+ -By pulling container images needed by {product-title} and then bringing those images behind your company's firewall, you can install {product-title} into a datacenter that is in a disconnected environment. - -* After {product-title} installation: -+ -If you did not configure mirroring during {product-title} installation, you can do so post-installation by using one of the following custom resource (CR) objects: -+ --- -** `ImageDigestMirrorSet`. This CR allows you to pull images from a mirrored registry by using digest specifications. -+ -** `ImageTagMirrorSet`. This CR allows you to pull images from a mirrored registry by using image tags. --- -+ -[IMPORTANT] -==== -Using an `ImageContentSourcePolicy` (ICSP) object to configure repository mirroring is a deprecated feature. Deprecated functionality is still included in {product-title} and continues to be supported; however, it will be removed in a future release of this product and is not recommended for new deployments. If you have existing YAML files that you used to create `ImageContentSourcePolicy` objects, you can use the `oc adm migrate icsp` command to convert those files to an `ImageDigestMirrorSet` YAML file. For more information, see "Converting ImageContentSourcePolicy (ICSP) files for image registry repository mirroring" in the following section. -==== - -Both of these custom resource objects identify the following information: --- -* The source of the container image repository you want to mirror. 
-* A separate entry for each mirror repository you want to offer the content -requested from the source repository. --- - -[NOTE] -==== -If your cluster uses an `ImageDigestMirrorSet` or `ImageTagMirrorSet` object to configure repository mirroring, you can use only global pull secrets for mirrored registries. You cannot add a pull secret to a project. -==== - -The following procedure creates a post-installation mirror configuration, where you create an `ImageDigestMirrorSet` object. - -.Prerequisites -* Ensure that you have access to the cluster as a user with the `cluster-admin` role. - -* Ensure that there are no `ImageContentSourcePolicy` objects on your cluster. For example, you can use the following command: -+ -[source, terminal] ----- -$ oc get ImageContentSourcePolicy ----- -+ -.Example output -[source, terminal] ----- -No resources found ----- - -.Procedure - -. Configure mirrored repositories, by either: -+ -* Setting up a mirrored repository with Red Hat Quay, as described in link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/manage_red_hat_quay/repo-mirroring-in-red-hat-quay[Red Hat Quay Repository Mirroring]. Using Red Hat Quay allows you to copy images from one repository to another and also automatically sync those repositories repeatedly over time. -* Using a tool such as `skopeo` to copy images manually from the source directory to the mirrored repository. -+ -For example, after installing the skopeo RPM package on a Red Hat Enterprise Linux (RHEL) 7 or RHEL 8 system, use the `skopeo` command as shown in this example: -+ -[source,terminal] ----- -$ skopeo copy \ -docker://registry.access.redhat.com/ubi9/ubi-minimal:latest@sha256:5cf... \ -docker://example.io/example/ubi-minimal ----- -+ -In this example, you have a container image registry that is named `example.io` with an image repository named `example` to which you want to copy the `ubi9/ubi-minimal` image from `registry.access.redhat.com`. After you create the registry, you can configure your {product-title} cluster to redirect requests made of the source repository to the mirrored repository. - -. Log in to your {product-title} cluster. - -. Create an `ImageDigestMirrorSet` or `ImageTagMirrorSet` CR, as needed, replacing the source and mirrors with your own registry and repository pairs and images: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 <1> -kind: ImageDigestMirrorSet <2> -metadata: - name: ubi9repo -spec: - imageDigestMirrors: <3> - - mirrors: - - example.io/example/ubi-minimal <4> - - example.com/example/ubi-minimal <5> - source: registry.access.redhat.com/ubi9/ubi-minimal <6> - mirrorSourcePolicy: AllowContactingSource <7> - - mirrors: - - mirror.example.com/redhat - source: registry.redhat.io/openshift4 <8> - mirrorSourcePolicy: AllowContactingSource - - mirrors: - - mirror.example.com - source: registry.redhat.io <9> - mirrorSourcePolicy: AllowContactingSource - - mirrors: - - mirror.example.net/image - source: registry.example.com/example/myimage <10> - mirrorSourcePolicy: AllowContactingSource - - mirrors: - - mirror.example.net - source: registry.example.com/example <11> - mirrorSourcePolicy: AllowContactingSource - - mirrors: - - mirror.example.net/registry-example-com - source: registry.example.com <12> - mirrorSourcePolicy: AllowContactingSource ----- -<1> Indicates the API to use with this CR. This must be `config.openshift.io/v1`. -<2> Indicates the kind of object according to the pull type: -** `ImageDigestMirrorSet`: Pulls a digest reference image. 
-** `ImageTagMirrorSet`: Pulls a tag reference image. -<3> Indicates the type of image pull method, either: -** `imageDigestMirrors`: Use for an `ImageDigestMirrorSet` CR. -** `imageTagMirrors`: Use for an `ImageTagMirrorSet` CR. -<4> Indicates the name of the mirrored image registry and repository. -<5> Optional: Indicates a secondary mirror repository for each target repository. If one mirror is down, the target repository can use another mirror. -<6> Indicates the registry and repository source, which is the repository that is referred to in image pull specifications. -<7> Optional: Indicates the fallback policy if the image pull fails: -** `AllowContactingSource`: Allows continued attempts to pull the image from the source repository. This is the default. -** `NeverContactSource`: Prevents continued attempts to pull the image from the source repository. -<8> Optional: Indicates a namespace inside a registry, which allows you to use any image in that namespace. If you use a registry domain as a source, the object is applied to all repositories from the registry. -<9> Optional: Indicates a registry, which allows you to use any image in that registry. If you specify a registry name, the object is applied to all repositories from a source registry to a mirror registry. -<10> Pulls the image `registry.example.com/example/myimage@sha256:...` from the mirror `mirror.example.net/image@sha256:..`. -<11> Pulls the image `registry.example.com/example/image@sha256:...` in the source registry namespace from the mirror `mirror.example.net/image@sha256:...`. -<12> Pulls the image `registry.example.com/myimage@sha256` from the mirror registry `example.net/registry-example-com/myimage@sha256:...`. The `ImageContentSourcePolicy` resource is applied to all repositories from a source registry to a mirror registry `mirror.example.net/registry-example-com`. - -. Create the new object: -+ -[source,terminal] ----- -$ oc create -f registryrepomirror.yaml ----- -+ -After the object is created, the Machine Config Operator (MCO) cordons the nodes as the new settings are deployed to each node. The MCO restarts the nodes for an `ImageTagMirrorSet` object only. The MCO does not restart the nodes for `ImageDigestMirrorSet` objects. When the nodes are uncordoned, the cluster starts using the mirrored repository for requests to the source repository. - -. To check that the mirrored configuration settings are applied, do the following on one of the nodes. - -.. List your nodes: -+ -[source,terminal] ----- -$ oc get node ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-137-44.ec2.internal Ready worker 7m v1.27.3 -ip-10-0-138-148.ec2.internal Ready master 11m v1.27.3 -ip-10-0-139-122.ec2.internal Ready master 11m v1.27.3 -ip-10-0-147-35.ec2.internal Ready worker 7m v1.27.3 -ip-10-0-153-12.ec2.internal Ready worker 7m v1.27.3 -ip-10-0-154-10.ec2.internal Ready master 11m v1.27.3 ----- - -.. Start the debugging process to access the node: -+ -[source,terminal] ----- -$ oc debug node/ip-10-0-147-35.ec2.internal ----- -+ -.Example output -[source,terminal] ----- -Starting pod/ip-10-0-147-35ec2internal-debug ... -To use host binaries, run `chroot /host` ----- - -.. Change your root directory to `/host`: -+ -[source,terminal] ----- -sh-4.2# chroot /host ----- - -.. 
Check the `/etc/containers/registries.conf` file to make sure -the changes were made: -+ -[source,terminal] ----- -sh-4.2# cat /etc/containers/registries.conf ----- -+ -The following output represents a `registries.conf` file where an `ImageDigestMirrorSet` object and an `ImageTagMirrorSet` object were applied. The final two entries are marked `digest-only` and `tag-only` respectively. -+ -.Example output -[source,terminal] ----- -unqualified-search-registries = ["registry.access.redhat.com", "docker.io"] -short-name-mode = "" - -[[registry]] - prefix = "" - location = "registry.access.redhat.com/ubi9/ubi-minimal" <1> - - [[registry.mirror]] - location = "example.io/example/ubi-minimal" <2> - pull-from-mirror = "digest-only" <3> - - [[registry.mirror]] - location = "example.com/example/ubi-minimal" - pull-from-mirror = "digest-only" - -[[registry]] - prefix = "" - location = "registry.example.com" - - [[registry.mirror]] - location = "mirror.example.net/registry-example-com" - pull-from-mirror = "digest-only" - -[[registry]] - prefix = "" - location = "registry.example.com/example" - - [[registry.mirror]] - location = "mirror.example.net" - pull-from-mirror = "digest-only" - -[[registry]] - prefix = "" - location = "registry.example.com/example/myimage" - - [[registry.mirror]] - location = "mirror.example.net/image" - pull-from-mirror = "digest-only" - -[[registry]] - prefix = "" - location = "registry.redhat.io" - - [[registry.mirror]] - location = "mirror.example.com" - pull-from-mirror = "digest-only" - -[[registry]] - prefix = "" - location = "registry.redhat.io/openshift4" - - [[registry.mirror]] - location = "mirror.example.com/redhat" - pull-from-mirror = "digest-only" -[[registry]] - prefix = "" - location = "registry.access.redhat.com/ubi9/ubi-minimal" - blocked = true <4> - - [[registry.mirror]] - location = "example.io/example/ubi-minimal-tag" - pull-from-mirror = "tag-only" <5> ----- -<1> Indicates the repository that is referred to in a pull spec. -<2> Indicates the mirror for that repository. -<3> Indicates that the image pull from the mirror is a digest reference image. -<4> Indicates that the `NeverContactSource` parameter is set for this repository. -<5> Indicates that the image pull from the mirror is a tag reference image. - -.. Pull an image to the node from the source and check if it is resolved by the mirror. -+ -[source,terminal] ----- -sh-4.2# podman pull --log-level=debug registry.access.redhat.com/ubi9/ubi-minimal@sha256:5cf... ----- - -.Troubleshooting repository mirroring - -If the repository mirroring procedure does not work as described, use the following information about how repository mirroring works to help troubleshoot the problem. - -* The first working mirror is used to supply the pulled image. -* The main registry is only used if no other mirror works. -* From the system context, the `Insecure` flags are used as fallback. -* The format of the `/etc/containers/registries.conf` file has changed recently. It is now version 2 and in TOML format. -* You cannot add the same repository to both an `ImageDigestMirrorSet` and an `ImageTagMirrorSet` object. 
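If you mirror by tag instead of by digest, an `ImageTagMirrorSet` object uses the same structure as the `ImageDigestMirrorSet` object in this procedure, but with the `imageTagMirrors` field. The following minimal sketch shows an object that could produce a `tag-only` entry like the one in the example output above; the object name and mirror repository are illustrative only:

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: ImageTagMirrorSet
metadata:
  name: ubi9repo-tags # illustrative name
spec:
  imageTagMirrors:
  - mirrors:
    - example.io/example/ubi-minimal-tag # mirror that serves tag-based pulls
    source: registry.access.redhat.com/ubi9/ubi-minimal # repository referenced in pull specs
    mirrorSourcePolicy: NeverContactSource # do not fall back to the source registry
----

You create and verify the object in the same way as the `ImageDigestMirrorSet` object; note that the MCO restarts the nodes when an `ImageTagMirrorSet` object is deployed.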
- diff --git a/modules/images-configuration-shortname.adoc b/modules/images-configuration-shortname.adoc deleted file mode 100644 index a2b7b0f87274..000000000000 --- a/modules/images-configuration-shortname.adoc +++ /dev/null @@ -1,105 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/image-configuration.adoc -// * post_installation_configuration/preparing-for-users.adoc - -:_content-type: PROCEDURE -[id="images-configuration-shortname_{context}"] -= Adding registries that allow image short names - -You can add registries to search for an image short name by editing the `image.config.openshift.io/cluster` custom resource (CR). {product-title} applies the changes to this CR to all nodes in the cluster. - -An image short name enables you to search for images without including the fully qualified domain name in the pull spec. For example, you could use `rhel7/etcd` instead of `registry.access.redhat.com/rhe7/etcd`. - -You might use short names in situations where using the full path is not practical. For example, if your cluster references multiple internal registries whose DNS changes frequently, you would need to update the fully qualified domain names in your pull specs with each change. In this case, using an image short name might be beneficial. - -When pulling or pushing images, the container runtime searches the registries listed under the `registrySources` parameter in the `image.config.openshift.io/cluster` CR. If you created a list of registries under the `containerRuntimeSearchRegistries` parameter, when pulling an image with a short name, the container runtime searches those registries. - -[WARNING] -==== -Using image short names with public registries is strongly discouraged because the image might not deploy if the public registry requires authentication. Use fully-qualified image names with public registries. - -Red Hat internal or private registries typically support the use of image short names. - -If you list public registries under the `containerRuntimeSearchRegistries` parameter, you expose your credentials to all the registries on the list and you risk network and registry attacks. - -You cannot list multiple public registries under the `containerRuntimeSearchRegistries` parameter if each public registry requires different credentials and a cluster does not list the public registry in the global pull secret. - -For a public registry that requires authentication, you can use an image short name only if the registry has its credentials stored in the global pull secret. -//// -Potentially add the last line to the Ignoring image registry repository mirroring section. -//// -==== - -The Machine Config Operator (MCO) watches the `image.config.openshift.io/cluster` resource for any changes to the registries. When the MCO detects a change, it drains the nodes, applies the change, and uncordons the nodes. After the nodes return to the `Ready` state, if the `containerRuntimeSearchRegistries` parameter is added, the MCO creates a file in the `/etc/containers/registries.conf.d` directory on each node with the listed registries. The file overrides the default list of unqualified search registries in the `/host/etc/containers/registries.conf` file. There is no way to fall back to the default list of unqualified search registries. - -The `containerRuntimeSearchRegistries` parameter works only with the Podman and CRI-O container engines. The registries in the list can be used only in pod specs, not in builds and image streams. - -.Procedure - -. 
Edit the `image.config.openshift.io/cluster` custom resource: -+ -[source,terminal] ----- -$ oc edit image.config.openshift.io/cluster ----- -+ -The following is an example `image.config.openshift.io/cluster` CR: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Image -metadata: - annotations: - release.openshift.io/create-only: "true" - creationTimestamp: "2019-05-17T13:44:26Z" - generation: 1 - name: cluster - resourceVersion: "8302" - selfLink: /apis/config.openshift.io/v1/images/cluster - uid: e34555da-78a9-11e9-b92b-06d6c7da38dc -spec: - allowedRegistriesForImport: - - domainName: quay.io - insecure: false - additionalTrustedCA: - name: myconfigmap - registrySources: - containerRuntimeSearchRegistries: <1> - - reg1.io - - reg2.io - - reg3.io - allowedRegistries: <2> - - example.com - - quay.io - - registry.redhat.io - - reg1.io - - reg2.io - - reg3.io - - image-registry.openshift-image-registry.svc:5000 -... -status: - internalRegistryHostname: image-registry.openshift-image-registry.svc:5000 ----- -<1> Specify registries to use with image short names. You should use image short names with only internal or private registries to reduce possible security risks. -<2> Ensure that any registries listed under `containerRuntimeSearchRegistries` are included in the `allowedRegistries` list. -+ -[NOTE] -==== -When the `allowedRegistries` parameter is defined, all registries, including the `registry.redhat.io` and `quay.io` registries and the default {product-registry}, are blocked unless explicitly listed. If you use this parameter, to prevent pod failure, add all registries including the `registry.redhat.io` and `quay.io` registries and the `internalRegistryHostname` to the `allowedRegistries` list, as they are required by payload images within your environment. For disconnected clusters, mirror registries should also be added. -==== - -. To check that the registries have been added, when a node returns to the `Ready` state, use the following command on the node: -+ -[source,terminal] ----- -$ cat /host/etc/containers/registries.conf.d/01-image-searchRegistries.conf ----- -+ -.Example output -[source,terminal] ----- -unqualified-search-registries = ['reg1.io', 'reg2.io', 'reg3.io'] ----- - diff --git a/modules/images-container-repository-about.adoc b/modules/images-container-repository-about.adoc deleted file mode 100644 index b4441b12c56e..000000000000 --- a/modules/images-container-repository-about.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/images-understand.aodc - -[id="images-container-repository-about_{context}"] -= Image repository - -An image repository is a collection of related container images and tags identifying them. For example, the {product-title} Jenkins images are in the repository: - -[source,text] ----- -docker.io/openshift/jenkins-2-centos7 ----- diff --git a/modules/images-create-guide-general.adoc b/modules/images-create-guide-general.adoc deleted file mode 100644 index 89c34156a1d5..000000000000 --- a/modules/images-create-guide-general.adoc +++ /dev/null @@ -1,152 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/create-images.adoc - -[id="images-create-guide-general_{context}"] -= General container image guidelines - -The following guidelines apply when creating a container image in general, and are independent of whether the images are used on {product-title}. 
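Several of the guidelines that follow refer to individual `Dockerfile` instructions. As a quick orientation, the following sketch applies a few of them at once. The base image, package, paths, and port are placeholders only:

[source,terminal]
----
# Sketch only: the base image, package, paths, and port are placeholders.
# Base the image on a tagged upstream image rather than latest.
FROM registry.example.com/base/runtime:v1
# Install packages and clean the yum cache in the same layer.
RUN yum -y install mypackage && yum clean all -y
# Advertise configuration and version information through environment variables.
ENV APP_VERSION=1.0
# Declare the port the application listens on.
EXPOSE 8080
# Add frequently changing content last so that earlier layers stay cached.
ADD app/ /opt/app/
# Use the exec form so the application runs as PID 1 and receives signals directly.
CMD ["/opt/app/run"]
----

Each of these points is explained in more detail in the sections that follow.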
- -[discrete] -== Reuse images - -Wherever possible, base your image on an appropriate upstream image using the `FROM` statement. This ensures your image can easily pick up security fixes from an upstream image when it is updated, rather than you having to update your dependencies directly. - -In addition, use tags in the `FROM` instruction, for example, `rhel:rhel7`, to make it clear to users exactly which version of an image your image is based on. Using a tag other than `latest` ensures your image is not subjected to breaking changes that might go into the `latest` version of an upstream image. - -[discrete] -== Maintain compatibility within tags - -When tagging your own images, try to maintain backwards compatibility within a tag. For example, if you provide an image named `foo` and it currently includes version `1.0`, you might provide a tag of `foo:v1`. When you update the image, as long as it continues to be compatible with the original image, you can continue to tag the new image `foo:v1`, and downstream consumers of this tag are able to get updates without being broken. - -If you later release an incompatible update, then switch to a new tag, for example `foo:v2`. This allows downstream consumers to move up to the new version at will, but not be inadvertently broken by the new incompatible image. Any downstream consumer using `foo:latest` takes on the risk of any incompatible changes being introduced. - -[discrete] -== Avoid multiple processes - -Do not start multiple services, such as a database and `SSHD`, inside one container. This is not necessary because containers are lightweight and can be easily linked together for orchestrating multiple processes. {product-title} allows you to easily colocate and co-manage related images by grouping them into a single pod. - -This colocation ensures the containers share a network namespace and storage for communication. Updates are also less disruptive as each image can be updated less frequently and independently. Signal handling flows are also clearer with a single process as you do not have to manage routing signals to spawned processes. - -[discrete] -== Use `exec` in wrapper scripts - -Many images use wrapper scripts to do some setup before starting a process for the software being run. If your image uses such a script, that script uses `exec` so that the script's process is replaced by your software. If you do not use `exec`, then signals sent by your container runtime go to your wrapper script instead of your software's process. This is not what you want. - -If you have a wrapper script that starts a process for some server. You start your container, for example, using `podman run -i`, which runs the wrapper script, which in turn starts your process. If you want to close your container with `CTRL+C`. If your wrapper script used `exec` to start the server process, `podman` sends SIGINT to the server process, and everything works as you expect. If you did not use `exec` in your wrapper script, `podman` sends SIGINT to the process for the wrapper script and your process keeps running like nothing happened. - -Also note that your process runs as `PID 1` when running in a container. This means that if your main process terminates, the entire container is stopped, canceling any child processes you launched from your `PID 1` process. - -//// -See the http://blog.phusion.nl/2015/01/20/docker-and-the-pid-1-zombie-reaping-problem/["Docker and the `PID 1` zombie reaping problem"] blog article for additional implications. 
-Also see the https://felipec.wordpress.com/2013/11/04/init/["Demystifying the init system (PID 1)"] blog article for a deep dive on PID 1 and `init` -systems. -//// - -[discrete] -== Clean temporary files - -Remove all temporary files you create during the build process. This also includes any files added with the `ADD` command. For example, run the `yum clean` command after performing `yum install` operations. - -You can prevent the `yum` cache from ending up in an image layer by creating your `RUN` statement as follows: - -[source,terminal] ----- -RUN yum -y install mypackage && yum -y install myotherpackage && yum clean all -y ----- - -Note that if you instead write: - -[source,terminal] ----- -RUN yum -y install mypackage -RUN yum -y install myotherpackage && yum clean all -y ----- - -Then the first `yum` invocation leaves extra files in that layer, and these files cannot be removed when the `yum clean` operation is run later. The extra files are not visible in the final image, but they are present in the underlying layers. - -The current container build process does not allow a command run in a later layer to shrink the space used by the image when something was removed in an earlier layer. However, this may change in the future. This means that if you perform an `rm` command in a later layer, although the files are hidden it does not reduce the overall size of the image to be downloaded. Therefore, as with the `yum clean` example, it is best to remove files in the same command that created them, where possible, so they do not end up written to a layer. - -In addition, performing multiple commands in a single `RUN` statement reduces the number of layers in your image, which improves download and extraction time. - -[discrete] -== Place instructions in the proper order - -The container builder reads the `Dockerfile` and runs the instructions from top to bottom. Every instruction that is successfully executed creates a layer which can be reused the next time this or another image is built. It is very important to place instructions that rarely change at the top of your `Dockerfile`. Doing so ensures the next builds of the same image are very fast because the cache is not invalidated by upper layer changes. - -For example, if you are working on a `Dockerfile` that contains an `ADD` command to install a file you are iterating on, and a `RUN` command to `yum install` a package, it is best to put the `ADD` command last: - -[source,terminal] ----- -FROM foo -RUN yum -y install mypackage && yum clean all -y -ADD myfile /test/myfile ----- - -This way each time you edit `myfile` and rerun `podman build` or `docker build`, the system reuses the cached layer for the `yum` command and only generates the new layer for the `ADD` operation. - -If instead you wrote the `Dockerfile` as: - -[source,terminal] ----- -FROM foo -ADD myfile /test/myfile -RUN yum -y install mypackage && yum clean all -y ----- - -Then each time you changed `myfile` and reran `podman build` or `docker build`, the `ADD` operation would invalidate the `RUN` layer cache, so the `yum` operation must be rerun as well. - -[discrete] -== Mark important ports - -The EXPOSE instruction makes a port in the container available to the host system and other containers. 
While it is possible to specify that a port should be exposed with a `podman run` invocation, using the EXPOSE instruction in a `Dockerfile` makes it easier for both humans and software to use your image by explicitly declaring the ports your software needs to run: - -* Exposed ports show up under `podman ps` associated with containers created from your image. -* Exposed ports are present in the metadata for your image returned by `podman inspect`. -* Exposed ports are linked when you link one container to another. - -[discrete] -== Set environment variables - -It is good practice to set environment variables with the `ENV` instruction. One example is to set the version of your project. This makes it easy for people to find the version without looking at the `Dockerfile`. Another example is advertising a path on the system that could be used by another process, such as `JAVA_HOME`. - -[discrete] -== Avoid default passwords - -Avoid setting default passwords. Many people extend the image and forget to remove or change the default password. This can lead to security issues if a user in production is assigned a well-known password. Passwords are configurable using an environment variable instead. - -If you do choose to set a default password, ensure that an appropriate warning message is displayed when the container is started. The message should inform the user of the value of the default password and explain how to change it, such as what environment variable to set. - -[discrete] -== Avoid sshd - -It is best to avoid running `sshd` in your image. You can use the `podman exec` or `docker exec` command to access containers that are running on the local host. Alternatively, you can use the `oc exec` command or the `oc rsh` command to access containers that are running on the {product-title} cluster. Installing and running `sshd` in your image opens up additional vectors for attack and requirements for security patching. - -[discrete] -== Use volumes for persistent data - -Images use a link:https://docs.docker.com/reference/builder/#volume[volume] for persistent data. This way {product-title} mounts the network storage to the node running the container, and if the container moves to a new node the storage is reattached to that node. By using the volume for all persistent storage needs, the content is preserved even if the container is restarted or moved. If your image writes data to arbitrary locations within the container, that content could not be preserved. - -All data that needs to be preserved even after the container is destroyed must be written to a volume. Container engines support a `readonly` flag for containers, which can be used to strictly enforce good practices about not writing data to ephemeral storage in a container. Designing your image around that capability now makes it easier to take advantage of it later. - -Explicitly defining volumes in your `Dockerfile` makes it easy for consumers of the image to understand what volumes they must define when running your image. - -See the link:https://kubernetes.io/docs/concepts/storage/volumes/[Kubernetes -documentation] for more information on how volumes are used in {product-title}. - -//// -For more information on how Volumes are used in {product-title}, see https://kubernetes.io/docs/concepts/storage/volumes[this documentation]. 
(NOTE to docs team: this link should really go to something in the openshift docs, once we have it) -//// - -[NOTE] -==== -Even with persistent volumes, each instance of your image has its own volume, and the filesystem is not shared between instances. This means the volume cannot be used to share state in a cluster. -==== - -//// -[role="_additional-resources"] -.Additional resources - -* Docker documentation - https://docs.docker.com/articles/dockerfile_best-practices/[Best practices for writing Dockerfiles] - -* Project Atomic documentation - http://www.projectatomic.io/docs/docker-image-author-guidance/[Guidance for Container Image Authors] -//// diff --git a/modules/images-create-guide-openshift.adoc b/modules/images-create-guide-openshift.adoc deleted file mode 100644 index 281ad8162bd5..000000000000 --- a/modules/images-create-guide-openshift.adoc +++ /dev/null @@ -1,130 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/create-images.adoc - -[id="images-create-guide-openshift_{context}"] -= {product-title}-specific guidelines - -The following are guidelines that apply when creating container images specifically -for use on {product-title}. -ifdef::openshift-online[] - -[id="privileges-and-volume-builds_{context}"] -== Privileges and volume builds - -Container images cannot be built using the `VOLUME` directive in the `DOCKERFILE`. Images using a read/write file system must use persistent volumes or `emptyDir` volumes instead of local storage. Instead of specifying a volume in the Dockerfile, specify a directory for local storage and mount either a persistent volume or `emptyDir` volume to that directory when deploying the pod. -endif::[] - -[id="enable-images-for-source-to-image_{context}"] -== Enable images for source-to-image (S2I) - -For images that are intended to run application code provided by a third party, such as a Ruby image designed to run Ruby code provided by a developer, you can enable your image to work with the link:https://github.com/openshift/source-to-image[Source-to-Image (S2I)] build tool. S2I is a framework that makes it easy to write images that take application source code as an input and produce a new image that runs the assembled application as output. - - -[id="use-uid_{context}"] -== Support arbitrary user ids - -By default, {product-title} runs containers using an arbitrarily assigned user ID. This provides additional security against processes escaping the container due to a container engine vulnerability and thereby achieving escalated permissions on the host node. - -For an image to support running as an arbitrary user, directories and files that are written to by processes in the image must be owned by the root group and be read/writable by that group. Files to be executed must also have group execute permissions. - -Adding the following to your Dockerfile sets the directory and file permissions to allow users in the root group to access them in the built image: - -[source,terminal] ----- -RUN chgrp -R 0 /some/directory && \ - chmod -R g=u /some/directory ----- - -Because the container user is always a member of the root group, the container user can read and write these files. - -[WARNING] -==== -Care must be taken when altering the directories and file permissions of sensitive areas of a container, which is no different than to a normal system. - -If applied to sensitive areas, such as `/etc/passwd`, this can allow the modification of such files by unintended users potentially exposing the container or host. 
CRI-O supports the insertion of arbitrary user IDs into the container's `/etc/passwd`, so changing permissions is never required. -==== - -In addition, the processes running in the container must not listen on privileged ports, ports below 1024, since they are not running as a privileged user. - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -[IMPORTANT] -==== -If your S2I image does not include a `USER` declaration with a numeric user, your builds fail by default. To allow images that use either named users or the root `0` user to build in {product-title}, you can add the project's builder service account, `system:serviceaccount::builder`, to the `anyuid` security context constraint (SCC). Alternatively, you can allow all images to run as any user. -==== -endif::[] - -[id="use-services_{context}"] -== Use services for inter-image communication - -For cases where your image needs to communicate with a service provided by another image, such as a web front end image that needs to access a database image to store and retrieve data, your image consumes an {product-title} service. Services provide a static endpoint for access which does not change as containers are stopped, started, or moved. In addition, services provide load balancing for requests. - -//// -For more information see https://kubernetes.io/docs/concepts/services-networking/service/[this documentation]. (NOTE to docs team: this link should really go to something in the openshift docs once we have it) -//// - -[id="provide-common-libraries_{context}"] -== Provide common libraries - -For images that are intended to run application code provided by a third party, ensure that your image contains commonly used libraries for your platform. In particular, provide database drivers for common databases used with your platform. For example, provide JDBC drivers for MySQL and PostgreSQL if you are creating a Java framework image. Doing so prevents the need for common dependencies to be downloaded during application assembly time, speeding up application image builds. It also simplifies the work required by application developers to ensure all of their dependencies are met. - -[id="use-env-vars_{context}"] -== Use environment variables for configuration - -Users of your image are able to configure it without having to create a downstream image based on your image. This means that the runtime configuration is handled using environment variables. For a simple configuration, the running process can consume the environment variables directly. For a more complicated configuration or for runtimes which do not support this, configure the runtime by defining a template configuration file that is processed during startup. During this processing, values supplied using environment variables can be substituted into the configuration file or used to make decisions about what options to set in the configuration file. - -It is also possible and recommended to pass secrets such as certificates and keys into the container using environment variables. This ensures that the secret values do not end up committed in an image and leaked into a container image registry. - -Providing environment variables allows consumers of your image to customize behavior, such as database settings, passwords, and performance tuning, without having to introduce a new layer on top of your image. Instead, they can simply define environment variable values when defining a pod and change those settings without rebuilding the image. 
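For example, a consumer of the image could point the application at a different database or adjust a tuning value directly in the pod definition. The image name and environment variables in this sketch are hypothetical; they only illustrate the pattern:

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: frontend
spec:
  containers:
  - name: app
    image: quay.io/example/frontend:v1 # hypothetical application image
    env:
    - name: DB_HOST # hypothetical variable read by the application at startup
      value: database.example.svc
    - name: CACHE_SIZE_MB # hypothetical tuning value
      value: "256"
----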
- -For extremely complex scenarios, configuration can also be supplied using volumes that would be mounted into the container at runtime. However, if you elect to do it this way you must ensure that your image provides clear error messages on startup when the necessary volume or configuration is not present. - -This topic is related to the Using Services for Inter-image Communication topic in that configuration like datasources are defined in terms of environment variables that provide the service endpoint information. This allows an application to dynamically consume a datasource service that is defined in the {product-title} environment without modifying the application image. - -In addition, tuning is done by inspecting the `cgroups` settings for the container. This allows the image to tune itself to the available memory, CPU, and other resources. For example, Java-based images tune their heap based on the `cgroup` maximum memory parameter to ensure they do not exceed the limits and get an out-of-memory error. - -//// -See the following references for more on how to manage `cgroup` quotas -in containers: - -- Blog article - https://goldmann.pl/blog/2014/09/11/resource-management-in-docker[Resource management in Docker] -- Docker documentation - https://docs.docker.com/engine/admin/runmetrics/[Runtime Metrics] -- Blog article - http://fabiokung.com/2014/03/13/memory-inside-linux-containers[Memory inside Linux containers] -//// - -[id="set-image-metadata_{context}"] -== Set image metadata - -Defining image metadata helps {product-title} better consume your container images, allowing {product-title} to create a better experience for developers using your image. For example, you can add metadata to provide helpful descriptions of your image, or offer suggestions on other images that are needed. - -[id="clustering_{context}"] -== Clustering - -You must fully understand what it means to run multiple instances of your image. In the simplest case, the load balancing function of a service handles routing traffic to all instances of your image. However, many frameworks must share information to perform leader election or failover state; for example, in session replication. - -Consider how your instances accomplish this communication when running in {product-title}. Although pods can communicate directly with each other, their IP addresses change anytime the pod starts, stops, or is moved. Therefore, it is important for your clustering scheme to be dynamic. - -[id="logging_{context}"] -== Logging - -It is best to send all logging to standard out. {product-title} collects standard out from containers and sends it to the centralized logging service where it can be viewed. If you must separate log content, prefix the output with an appropriate keyword, which makes it possible to filter the messages. - -If your image logs to a file, users must use manual operations to enter the running container and retrieve or view the log file. - -[id="liveness-and-readiness-probes_{context}"] -== Liveness and readiness probes - -Document example liveness and readiness probes that can be used with your image. These probes allow users to deploy your image with confidence that traffic is not be routed to the container until it is prepared to handle it, and that the container is restarted if the process gets into an unhealthy state. - -[id="templates_{context}"] -== Templates - -Consider providing an example template with your image. 
A template gives users an easy way to quickly get your image deployed with a working configuration. Your template must include the liveness and readiness probes you documented with the image, for completeness. - -//// -[role="_additional-resources"] -.Additional resources - -* link:https://docs.docker.com/engine/docker-overview/[Docker basics] -* link:https://docs.docker.com/engine/reference/builder/[Dockerfile reference] -* link:http://www.projectatomic.io/docs/docker-image-author-guidance[Project Atomic Guidance for Container Image Authors] -//// diff --git a/modules/images-create-guidelines.adoc b/modules/images-create-guidelines.adoc deleted file mode 100644 index 09725001eb7c..000000000000 --- a/modules/images-create-guidelines.adoc +++ /dev/null @@ -1,7 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/create-images.adoc - -[id="images-create-guidelines_{context}"] -= Learning container best practices - -When creating container images to run on {product-title} there are a number of best practices to consider as an image author to ensure a good experience for consumers of those images. Because images are intended to be immutable and used as-is, the following guidelines help ensure that your images are highly consumable and easy to use on {product-title}. diff --git a/modules/images-create-metadata-define.adoc b/modules/images-create-metadata-define.adoc deleted file mode 100644 index 27b38648b81b..000000000000 --- a/modules/images-create-metadata-define.adoc +++ /dev/null @@ -1,82 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/create-images.adoc - -[id="images-create-metadata-define_{context}"] -= Defining image metadata - -You can use the `LABEL` instruction in a `Dockerfile` to define image -metadata. Labels are similar to environment variables in that they are key value -pairs attached to an image or a container. Labels are different from environment -variable in that they are not visible to the running application and they can -also be used for fast look-up of images and containers. - -link:https://docs.docker.com/engine/reference/builder/#label[Docker -documentation] for more information on the `LABEL` instruction. - -The label names should typically be namespaced. The namespace should be set -accordingly to reflect the project that is going to pick up the labels and use -them. For {product-title} the namespace should be set to `io.openshift` and -for Kubernetes the namespace is `io.k8s`. - -See the https://docs.docker.com/engine/userguide/labels-custom-metadata[Docker custom -metadata] documentation for details about the format. - -.Supported Metadata -[cols="3a,8a",options="header"] -|=== - -|Variable |Description - -|`io.openshift.tags` -|This label contains a list of tags represented as list of comma-separated -string values. The tags are the way to categorize the container images into broad -areas of functionality. Tags help UI and generation tools to suggest relevant -container images during the application creation process. - ----- -LABEL io.openshift.tags mongodb,mongodb24,nosql ----- - -|`io.openshift.wants` -|Specifies a list of tags that the generation tools and the UI might use to -provide relevant suggestions if you do not have the container images with given tags -already. For example, if the container image wants `mysql` and `redis` and you -do not have the container image with `redis` tag, then UI might suggest you to add -this image into your deployment. 
- ----- -LABEL io.openshift.wants mongodb,redis ----- - -|`io.k8s.description` -|This label can be used to give the container image consumers more detailed -information about the service or functionality this image provides. The UI can -then use this description together with the container image name to provide more -human friendly information to end users. - ----- -LABEL io.k8s.description The MySQL 5.5 Server with master-slave replication support ----- - -|`io.openshift.non-scalable` -|An image might use this variable to suggest that it does not support scaling. -The UI will then communicate this to consumers of that image. Being not-scalable -basically means that the value of `replicas` should initially not be set higher -than 1. - ----- -LABEL io.openshift.non-scalable true ----- - -|`io.openshift.min-memory` and `io.openshift.min-cpu` -|This label suggests how much resources the container image might need to -work properly. The UI might warn the user that deploying this container image may -exceed their user quota. The values must be compatible with -Kubernetes quantity. - ----- -LABEL io.openshift.min-memory 16Gi -LABEL io.openshift.min-cpu 4 ----- - -|=== diff --git a/modules/images-create-metadata.adoc b/modules/images-create-metadata.adoc deleted file mode 100644 index 5279bda0d52c..000000000000 --- a/modules/images-create-metadata.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/create-images.adoc - -[id="images-create-metadata_{context}"] -= Including metadata in images - -Defining image metadata helps {product-title} better consume your container images, allowing {product-title} to create a better experience for developers using your image. For example, you can add metadata to provide helpful descriptions of your image, or offer suggestions on other images that may also be needed. - -This topic only defines the metadata needed by the current set of use cases. Additional metadata or use cases may be added in the future. - -== Defining image metadata -You can use the `LABEL` instruction in a `Dockerfile` to define image metadata. Labels are similar to environment variables in that they are key value pairs attached to an image or a container. Labels are different from environment variable in that they are not visible to the running application and they can also be used for fast look-up of images and containers. - -link:https://docs.docker.com/engine/reference/builder/#label[Docker -documentation] for more information on the `LABEL` instruction. - -The label names are typically namespaced. The namespace is set accordingly to reflect the project that is going to pick up the labels and use them. For {product-title} the namespace is set to `io.openshift` and for Kubernetes the namespace is `io.k8s`. - -See the https://docs.docker.com/engine/userguide/labels-custom-metadata[Docker custom metadata] documentation for details about the format. - -.Supported Metadata -[cols="3a,8a",options="header"] -|=== - -|Variable |Description - -|`io.openshift.tags` -|This label contains a list of tags represented as a list of comma-separated string values. The tags are the way to categorize the container images into broad areas of functionality. Tags help UI and generation tools to suggest relevant container images during the application creation process. 
- ----- -LABEL io.openshift.tags mongodb,mongodb24,nosql ----- - -|`io.openshift.wants` -|Specifies a list of tags that the generation tools and the UI uses to provide relevant suggestions if you do not have the container images with specified tags already. For example, if the container image wants `mysql` and `redis` and you do not have the container image with `redis` tag, then UI can suggest you to add this image into your deployment. - ----- -LABEL io.openshift.wants mongodb,redis ----- - -|`io.k8s.description` -|This label can be used to give the container image consumers more detailed information about the service or functionality this image provides. The UI can then use this description together with the container image name to provide more human friendly information to end users. - ----- -LABEL io.k8s.description The MySQL 5.5 Server with master-slave replication support ----- - -|`io.openshift.non-scalable` -|An image can use this variable to suggest that it does not support scaling. The UI then communicates this to consumers of that image. Being not-scalable means that the value of `replicas` should initially not be set higher than `1`. - ----- -LABEL io.openshift.non-scalable true ----- - -|`io.openshift.min-memory` and `io.openshift.min-cpu` -|This label suggests how much resources the container image needs to work properly. The UI can warn the user that deploying this container image may exceed their user quota. The values must be compatible with Kubernetes quantity. - ----- -LABEL io.openshift.min-memory 16Gi -LABEL io.openshift.min-cpu 4 ----- - -|=== diff --git a/modules/images-create-s2i-build.adoc b/modules/images-create-s2i-build.adoc deleted file mode 100644 index c90b36ee958c..000000000000 --- a/modules/images-create-s2i-build.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/build-strategies.adoc -// * openshift_images/create-images.adoc - -:_content-type: CONCEPT -[id="images-create-s2i-build_{context}"] -= Understanding the source-to-image build process - -The build process consists of the following three fundamental elements, which are combined into a final container image: - -* Sources -* Source-to-image (S2I) scripts -* Builder image - -S2I generates a Dockerfile with the builder image as the first `FROM` instruction. The Dockerfile generated by S2I is then passed to Buildah. diff --git a/modules/images-create-s2i-scripts.adoc b/modules/images-create-s2i-scripts.adoc deleted file mode 100644 index 9ee9af33150a..000000000000 --- a/modules/images-create-s2i-scripts.adoc +++ /dev/null @@ -1,132 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/build-strategies.adoc -// * openshift_images/create-images.adoc - -[id="images-create-s2i-scripts_{context}"] -= How to write source-to-image scripts - -You can write source-to-image (S2I) scripts in any programming language, as long as the scripts are executable inside the builder image. S2I supports multiple options providing `assemble`/`run`/`save-artifacts` scripts. All of these locations are checked on each build in the following order: - -. A script specified in the build configuration. -. A script found in the application source `.s2i/bin` directory. -. A script found at the default image URL with the `io.openshift.s2i.scripts-url` label. 
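For example, a builder image can advertise where its scripts live by setting this label in its Dockerfile. This is only a sketch; the `/usr/libexec/s2i` path is a commonly used location rather than a requirement, and the `COPY` source directory is illustrative:

[source,dockerfile]
----
# Ship the S2I scripts inside the builder image and advertise their location
COPY ./s2i/bin/ /usr/libexec/s2i
LABEL io.openshift.s2i.scripts-url=image:///usr/libexec/s2i
----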
- -Both the `io.openshift.s2i.scripts-url` label specified in the image and the script specified in a build configuration can take one of the following forms: - -* `image:///path_to_scripts_dir`: absolute path inside the image to a directory where the S2I scripts are located. -* `$$file:///path_to_scripts_dir$$`: relative or absolute path to a directory on the host where the S2I scripts are located. -* `http(s)://path_to_scripts_dir`: URL to a directory where the S2I scripts are located. - -.S2I scripts -[cols="3a,8a",options="header"] -|=== - -|Script |Description - -|`assemble` -|The `assemble` script builds the application artifacts from a source and places them into appropriate directories inside the image. This script is required. The workflow for this script is: - -. Optional: Restore build artifacts. If you want to support incremental builds, make sure to define `save-artifacts` as well. -. Place the application source in the desired location. -. Build the application artifacts. -. Install the artifacts into locations appropriate for them to run. - -|`run` -|The `run` script executes your application. This script is required. - -|`save-artifacts` -|The `save-artifacts` script gathers all dependencies that can speed up the build processes that follow. This script is optional. For example: - -* For Ruby, `gems` installed by Bundler. -* For Java, `.m2` contents. - -These dependencies are gathered into a `tar` file and streamed to the standard output. - -|`usage` -|The `usage` script allows you to inform the user how to properly use your image. This script is optional. - -|`test/run` -|The `test/run` script allows you to create a process to check if the image is working correctly. This script is optional. The proposed flow of that process is: - -. Build the image. -. Run the image to verify the `usage` script. -. Run `s2i build` to verify the `assemble` script. -. Optional: Run `s2i build` again to verify the `save-artifacts` and `assemble` scripts save and restore artifacts functionality. -. Run the image to verify the test application is working. - -[NOTE] -==== -The suggested location to put the test application built by your `test/run` script is the `test/test-app` directory in your image repository. -==== -|=== - -*Example S2I scripts* - -The following example S2I scripts are written in Bash. Each example assumes its `tar` contents are unpacked into the `/tmp/s2i` directory. - -.`assemble` script: -[source,bash] ----- -#!/bin/bash - -# restore build artifacts -if [ "$(ls /tmp/s2i/artifacts/ 2>/dev/null)" ]; then - mv /tmp/s2i/artifacts/* $HOME/. 
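  # the artifacts restored above were saved by this image's save-artifacts script during a previous build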
-fi - -# move the application source -mv /tmp/s2i/src $HOME/src - -# build application artifacts -pushd ${HOME} -make all - -# install the artifacts -make install -popd ----- - -.`run` script: -[source,bash] ----- -#!/bin/bash - -# run the application -/opt/application/run.sh ----- - - -.`save-artifacts` script: -[source,bash] ----- -#!/bin/bash - -pushd ${HOME} -if [ -d deps ]; then - # all deps contents to tar stream - tar cf - deps -fi -popd ----- - -.`usage` script: -[source,bash] ----- -#!/bin/bash - -# inform the user how to use the image -cat < ----- -+ -For example: -+ -[source,terminal] ----- -$ oc describe is/python ----- -+ -.Example output -[source,terminal] ----- -Name: python -Namespace: default -Created: About a minute ago -Labels: -Annotations: openshift.io/image.dockerRepositoryCheck=2017-10-02T17:05:11Z -Docker Pull Spec: docker-registry.default.svc:5000/default/python -Image Lookup: local=false -Unique Images: 1 -Tags: 1 - -3.5 - tagged from centos/python-35-centos7 - - * centos/python-35-centos7@sha256:49c18358df82f4577386404991c51a9559f243e0b1bdc366df25 - About a minute ago ----- - -* To get all of the information available about a particular image stream tag, enter the following command: -+ -[source,terminal] ----- -$ oc describe istag/: ----- -+ -For example: -+ -[source,terminal] ----- -$ oc describe istag/python:latest ----- -+ -.Example output -[source,terminal] ----- -Image Name: sha256:49c18358df82f4577386404991c51a9559f243e0b1bdc366df25 -Docker Image: centos/python-35-centos7@sha256:49c18358df82f4577386404991c51a9559f243e0b1bdc366df25 -Name: sha256:49c18358df82f4577386404991c51a9559f243e0b1bdc366df25 -Created: 2 minutes ago -Image Size: 251.2 MB (first layer 2.898 MB, last binary layer 72.26 MB) -Image Created: 2 weeks ago -Author: -Arch: amd64 -Entrypoint: container-entrypoint -Command: /bin/sh -c $STI_SCRIPTS_PATH/usage -Working Dir: /opt/app-root/src -User: 1001 -Exposes Ports: 8080/tcp -Docker Labels: build-date=20170801 ----- -+ -[NOTE] -==== -More information is output than shown. -==== - -* Enter the following command to discover which architecture or operating system that an image stream tag supports: -+ -[source,terminal] ----- -$ oc get istag -ojsonpath="{range .image.dockerImageManifests[*]}{.os}/{.architecture}{'\n'}{end}" ----- -+ -For example: -+ -[source,terminal] ----- -$ oc get istag busybox:latest -ojsonpath="{range .image.dockerImageManifests[*]}{.os}/{.architecture}{'\n'}{end}" ----- -+ -.Example output -[source,terminal] ----- -linux/amd64 -linux/arm -linux/arm64 -linux/386 -linux/mips64le -linux/ppc64le -linux/riscv64 -linux/s390x ----- \ No newline at end of file diff --git a/modules/images-id.adoc b/modules/images-id.adoc deleted file mode 100644 index b78a5cf67c01..000000000000 --- a/modules/images-id.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/images-understand.aodc - -[id="images-id_{context}"] -= Image IDs - -An image ID is a SHA (Secure Hash Algorithm) code that can be used to pull an image. A SHA image ID cannot change. A specific SHA identifier always references the exact same container image content. 
For example: - -[source,text] ----- -docker.io/openshift/jenkins-2-centos7@sha256:ab312bda324 ----- diff --git a/modules/images-image-pull-policy-overview.adoc b/modules/images-image-pull-policy-overview.adoc deleted file mode 100644 index e17c0746dc30..000000000000 --- a/modules/images-image-pull-policy-overview.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-pull-policy - -:_content-type: CONCEPT -[id="images-image-pull-policy-overview_{context}"] -= Image pull policy overview - -When {product-title} creates containers, it uses the container `imagePullPolicy` to determine if the image should be pulled prior to starting the container. There are three possible values for `imagePullPolicy`: - -.`imagePullPolicy` values -[width="50%",options="header"] -|=== -|Value |Description - -|`Always` -|Always pull the image. - -|`IfNotPresent` -|Only pull the image if it does not already exist on the node. - -|`Never` -|Never pull the image. -|=== - - -If a container `imagePullPolicy` parameter is not specified, {product-title} sets it based on the image tag: - -. If the tag is `latest`, {product-title} defaults `imagePullPolicy` to `Always`. -. Otherwise, {product-title} defaults `imagePullPolicy` to `IfNotPresent`. diff --git a/modules/images-image-registry-about.adoc b/modules/images-image-registry-about.adoc deleted file mode 100644 index 9df7272c9ff6..000000000000 --- a/modules/images-image-registry-about.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/images-understand.adoc - -[id="images-image-registry-about_{context}"] -= Image registry - -An image registry is a content server that can store and serve container images. For example: - -[source,text] ----- -registry.redhat.io ----- - -A registry contains a collection of one or more image repositories, which contain one or more tagged images. Red Hat provides a registry at `registry.redhat.io` for subscribers. {product-title} can also supply its own {product-registry} for managing custom container images. diff --git a/modules/images-imagestream-adding-tags.adoc b/modules/images-imagestream-adding-tags.adoc deleted file mode 100644 index 5965b854d45b..000000000000 --- a/modules/images-imagestream-adding-tags.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -:_content-type: PROCEDURE -[id="images-imagestream-adding-tags_{context}"] -= Adding tags to an image stream - -You can add additional tags to image streams. - -.Procedure - -* Add a tag that points to one of the existing tags by using the `oc tag`command: -+ -[source,terminal] ----- -$ oc tag ----- -+ -For example: -+ -[source,terminal] ----- -$ oc tag python:3.5 python:latest ----- -+ -.Example output -[source,terminal] ----- -Tag python:latest set to python@sha256:49c18358df82f4577386404991c51a9559f243e0b1bdc366df25. ----- - -* Confirm the image stream has two tags, one, `3.5`, pointing at the external container image and another tag, `latest`, pointing to the same image because it was created based on the first tag. 
-+ -[source,terminal] ----- -$ oc describe is/python ----- -+ -.Example output -[source,terminal] ----- -Name: python -Namespace: default -Created: 5 minutes ago -Labels: -Annotations: openshift.io/image.dockerRepositoryCheck=2017-10-02T17:05:11Z -Docker Pull Spec: docker-registry.default.svc:5000/default/python -Image Lookup: local=false -Unique Images: 1 -Tags: 2 - -latest - tagged from python@sha256:49c18358df82f4577386404991c51a9559f243e0b1bdc366df25 - - * centos/python-35-centos7@sha256:49c18358df82f4577386404991c51a9559f243e0b1bdc366df25 - About a minute ago - -3.5 - tagged from centos/python-35-centos7 - - * centos/python-35-centos7@sha256:49c18358df82f4577386404991c51a9559f243e0b1bdc366df25 - 5 minutes ago ----- diff --git a/modules/images-imagestream-configure.adoc b/modules/images-imagestream-configure.adoc deleted file mode 100644 index d401be2713c0..000000000000 --- a/modules/images-imagestream-configure.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -[id="images-imagestream-configure_{context}"] -= Configuring image streams - -An `ImageStream` object file contains the following elements. - -[id="image-stream-object-definition_{context}"] -.Imagestream object definition - -[source,yaml] ----- -apiVersion: image.openshift.io/v1 -kind: ImageStream -metadata: - annotations: - openshift.io/generated-by: OpenShiftNewApp - labels: - app: ruby-sample-build - template: application-template-stibuild - name: origin-ruby-sample <1> - namespace: test -spec: {} -status: - dockerImageRepository: 172.30.56.218:5000/test/origin-ruby-sample <2> - tags: - - items: - - created: 2017-09-02T10:15:09Z - dockerImageReference: 172.30.56.218:5000/test/origin-ruby-sample@sha256:47463d94eb5c049b2d23b03a9530bf944f8f967a0fe79147dd6b9135bf7dd13d <3> - generation: 2 - image: sha256:909de62d1f609a717ec433cc25ca5cf00941545c83a01fb31527771e1fab3fc5 <4> - - created: 2017-09-01T13:40:11Z - dockerImageReference: 172.30.56.218:5000/test/origin-ruby-sample@sha256:909de62d1f609a717ec433cc25ca5cf00941545c83a01fb31527771e1fab3fc5 - generation: 1 - image: sha256:47463d94eb5c049b2d23b03a9530bf944f8f967a0fe79147dd6b9135bf7dd13d - tag: latest <5> ----- - -<1> The name of the image stream. -<2> Docker repository path where new images can be pushed to add or update them in this image stream. -<3> The SHA identifier that this image stream tag currently references. Resources that reference this image stream tag use this identifier. -<4> The SHA identifier that this image stream tag previously referenced. Can be used to rollback to an older image. -<5> The image stream tag name. diff --git a/modules/images-imagestream-external-image-tags.adoc b/modules/images-imagestream-external-image-tags.adoc deleted file mode 100644 index 6cd16d53c01b..000000000000 --- a/modules/images-imagestream-external-image-tags.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -:_content-type: PROCEDURE -[id="images-imagestream-external-image-tags_{context}"] -= Adding tags for an external image - -You can add tags for external images. - -.Procedure - -* Add tags pointing to internal or external images, by using the `oc tag` command for all tag-related operations: -+ -[source,terminal] ----- -$ oc tag ----- -+ -For example, this command maps the `docker.io/python:3.6.0` image to the `3.6` tag in the `python` image stream. 
-+ -[source,terminal] ----- -$ oc tag docker.io/python:3.6.0 python:3.6 ----- -+ -.Example output -[source,terminal] ----- -Tag python:3.6 set to docker.io/python:3.6.0. ----- -+ -If the external image is secured, you must create a secret with credentials for accessing that registry. diff --git a/modules/images-imagestream-image.adoc b/modules/images-imagestream-image.adoc deleted file mode 100644 index 4edd0f74a7b5..000000000000 --- a/modules/images-imagestream-image.adoc +++ /dev/null @@ -1,7 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/images-understand.aodc - -[id="images-imagestream-image_{context}"] -= Image stream images - -An image stream image allows you to retrieve a specific container image from a particular image stream where it is tagged. An image stream image is an API resource object that pulls together some metadata about a particular image SHA identifier. diff --git a/modules/images-imagestream-import-images-private-registry.adoc b/modules/images-imagestream-import-images-private-registry.adoc deleted file mode 100644 index 052da8c0cc8b..000000000000 --- a/modules/images-imagestream-import-images-private-registry.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// * assembly/openshift_images/managing-image-streams.adoc - -:_content-type: PROCEDURE -[id="images-imagestream-import-images-private-registry_{context}"] -= Importing images and image streams from private registries - -An image stream can be configured to import tag and image metadata from private image registries requiring authentication. This procedures applies if you change the registry that the Cluster Samples Operator uses to pull content from to something other than link:https://registry.redhat.io[registry.redhat.io]. - -[NOTE] -==== -When importing from insecure or secure registries, the registry URL defined in the secret must include the `:80` port suffix or the secret is not used when attempting to import from the registry. -==== - -.Procedure - -. You must create a `secret` object that is used to store your credentials by entering the following command: -+ -[source,terminal] ----- -$ oc create secret generic --from-file=.dockerconfigjson= --type=kubernetes.io/dockerconfigjson ----- -+ -. After the secret is configured, create the new image stream or enter the `oc import-image` command: -+ -[source,terminal] ----- -$ oc import-image --from= --confirm ----- -+ -During the import process, {product-title} picks up the secrets and provides them to the remote party. diff --git a/modules/images-imagestream-import-import-mode.adoc b/modules/images-imagestream-import-import-mode.adoc deleted file mode 100644 index 13824e32806c..000000000000 --- a/modules/images-imagestream-import-import-mode.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -:_content-type: PROCEDURE -[id="images-imagestream-import-import-mode_{context}"] -= Working with manifest lists - -You can import a single sub-manifest, or all manifests, of a manifest list when using `oc import-image` or `oc tag` CLI commands by adding the `--import-mode` flag. - -Refer to the commands below to create an image stream that includes a single sub-manifest or multi-architecture images. 
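The `--import-mode` flag corresponds to the `importPolicy.importMode` field of an image stream tag, so the same choice can also be expressed declaratively. The following is a rough sketch against the `image.openshift.io/v1` API; the image stream name and pull spec are placeholders:

[source,yaml]
----
apiVersion: image.openshift.io/v1
kind: ImageStream
metadata:
  name: my-multiarch-app <1>
spec:
  tags:
  - name: latest
    from:
      kind: DockerImage
      name: registry.example.com/myproject/my-multiarch-app:latest <2>
    importPolicy:
      importMode: PreserveOriginal <3>
    referencePolicy:
      type: Local
----
<1> Placeholder image stream name.
<2> Placeholder pull spec for the external manifest list.
<3> `PreserveOriginal` keeps the whole manifest list; `Legacy` imports a single sub-manifest.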
- -.Procedure - -* Create an image stream that includes multi-architecture images, and sets the import mode to `PreserveOriginal`, by entering the following command: -+ -[source,terminal] ----- -$ oc import-image --from=// \ ---import-mode='PreserveOriginal' --reference-policy=local --confirm ----- -+ -.Example output -+ -[source,terminal] ----- ---- -Arch: -Manifests: linux/amd64 sha256:6e325b86566fafd3c4683a05a219c30c421fbccbf8d87ab9d20d4ec1131c3451 - linux/arm64 sha256:d8fad562ffa75b96212c4a6dc81faf327d67714ed85475bf642729703a2b5bf6 - linux/ppc64le sha256:7b7e25338e40d8bdeb1b28e37fef5e64f0afd412530b257f5b02b30851f416e1 ---- ----- - -* Alternatively, enter the following command to import an image with the `Legacy` import mode, which discards manifest lists and imports a single sub-manifest: -+ -[source,terminal] ----- -$ oc import-image --from=// \ ---import-mode='Legacy' --confirm ----- -+ -[NOTE] -==== -The `--import-mode=` default value is `Legacy`. Excluding this value, or failing to specify either `Legacy` or `PreserveOriginal`, imports a single sub-manifest. An invalid import mode returns the following error: `error: valid ImportMode values are Legacy or PreserveOriginal`. -==== - -[discrete] -[id="images-imagestream-import-import-mode-limitations"] -== Limitations - -Working with manifest lists has the following limitations: - -* In some cases, users might want to use sub-manifests directly. When `oc adm prune images` is run, or the `CronJob` pruner runs, they cannot detect when a sub-manifest list is used. As a result, an administrator using `oc adm prune images`, or the `CronJob` pruner, might delete entire manifest lists, including sub-manifests. -+ -To avoid this limitation, you can use the manifest list by tag or by digest instead. diff --git a/modules/images-imagestream-import.adoc b/modules/images-imagestream-import.adoc deleted file mode 100644 index 5e3519586319..000000000000 --- a/modules/images-imagestream-import.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -:_content-type: PROCEDURE -[id="images-imagestream-import_{context}"] -= Configuring periodic importing of image stream tags - -When working with an external container image registry, to periodically re-import an image, for example to get latest security updates, you can use the `--scheduled` flag. - -.Procedure - -. Schedule importing images: -+ -[source,terminal] ----- -$ oc tag --scheduled ----- -+ -For example: -+ -[source,terminal] ----- -$ oc tag docker.io/python:3.6.0 python:3.6 --scheduled ----- -+ -.Example output -[source,terminal] ----- -Tag python:3.6 set to import docker.io/python:3.6.0 periodically. ----- -+ -This command causes {product-title} to periodically update this particular image stream tag. This period is a cluster-wide setting set to 15 minutes by default. - -. Remove the periodic check, re-run above command but omit the `--scheduled` flag. This will reset its behavior to default. 
-+ -[source,terminal] ----- -$ oc tag ----- \ No newline at end of file diff --git a/modules/images-imagestream-mapping.adoc b/modules/images-imagestream-mapping.adoc deleted file mode 100644 index 305bcde104f2..000000000000 --- a/modules/images-imagestream-mapping.adoc +++ /dev/null @@ -1,129 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -[id="images-imagestream-mapping_{context}"] -= Image stream mapping - -When the integrated registry receives a new image, it creates and sends an image stream mapping to {product-title}, providing the image's project, name, tag, and image metadata. - -[NOTE] -==== -Configuring image stream mappings is an advanced feature. -==== - -This information is used to create a new image, if it does not already exist, and to tag the image into the image stream. {product-title} stores complete metadata about each image, such as commands, entry point, and environment variables. Images in {product-title} are immutable and the maximum name length is 63 characters. - -The following image stream mapping example results in an image being tagged as `test/origin-ruby-sample:latest`: - -.Image stream mapping object definition - -[source,yaml] ----- -apiVersion: image.openshift.io/v1 -kind: ImageStreamMapping -metadata: - creationTimestamp: null - name: origin-ruby-sample - namespace: test -tag: latest -image: - dockerImageLayers: - - name: sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef - size: 0 - - name: sha256:ee1dd2cb6df21971f4af6de0f1d7782b81fb63156801cfde2bb47b4247c23c29 - size: 196634330 - - name: sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef - size: 0 - - name: sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef - size: 0 - - name: sha256:ca062656bff07f18bff46be00f40cfbb069687ec124ac0aa038fd676cfaea092 - size: 177723024 - - name: sha256:63d529c59c92843c395befd065de516ee9ed4995549f8218eac6ff088bfa6b6e - size: 55679776 - - name: sha256:92114219a04977b5563d7dff71ec4caa3a37a15b266ce42ee8f43dba9798c966 - size: 11939149 - dockerImageMetadata: - Architecture: amd64 - Config: - Cmd: - - /usr/libexec/s2i/run - Entrypoint: - - container-entrypoint - Env: - - RACK_ENV=production - - OPENSHIFT_BUILD_NAMESPACE=test - - OPENSHIFT_BUILD_SOURCE=https://github.com/openshift/ruby-hello-world.git - - EXAMPLE=sample-app - - OPENSHIFT_BUILD_NAME=ruby-sample-build-1 - - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - - STI_SCRIPTS_URL=image:///usr/libexec/s2i - - STI_SCRIPTS_PATH=/usr/libexec/s2i - - HOME=/opt/app-root/src - - BASH_ENV=/opt/app-root/etc/scl_enable - - ENV=/opt/app-root/etc/scl_enable - - PROMPT_COMMAND=. 
/opt/app-root/etc/scl_enable - - RUBY_VERSION=2.2 - ExposedPorts: - 8080/tcp: {} - Labels: - build-date: 2015-12-23 - io.k8s.description: Platform for building and running Ruby 2.2 applications - io.k8s.display-name: 172.30.56.218:5000/test/origin-ruby-sample:latest - io.openshift.build.commit.author: Ben Parees - io.openshift.build.commit.date: Wed Jan 20 10:14:27 2016 -0500 - io.openshift.build.commit.id: 00cadc392d39d5ef9117cbc8a31db0889eedd442 - io.openshift.build.commit.message: 'Merge pull request #51 from php-coder/fix_url_and_sti' - io.openshift.build.commit.ref: master - io.openshift.build.image: centos/ruby-22-centos7@sha256:3a335d7d8a452970c5b4054ad7118ff134b3a6b50a2bb6d0c07c746e8986b28e - io.openshift.build.source-location: https://github.com/openshift/ruby-hello-world.git - io.openshift.builder-base-version: 8d95148 - io.openshift.builder-version: 8847438ba06307f86ac877465eadc835201241df - io.openshift.s2i.scripts-url: image:///usr/libexec/s2i - io.openshift.tags: builder,ruby,ruby22 - io.s2i.scripts-url: image:///usr/libexec/s2i - license: GPLv2 - name: CentOS Base Image - vendor: CentOS - User: "1001" - WorkingDir: /opt/app-root/src - Container: 86e9a4a3c760271671ab913616c51c9f3cea846ca524bf07c04a6f6c9e103a76 - ContainerConfig: - AttachStdout: true - Cmd: - - /bin/sh - - -c - - tar -C /tmp -xf - && /usr/libexec/s2i/assemble - Entrypoint: - - container-entrypoint - Env: - - RACK_ENV=production - - OPENSHIFT_BUILD_NAME=ruby-sample-build-1 - - OPENSHIFT_BUILD_NAMESPACE=test - - OPENSHIFT_BUILD_SOURCE=https://github.com/openshift/ruby-hello-world.git - - EXAMPLE=sample-app - - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - - STI_SCRIPTS_URL=image:///usr/libexec/s2i - - STI_SCRIPTS_PATH=/usr/libexec/s2i - - HOME=/opt/app-root/src - - BASH_ENV=/opt/app-root/etc/scl_enable - - ENV=/opt/app-root/etc/scl_enable - - PROMPT_COMMAND=. /opt/app-root/etc/scl_enable - - RUBY_VERSION=2.2 - ExposedPorts: - 8080/tcp: {} - Hostname: ruby-sample-build-1-build - Image: centos/ruby-22-centos7@sha256:3a335d7d8a452970c5b4054ad7118ff134b3a6b50a2bb6d0c07c746e8986b28e - OpenStdin: true - StdinOnce: true - User: "1001" - WorkingDir: /opt/app-root/src - Created: 2016-01-29T13:40:00Z - DockerVersion: 1.8.2.fc21 - Id: 9d7fd5e2d15495802028c569d544329f4286dcd1c9c085ff5699218dbaa69b43 - Parent: 57b08d979c86f4500dc8cad639c9518744c8dd39447c055a3517dc9c18d6fccd - Size: 441976279 - apiVersion: "1.0" - kind: DockerImage - dockerImageMetadataVersion: "1.0" - dockerImageReference: 172.30.56.218:5000/test/origin-ruby-sample@sha256:47463d94eb5c049b2d23b03a9530bf944f8f967a0fe79147dd6b9135bf7dd13d ----- diff --git a/modules/images-imagestream-periodic-import-list.adoc b/modules/images-imagestream-periodic-import-list.adoc deleted file mode 100644 index c736f6c4405d..000000000000 --- a/modules/images-imagestream-periodic-import-list.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -:_content-type: PROCEDURE -[id="images-imagestream-periodic-import-list_{context}"] -= Configuring periodic importing of manifest lists - -To periodically re-import a manifest list, you can use the `--scheduled` flag. 
- -.Procedure - -* Set the image stream to periodically update the manifest list by entering the following command: -+ -[source,terminal] ----- -$ oc import-image --from=// \ ---import-mode='PreserveOriginal' --scheduled=true ----- \ No newline at end of file diff --git a/modules/images-imagestream-remove-tag.adoc b/modules/images-imagestream-remove-tag.adoc deleted file mode 100644 index 7a7e94d53003..000000000000 --- a/modules/images-imagestream-remove-tag.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -:_content-type: PROCEDURE -[id="images-imagestream-remove-tag_{context}"] -= Removing image stream tags - -You can remove old tags from an image stream. - -.Procedure - -* Remove old tags from an image stream: -+ -[source,terminal] ----- -$ oc tag -d ----- -+ -For example: -+ -[source,terminal] ----- -$ oc tag -d python:3.5 ----- -+ -.Example output -[source,terminal] ----- -Deleted tag default/python:3.5. ----- diff --git a/modules/images-imagestream-specify-architecture.adoc b/modules/images-imagestream-specify-architecture.adoc deleted file mode 100644 index bc3d6dbe1dad..000000000000 --- a/modules/images-imagestream-specify-architecture.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// * assembly/openshift_images/managing-image-streams.adoc - -:_content-type: CONCEPT -[id="images-imagestream-specify-architecture_{context}"] -= Specifying architecture for --import-mode - -You can swap your imported image stream between multi-architecture and single architecture by excluding or including the `--import-mode=` flag - -.Procedure - -* Run the following command to update your image stream from multi-architecture to single architecture by excluding the `--import-mode=` flag: -+ -[source,terminal] ----- -$ oc import-image --from=// ----- - -* Run the following command to update your image stream from single-architecture to multi-architecture: -+ -[source,terminal] ----- -$ oc import-image --from=// \ ---import-mode='PreserveOriginal' ----- \ No newline at end of file diff --git a/modules/images-imagestream-ssl-import-list.adoc b/modules/images-imagestream-ssl-import-list.adoc deleted file mode 100644 index 2546dd13f834..000000000000 --- a/modules/images-imagestream-ssl-import-list.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -:_content-type: PROCEDURE -[id="images-imagestream-ssl-import-list_{context}"] -= Configuring SSL/TSL when importing manifest lists - -To configure SSL/TSL when importing a manifest list, you can use the `--insecure` flag. - -.Procedure - -* Set `--insecure=true` so that importing a manifest list skips SSL/TSL verification. For example: -+ -[source,terminal] ----- -$ oc import-image --from=// \ ---import-mode='PreserveOriginal' --insecure=true ----- \ No newline at end of file diff --git a/modules/images-imagestream-tag.adoc b/modules/images-imagestream-tag.adoc deleted file mode 100644 index e2a7387ade13..000000000000 --- a/modules/images-imagestream-tag.adoc +++ /dev/null @@ -1,7 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/images-understand.aodc - -[id="images-imagestream-tag_{context}"] -= Image stream tags - -An image stream tag is a named pointer to an image in an image stream. An image stream tag is similar to a container image tag. 
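For example, the following minimal image stream sketch defines a `3.5` tag as a named pointer to an external image; the tag can then be referenced as `python:3.5`:

[source,yaml]
----
apiVersion: image.openshift.io/v1
kind: ImageStream
metadata:
  name: python
spec:
  tags:
  - name: "3.5" <1>
    from:
      kind: DockerImage
      name: centos/python-35-centos7 <2>
----
<1> The image stream tag.
<2> The container image that the tag points to.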
diff --git a/modules/images-imagestream-trigger.adoc b/modules/images-imagestream-trigger.adoc deleted file mode 100644 index 256dadcf7f39..000000000000 --- a/modules/images-imagestream-trigger.adoc +++ /dev/null @@ -1,7 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/images-understand.aodc - -[id="images-imagestream-trigger_{context}"] -= Image stream triggers - -An image stream trigger causes a specific action when an image stream tag changes. For example, importing can cause the value of the tag to change, which causes a trigger to fire when there are deployments, builds, or other resources listening for those. diff --git a/modules/images-imagestream-update-tag.adoc b/modules/images-imagestream-update-tag.adoc deleted file mode 100644 index 4ec866a23446..000000000000 --- a/modules/images-imagestream-update-tag.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -:_content-type: PROCEDURE -[id="images-imagestream-update-tag_{context}"] -= Updating image stream tags - -You can update a tag to reflect another tag in an image stream. - -.Procedure - -* Update a tag: -+ -[source,terminal] ----- -$ oc tag ----- -+ -For example, the following updates the `latest` tag to reflect the `3.6` tag in an image stream: -+ -[source,terminal] ----- -$ oc tag python:3.6 python:latest ----- -+ -.Example output -[source,terminal] ----- -Tag python:latest set to python@sha256:438208801c4806548460b27bd1fbcb7bb188273d13871ab43f. ----- diff --git a/modules/images-imagestream-use.adoc b/modules/images-imagestream-use.adoc deleted file mode 100644 index 853a45d1b5fb..000000000000 --- a/modules/images-imagestream-use.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/images-understand.aodc -// * openshift_images/image-streams-managing.adoc - -[id="images-imagestream-use_{context}"] -= Why use imagestreams - -An image stream and its associated tags provide an abstraction for referencing container images from within {product-title}. The image stream and its tags allow you to see what images are available and ensure that you are using the specific image you need even if the image in the repository changes. - -Image streams do not contain actual image data, but present a single virtual view of related images, similar to an image repository. - -You can configure builds and deployments to watch an image stream for notifications when new images are added and react by performing a build or deployment, respectively. - -For example, if a deployment is using a certain image and a new version of that image is created, a deployment could be automatically performed to pick up the new version of the image. - -However, if the image stream tag used by the deployment or build is not updated, then even if the container image in the container image registry is updated, the build or deployment continues using the previous, presumably known good -image. - -The source images can be stored in any of the following: - -* {product-title}'s integrated registry. -* An external registry, for example registry.redhat.io or quay.io. -* Other image streams in the {product-title} cluster. - -When you define an object that references an image stream tag, such as a build or deployment configuration, you point to an image stream tag and not the repository. 
When you build or deploy your application, {product-title} queries the repository using the image stream tag to locate the associated ID of the image and uses that exact image. - -The image stream metadata is stored in the etcd instance along with other cluster information. - -Using image streams has several significant benefits: - -* You can tag, rollback a tag, and quickly deal with images, without having to re-push using the command line. - -* You can trigger builds and deployments when a new image is pushed to the registry. Also, {product-title} has generic triggers for other resources, such as Kubernetes objects. - -* You can mark a tag for periodic re-import. If the source image has changed, that change is picked up and reflected in the image stream, which triggers the build or deployment flow, depending upon the build or deployment configuration. - -* You can share images using fine-grained access control and quickly distribute images across your teams. - -* If the source image changes, the image stream tag still points to a known-good version of the image, ensuring that your application do not break unexpectedly. - -* You can configure security around who can view and use the images through permissions on the image stream objects. - -* Users that lack permission to read or list images on the cluster level can still retrieve the images tagged in a project using image streams. diff --git a/modules/images-managing-images-enabling-imagestreams-kube.adoc b/modules/images-managing-images-enabling-imagestreams-kube.adoc deleted file mode 100644 index ba537f178b16..000000000000 --- a/modules/images-managing-images-enabling-imagestreams-kube.adoc +++ /dev/null @@ -1,99 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/managing-images/using-imagestreams-with-kube-resources.adoc - - -:_content-type: PROCEDURE -[id="images-managing-images-enabling-imagestreams-kube_{context}"] -= Enabling image streams with Kubernetes resources - -When using image streams with Kubernetes resources, you can only reference image streams that reside in the same project as the resource. The image stream reference must consist of a single segment value, for example `ruby:2.5`, where `ruby` is the name of an image stream that has a tag named `2.5` and resides in the same project as the resource making the reference. - -[NOTE] -==== -This feature can not be used in the `default` namespace, nor in any `openshift-` or `kube-` namespace. -==== - -There are two ways to enable image streams with Kubernetes resources: - -* Enabling image stream resolution on a specific resource. This allows only this resource to use the image stream name in the image field. -* Enabling image stream resolution on an image stream. This allows all resources pointing to this image stream to use it in the image field. - -.Procedure - -You can use `oc set image-lookup` to enable image stream resolution on a specific resource or image stream resolution on an image stream. - -. To allow all resources to reference the image stream named `mysql`, enter the following command: -+ -[source,terminal] ----- -$ oc set image-lookup mysql ----- -+ -This sets the `Imagestream.spec.lookupPolicy.local` field to true. 
-+ -.Imagestream with image lookup enabled -[source,yaml] ----- -apiVersion: image.openshift.io/v1 -kind: ImageStream -metadata: - annotations: - openshift.io/display-name: mysql - name: mysql - namespace: myproject -spec: - lookupPolicy: - local: true ----- -+ -When enabled, the behavior is enabled for all tags within the image stream. -+ -. Then you can query the image streams and see if the option is set: -+ -[source,terminal] ----- -$ oc set image-lookup imagestream --list ----- - -You can enable image lookup on a specific resource. - -* To allow the Kubernetes deployment named `mysql` to use image streams, run the following command: -+ -[source,terminal] ----- -$ oc set image-lookup deploy/mysql ----- -+ -This sets the `alpha.image.policy.openshift.io/resolve-names` annotation -on the deployment. -+ -.Deployment with image lookup enabled -[source,yaml] ----- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: mysql - namespace: myproject -spec: - replicas: 1 - template: - metadata: - annotations: - alpha.image.policy.openshift.io/resolve-names: '*' - spec: - containers: - - image: mysql:latest - imagePullPolicy: Always - name: mysql ----- - -You can disable image lookup. - -* To disable image lookup, pass `--enabled=false`: -+ -[source,terminal] ----- -$ oc set image-lookup deploy/mysql --enabled=false ----- diff --git a/modules/images-managing-overview.adoc b/modules/images-managing-overview.adoc deleted file mode 100644 index 7de251d923fe..000000000000 --- a/modules/images-managing-overview.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift-images/managing-images.adoc - -:_content-type: CONCEPT -[id="images-managing-overview_{context}"] -= Images overview - -An image stream comprises any number of container images identified by tags. It presents a single virtual view of related images, similar to a container image repository. - -By watching an image stream, builds and deployments can receive notifications when new images are added or modified and react by performing a build or deployment, respectively. diff --git a/modules/images-other-jenkins-agent-env-var.adoc b/modules/images-other-jenkins-agent-env-var.adoc deleted file mode 100644 index 37dafbad9e2e..000000000000 --- a/modules/images-other-jenkins-agent-env-var.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins-agent.adoc - -:_content-type: REFERENCE -[id="images-other-jenkins-agent-env-var_{context}"] -= Jenkins agent environment variables - -Each Jenkins agent container can be configured with the following environment variables. - -[options="header"] -|=== -| Variable | Definition | Example values and settings - -|`JAVA_MAX_HEAP_PARAM`, -`CONTAINER_HEAP_PERCENT`, -`JENKINS_MAX_HEAP_UPPER_BOUND_MB` -|These values control the maximum heap size of the Jenkins JVM. If `JAVA_MAX_HEAP_PARAM` is set, its value takes precedence. Otherwise, the maximum heap size is dynamically calculated as `CONTAINER_HEAP_PERCENT` of the container memory limit, optionally capped at `JENKINS_MAX_HEAP_UPPER_BOUND_MB` MiB. - -By default, the maximum heap size of the Jenkins JVM is set to 50% of the container memory limit with no cap. 
-|`JAVA_MAX_HEAP_PARAM` example setting: `-Xmx512m` - -`CONTAINER_HEAP_PERCENT` default: `0.5`, or 50% - -`JENKINS_MAX_HEAP_UPPER_BOUND_MB` example setting: `512 MiB` - -|`JAVA_INITIAL_HEAP_PARAM`, -`CONTAINER_INITIAL_PERCENT` -|These values control the initial heap size of the Jenkins JVM. If `JAVA_INITIAL_HEAP_PARAM` is set, its value takes precedence. Otherwise, the initial heap size is dynamically calculated as `CONTAINER_INITIAL_PERCENT` of the dynamically calculated maximum heap size. - -By default, the JVM sets the initial heap size. -|`JAVA_INITIAL_HEAP_PARAM` example setting: `-Xms32m` - -`CONTAINER_INITIAL_PERCENT` example setting: `0.1`, or 10% - -|`CONTAINER_CORE_LIMIT` -|If set, specifies an integer number of cores used for sizing numbers of internal -JVM threads. -|Example setting: `2` - -|`JAVA_TOOL_OPTIONS` -|Specifies options to apply to all JVMs running in this container. It is not recommended to override this value. -|Default: `-XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap -Dsun.zip.disableMemoryMapping=true` - -|`JAVA_GC_OPTS` -|Specifies Jenkins JVM garbage collection parameters. It is not recommended to override this value. -|Default: `-XX:+UseParallelGC -XX:MinHeapFreeRatio=5 -XX:MaxHeapFreeRatio=10 -XX:GCTimeRatio=4 -XX:AdaptiveSizePolicyWeight=90` - -|`JENKINS_JAVA_OVERRIDES` -|Specifies additional options for the Jenkins JVM. These options are appended to all other options, including the Java options above, and can be used to override any of them, if necessary. Separate each additional option with a space and if any option contains space characters, escape them with a backslash. -|Example settings: `-Dfoo -Dbar`; `-Dfoo=first\ value -Dbar=second\ value` - -|`USE_JAVA_VERSION` -|Specifies the version of Java version to use to run the agent in its container. The container base image has two versions of java installed: `java-11` and `java-1.8.0`. If you extend the container base image, you can specify any alternative version of java using its associated suffix. -|The default value is `java-11`. - -Example setting: `java-1.8.0` - -|=== diff --git a/modules/images-other-jenkins-agent-gradle.adoc b/modules/images-other-jenkins-agent-gradle.adoc deleted file mode 100644 index 03e94695f8b3..000000000000 --- a/modules/images-other-jenkins-agent-gradle.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins-agent.adoc - -:_content-type: REFERENCE -[id="images-other-jenkins-agent-gradle_{context}"] -= Jenkins agent Gradle builds - -Hosting Gradle builds in the Jenkins agent on {product-title} presents additional complications because in addition to the Jenkins JNLP agent and Gradle JVMs, Gradle spawns a third JVM to run tests if they are specified. - - -The following settings are suggested as a starting point for running Gradle builds in a memory constrained Jenkins agent on {product-title}. You can modify these settings as required. - -* Ensure the long-lived Gradle daemon is disabled by adding `org.gradle.daemon=false` to the `gradle.properties` file. -* Disable parallel build execution by ensuring `org.gradle.parallel=true` is not set in the `gradle.properties` file and that `--parallel` is not set as a command line argument. -* To prevent Java compilations running out-of-process, set `java { options.fork = false }` in the `build.gradle` file. -* Disable multiple additional test processes by ensuring `test { maxParallelForks = 1 }` is set in the `build.gradle` file. 
-* Override the Gradle JVM memory parameters by the `GRADLE_OPTS`, `JAVA_OPTS` or `JAVA_TOOL_OPTIONS` environment variables. -* Set the maximum heap size and JVM arguments for any Gradle test JVM by defining the `maxHeapSize` and `jvmArgs` settings in `build.gradle`, or through the `-Dorg.gradle.jvmargs` command line argument. diff --git a/modules/images-other-jenkins-agent-images.adoc b/modules/images-other-jenkins-agent-images.adoc deleted file mode 100644 index 33bb1b484a59..000000000000 --- a/modules/images-other-jenkins-agent-images.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins-agent.adoc - -:_content-type: REFERENCE -[id="images-other-jenkins-agent-images_{context}"] -= Jenkins agent images - -The {product-title} Jenkins agent images are available on link:https://quay.io[Quay.io] or link:https://registry.redhat.io[registry.redhat.io]. - -Jenkins images are available through the Red Hat Registry: - -[source,terminal] ----- -$ docker pull registry.redhat.io/ocp-tools-4/jenkins-rhel8: ----- - -[source,terminal] ----- -$ docker pull registry.redhat.io/ocp-tools-4/jenkins-agent-base-rhel8: ----- - -To use these images, you can either access them directly from link:https://quay.io[Quay.io] or link:https://registry.redhat.io[registry.redhat.io] or push them into your {product-title} container image registry. diff --git a/modules/images-other-jenkins-agent-memory.adoc b/modules/images-other-jenkins-agent-memory.adoc deleted file mode 100644 index e3bff2646037..000000000000 --- a/modules/images-other-jenkins-agent-memory.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins-agent.adoc - -:_content-type: CONCEPT -[id="images-other-jenkins-agent-memory_{context}"] -= Jenkins agent memory requirements - -A JVM is used in all Jenkins agents to host the Jenkins JNLP agent as well as to run any Java applications such as `javac`, Maven, or Gradle. - -By default, the Jenkins JNLP agent JVM uses 50% of the container memory limit for its heap. This value can be modified by the `CONTAINER_HEAP_PERCENT` environment variable. It can also be capped at an upper limit or overridden entirely. - -By default, any other processes run in the Jenkins agent container, such as shell scripts or `oc` commands run from pipelines, cannot use more than the remaining 50% memory limit without provoking an OOM kill. - -By default, each further JVM process that runs in a Jenkins agent container uses up to 25% of the container memory limit for its heap. It might be necessary to tune this limit for many build workloads. diff --git a/modules/images-other-jenkins-agent-pod-retention.adoc b/modules/images-other-jenkins-agent-pod-retention.adoc deleted file mode 100644 index 74e36a839b96..000000000000 --- a/modules/images-other-jenkins-agent-pod-retention.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins-agent.adoc - -:_content-type: REFERENCE -[id="images-other-jenkins-agent-pod-retention_{context}"] -= Jenkins agent pod retention - -Jenkins agent pods, are deleted by default after the build completes or is stopped. This behavior can be changed by the Kubernetes plugin pod retention setting. Pod retention can be set for all Jenkins builds, with overrides for each pod template. The following behaviors are supported: - -* `Always` keeps the build pod regardless of build result. 
-* `Default` uses the plugin value, which is the pod template only. -* `Never` always deletes the pod. -* `On Failure` keeps the pod if it fails during the build. - -You can override pod retention in the pipeline Jenkinsfile: - -[source,groovy] ----- -podTemplate(label: "mypod", - cloud: "openshift", - inheritFrom: "maven", - podRetention: onFailure(), <1> - containers: [ - ... - ]) { - node("mypod") { - ... - } -} ----- -<1> Allowed values for `podRetention` are `never()`, `onFailure()`, `always()`, and `default()`. - -[WARNING] -==== -Pods that are kept might continue to run and count against resource quotas. -==== diff --git a/modules/images-other-jenkins-auth.adoc b/modules/images-other-jenkins-auth.adoc deleted file mode 100644 index 67592c60a648..000000000000 --- a/modules/images-other-jenkins-auth.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins.adoc - -:_content-type: PROCEDURE -[id="images-other-jenkins-auth_{context}"] -= Jenkins authentication - -Jenkins authentication is used by default if the image is run directly, without using a template. - -The first time Jenkins starts, the configuration is created along with the administrator user and password. The default user credentials are `admin` and `password`. Configure the default password by setting the `JENKINS_PASSWORD` environment variable when using, and only when using, standard Jenkins authentication. - -.Procedure - -* Create a Jenkins application that uses standard Jenkins authentication: -+ -[source,terminal] ----- -$ oc new-app -e \ - JENKINS_PASSWORD= \ - ocp-tools-4/jenkins-rhel8 ----- diff --git a/modules/images-other-jenkins-config-kubernetes.adoc b/modules/images-other-jenkins-config-kubernetes.adoc deleted file mode 100644 index faad540c5695..000000000000 --- a/modules/images-other-jenkins-config-kubernetes.adoc +++ /dev/null @@ -1,171 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins.adoc - -:_content-type: CONCEPT -[id="images-other-jenkins-config-kubernetes_{context}"] -= Configuring the Jenkins Kubernetes plugin - -The OpenShift Jenkins image includes the pre-installed link:https://wiki.jenkins-ci.org/display/JENKINS/Kubernetes+Plugin[Kubernetes plugin for Jenkins] so that Jenkins agents can be dynamically provisioned on multiple container hosts using Kubernetes and {product-title}. - -To use the Kubernetes plugin, {product-title} provides an OpenShift Agent Base image that is suitable for use as a Jenkins agent. - -[IMPORTANT] -==== -{product-title} 4.11 moves the OpenShift Jenkins and OpenShift Agent Base images to the `ocp-tools-4` repository at `registry.redhat.io` so that Red Hat can produce and update the images outside the {product-title} lifecycle. Previously, these images were in the {product-title} install payload and the `openshift4` repository at `registry.redhat.io`. - -The OpenShift Jenkins Maven and NodeJS Agent images were removed from the {product-title} 4.11 payload. Red Hat no longer produces these images, and they are not available from the `ocp-tools-4` repository at `registry.redhat.io`. Red Hat maintains the 4.10 and earlier versions of these images for any significant bug fixes or security CVEs, following the link:https://access.redhat.com/support/policy/updates/openshift[{product-title} lifecycle policy]. - -For more information, see the "Important changes to OpenShift Jenkins images" link in the following "Additional resources" section. 
-==== - -The Maven and Node.js agent images are automatically configured as Kubernetes pod template images within the {product-title} Jenkins image configuration for the Kubernetes plugin. That configuration includes labels for each image that you can apply to any of your Jenkins jobs under their `Restrict where this project can be run` setting. If the label is applied, jobs run under an {product-title} pod running the respective agent image. - -[IMPORTANT] -==== -In {product-title} 4.10 and later, the recommended pattern for running Jenkins agents using the Kubernetes plugin is to use pod templates with both `jnlp` and `sidecar` containers. The `jnlp` container uses the {product-title} Jenkins Base agent image to facilitate launching a separate pod for your build. The `sidecar` container image has the tools needed to build in a particular language within the separate pod that was launched. Many container images from the Red Hat Container Catalog are referenced in the sample image streams in the `openshift` namespace. The {product-title} Jenkins image has a pod template named `java-build` with sidecar containers that demonstrate this approach. This pod template uses the latest Java version provided by the `java` image stream in the `openshift` namespace. -==== - -The Jenkins image also provides auto-discovery and auto-configuration of additional agent images for the Kubernetes plugin. - -With the {product-title} sync plugin, on Jenkins startup, the Jenkins image searches within the project it is running, or the projects listed in the plugin's configuration, for the following items: - -* Image streams with the `role` label set to `jenkins-agent`. -* Image stream tags with the `role` annotation set to `jenkins-agent`. -* Config maps with the `role` label set to `jenkins-agent`. - -When the Jenkins image finds an image stream with the appropriate label, or an image stream tag with the appropriate annotation, it generates the corresponding Kubernetes plugin configuration. This way, you can assign your Jenkins jobs to run in a pod running the container image provided by the image stream. - -The name and image references of the image stream, or image stream tag, are mapped to the name and image fields in the Kubernetes plugin pod template. You can control the label field of the Kubernetes plugin pod template by setting an annotation on the image stream, or image stream tag object, with the key `agent-label`. Otherwise, the name is used as the label. - -[NOTE] -==== -Do not log in to the Jenkins console and change the pod template configuration. If you do so after the pod template is created, and the {product-title} Sync plugin detects that the image associated with the image stream or image stream tag has changed, it replaces the pod template and overwrites those configuration changes. You cannot merge a new configuration with the existing configuration. - -Consider the config map approach if you have more complex configuration needs. -==== - -When it finds a config map with the appropriate label, the Jenkins image assumes that any values in the key-value data payload of the config map contain Extensible Markup Language (XML) consistent with the configuration format for Jenkins and the Kubernetes plugin pod templates. One key advantage of config maps over image streams and image stream tags is that you can control all the Kubernetes plugin pod template parameters. 
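-
-If you maintain the pod template XML in a config map that does not yet carry the label, you can add the label so that the sync plugin discovers it. The following command is a minimal sketch; the `jenkins-agent` config map name matches the sample that follows:
-
-[source,terminal]
-----
-$ oc label configmap jenkins-agent role=jenkins-agent
-----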
- -.Sample config map for `jenkins-agent` -[source,yaml] ----- -kind: ConfigMap -apiVersion: v1 -metadata: - name: jenkins-agent - labels: - role: jenkins-agent -data: - template1: |- - - - template1 - 2147483647 - 0 - - jenkins - - - - - jnlp - openshift/jenkins-agent-maven-35-centos7:v3.10 - false - true - /tmp - - ${computer.jnlpmac} ${computer.name} - false - - - - - - - - - - - - ----- - -The following example shows two containers that reference image streams in the `openshift` namespace. One container handles the JNLP contract for launching Pods as Jenkins Agents. The other container uses an image with tools for building code in a particular coding language: - -[source,yaml] ----- -kind: ConfigMap -apiVersion: v1 -metadata: - name: jenkins-agent - labels: - role: jenkins-agent -data: - template2: |- - - - template2 - 2147483647 - 0 - - jenkins - - - - - jnlp - image-registry.openshift-image-registry.svc:5000/openshift/jenkins-agent-base-rhel8:latest - false - true - /home/jenkins/agent - - \$(JENKINS_SECRET) \$(JENKINS_NAME) - false - - - - - - - - java - image-registry.openshift-image-registry.svc:5000/openshift/java:latest - false - true - /home/jenkins/agent - cat - - true - - - - - - - - - - - - ----- - - -[NOTE] -==== -Do not log in to the Jenkins console and change the pod template configuration. If you do so after the pod template is created, and the {product-title} Sync plugin detects that the image associated with the image stream or image stream tag has changed, it replaces the pod template and overwrites those configuration changes. You cannot merge a new configuration with the existing configuration. - -Consider the config map approach if you have more complex configuration needs. -==== - -After it is installed, the {product-title} Sync plugin monitors the API server of {product-title} for updates to image streams, image stream tags, and config maps and adjusts the configuration of the Kubernetes plugin. - -The following rules apply: - -* Removing the label or annotation from the config map, image stream, or image stream tag deletes any existing `PodTemplate` from the configuration of the Kubernetes plugin. -* If those objects are removed, the corresponding configuration is removed from the Kubernetes plugin. -* If you create appropriately labeled or annotated `ConfigMap`, `ImageStream`, or `ImageStreamTag` objects, or add labels after their initial creation, this results in the creation of a `PodTemplate` in the Kubernetes-plugin configuration. -* In the case of the `PodTemplate` by config map form, changes to the config map data for the `PodTemplate` are applied to the `PodTemplate` settings in the Kubernetes plugin configuration. The changes also override any changes that were made to the `PodTemplate` through the Jenkins UI between changes to the config map. - -To use a container image as a Jenkins agent, the image must run the agent as an entry point. For more details, see the official https://wiki.jenkins-ci.org/display/JENKINS/Distributed+builds#Distributedbuilds-Launchslaveagentheadlessly[Jenkins documentation]. 
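-
-As a minimal sketch of the image stream discovery flow described above, assume an image stream named `my-agent` that points to a suitable agent image in a project that the sync plugin watches; the `my-agent` and `maven-agent` names are illustrative. You can mark the image stream for discovery and set the pod template label as follows:
-
-[source,terminal]
-----
-$ oc label imagestream my-agent role=jenkins-agent
-$ oc annotate imagestream my-agent agent-label=maven-agent
-----
-
-With these settings, the sync plugin generates a pod template named `my-agent` whose label is `maven-agent`, so any Jenkins job restricted to the `maven-agent` label runs in a pod that uses the image referenced by that image stream.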
diff --git a/modules/images-other-jenkins-create-service.adoc b/modules/images-other-jenkins-create-service.adoc deleted file mode 100644 index ba9c53fcadbc..000000000000 --- a/modules/images-other-jenkins-create-service.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins.adoc - -:_content-type: PROCEDURE -[id="images-other-jenkins-create-service_{context}"] -= Creating a Jenkins service from a template - -Templates provide parameter fields to define all the environment variables with predefined default values. {product-title} provides templates to make creating a new Jenkins service easy. The Jenkins templates should be registered in the default `openshift` project by your cluster administrator during the initial cluster setup. - -The two available templates both define deployment configuration and a service. The templates differ in their storage strategy, which affects whether the Jenkins content persists across a pod restart. - -[NOTE] -==== -A pod might be restarted when it is moved to another node or when an update of the deployment configuration triggers a redeployment. -==== - -* `jenkins-ephemeral` uses ephemeral storage. On pod restart, all data is lost. This template is only useful for development or testing. - -* `jenkins-persistent` uses a Persistent Volume (PV) store. Data survives a pod restart. - -To use a PV store, the cluster administrator must define a PV pool in the {product-title} deployment. - -After you select which template you want, you must instantiate the template to be able to use Jenkins. - -.Procedure - -. Create a new Jenkins application using one of the following methods: -** A PV: -+ -[source,terminal] ----- -$ oc new-app jenkins-persistent ----- - -** Or an `emptyDir` type volume where configuration does not persist across pod restarts: -+ -[source,terminal] ----- -$ oc new-app jenkins-ephemeral ----- - -With both templates, you can run `oc describe` on them to see all the parameters available for overriding. - -For example: - -[source,terminal] ----- -$ oc describe jenkins-ephemeral ----- diff --git a/modules/images-other-jenkins-cross-project.adoc b/modules/images-other-jenkins-cross-project.adoc deleted file mode 100644 index 0aa0d3736718..000000000000 --- a/modules/images-other-jenkins-cross-project.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins.adoc - -:_content-type: PROCEDURE -[id="images-other-jenkins-cross-project_{context}"] -= Providing Jenkins cross project access - -If you are going to run Jenkins somewhere other than your same project, you must provide an access token to Jenkins to access your project. - -.Procedure - -. Identify the secret for the service account that has appropriate permissions to access the project Jenkins must access: -+ -[source,terminal] ----- -$ oc describe serviceaccount jenkins ----- -+ -.Example output -[source,terminal] ----- -Name: default -Labels: -Secrets: { jenkins-token-uyswp } - { jenkins-dockercfg-xcr3d } -Tokens: jenkins-token-izv1u - jenkins-token-uyswp ----- -+ -In this case the secret is named `jenkins-token-uyswp`. - -. 
Retrieve the token from the secret: -+ -[source,terminal] ----- -$ oc describe secret ----- -+ -.Example output -[source,terminal] ----- -Name: jenkins-token-uyswp -Labels: -Annotations: kubernetes.io/service-account.name=jenkins,kubernetes.io/service-account.uid=32f5b661-2a8f-11e5-9528-3c970e3bf0b7 -Type: kubernetes.io/service-account-token -Data -==== -ca.crt: 1066 bytes -token: eyJhbGc......wRA ----- -+ -The token parameter contains the token value Jenkins requires to access the project. diff --git a/modules/images-other-jenkins-customize-s2i.adoc b/modules/images-other-jenkins-customize-s2i.adoc deleted file mode 100644 index b9421af42602..000000000000 --- a/modules/images-other-jenkins-customize-s2i.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins.adoc - -:_content-type: CONCEPT -[id="images-other-jenkins-customize-s2i_{context}"] -= Customizing the Jenkins image through source-to-image - -To customize the official {product-title} Jenkins image, you can use the image as a source-to-image (S2I) builder. - -You can use S2I to copy your custom Jenkins jobs definitions, add additional plugins, or replace the provided `config.xml` file with your own, custom, configuration. - -To include your modifications in the Jenkins image, you must have a Git repository with the following directory structure: - -`plugins`:: -This directory contains those binary Jenkins plugins you want to copy into Jenkins. - -`plugins.txt`:: -This file lists the plugins you want to install using the following syntax: - ----- -pluginId:pluginVersion ----- - -`configuration/jobs`:: -This directory contains the Jenkins job definitions. - -`configuration/config.xml`:: -This file contains your custom Jenkins configuration. - -The contents of the `configuration/` directory is copied to the `/var/lib/jenkins/` directory, so you can also include additional files, such as `credentials.xml`, there. - -.Sample build configuration customizes the Jenkins image in {product-title} -[source,yaml] ----- -apiVersion: build.openshift.io/v1 -kind: BuildConfig -metadata: - name: custom-jenkins-build -spec: - source: <1> - git: - uri: https://github.com/custom/repository - type: Git - strategy: <2> - sourceStrategy: - from: - kind: ImageStreamTag - name: jenkins:2 - namespace: openshift - type: Source - output: <3> - to: - kind: ImageStreamTag - name: custom-jenkins:latest ----- - -<1> The `source` parameter defines the source Git repository with the layout described above. -<2> The `strategy` parameter defines the original Jenkins image to use as a source image for the build. -<3> The `output` parameter defines the resulting, customized Jenkins image that you can use in deployment configurations instead of the official Jenkins image. 
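-
-After you define a build configuration such as the sample above, you typically create it and start a build. The following commands are a sketch; the file name is hypothetical and the object names come from the sample:
-
-[source,terminal]
-----
-$ oc create -f custom-jenkins-build.yaml
-$ oc start-build custom-jenkins-build --follow
-----
-
-The build produces the `custom-jenkins:latest` image stream tag, which you can reference in deployment configurations in place of the official Jenkins image.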
diff --git a/modules/images-other-jenkins-env-var.adoc b/modules/images-other-jenkins-env-var.adoc deleted file mode 100644 index 8e1569691b4e..000000000000 --- a/modules/images-other-jenkins-env-var.adoc +++ /dev/null @@ -1,99 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins.adoc - -:_content-type: REFERENCE -[id="images-other-jenkins-env-var_{context}"] -= Jenkins environment variables - -The Jenkins server can be configured with the following environment variables: - -[options="header"] -|=== -| Variable | Definition | Example values and settings - -|`OPENSHIFT_ENABLE_OAUTH` -|Determines whether the {product-title} Login plugin manages authentication when logging in to Jenkins. To enable, set to `true`. -|Default: `false` - -|`JENKINS_PASSWORD` -|The password for the `admin` user when using standard Jenkins authentication. Not applicable when `OPENSHIFT_ENABLE_OAUTH` is set to `true`. -|Default: `password` - -|`JAVA_MAX_HEAP_PARAM`, -`CONTAINER_HEAP_PERCENT`, -`JENKINS_MAX_HEAP_UPPER_BOUND_MB` -|These values control the maximum heap size of the Jenkins JVM. If -`JAVA_MAX_HEAP_PARAM` is set, its value takes precedence. Otherwise, the maximum heap size is dynamically calculated as `CONTAINER_HEAP_PERCENT` of the container memory limit, optionally capped at `JENKINS_MAX_HEAP_UPPER_BOUND_MB` MiB. - -By default, the maximum heap size of the Jenkins JVM is set to 50% of the container memory limit with no cap. -|`JAVA_MAX_HEAP_PARAM` example setting: `-Xmx512m` - -`CONTAINER_HEAP_PERCENT` default: `0.5`, or 50% - -`JENKINS_MAX_HEAP_UPPER_BOUND_MB` example setting: `512 MiB` - -|`JAVA_INITIAL_HEAP_PARAM`, -`CONTAINER_INITIAL_PERCENT` -|These values control the initial heap size of the Jenkins JVM. If `JAVA_INITIAL_HEAP_PARAM` is set, its value takes precedence. Otherwise, the initial heap size is dynamically calculated as `CONTAINER_INITIAL_PERCENT` of the dynamically calculated maximum heap size. - -By default, the JVM sets the initial heap size. -|`JAVA_INITIAL_HEAP_PARAM` example setting: `-Xms32m` - -`CONTAINER_INITIAL_PERCENT` example setting: `0.1`, or 10% - -|`CONTAINER_CORE_LIMIT` -|If set, specifies an integer number of cores used for sizing numbers of internal JVM threads. -|Example setting: `2` - -|`JAVA_TOOL_OPTIONS` -|Specifies options to apply to all JVMs running in this container. It is not recommended to override this value. -|Default: `-XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap -Dsun.zip.disableMemoryMapping=true` - -|`JAVA_GC_OPTS` -|Specifies Jenkins JVM garbage collection parameters. It is not recommended to override this value. -|Default: `-XX:+UseParallelGC -XX:MinHeapFreeRatio=5 -XX:MaxHeapFreeRatio=10 -XX:GCTimeRatio=4 -XX:AdaptiveSizePolicyWeight=90` - -|`JENKINS_JAVA_OVERRIDES` -|Specifies additional options for the Jenkins JVM. These options are appended to all other options, including the Java options above, and may be used to override any of them if necessary. Separate each additional option with a space; if any option contains space characters, escape them with a backslash. -|Example settings: `-Dfoo -Dbar`; `-Dfoo=first\ value -Dbar=second\ value`. - -|`JENKINS_OPTS` -|Specifies arguments to Jenkins. -| - -|`INSTALL_PLUGINS` -|Specifies additional Jenkins plugins to install when the container is first run or when `OVERRIDE_PV_PLUGINS_WITH_IMAGE_PLUGINS` is set to `true`. Plugins are specified as a comma-delimited list of name:version pairs. -|Example setting: `git:3.7.0,subversion:2.10.2`. 
- -|`OPENSHIFT_PERMISSIONS_POLL_INTERVAL` -|Specifies the interval in milliseconds that the {product-title} Login plugin polls {product-title} for the permissions that are associated with each user that is defined in Jenkins. -|Default: `300000` - 5 minutes - -|`OVERRIDE_PV_CONFIG_WITH_IMAGE_CONFIG` -|When running this image with an {product-title} persistent volume (PV) for the Jenkins configuration directory, the transfer of configuration from the image to the PV is performed only the first time the image starts because the PV is assigned when the persistent volume claim (PVC) is created. If you create a custom image that extends this image and updates the configuration in the custom image after the initial startup, the configuration is not copied over unless you set this environment variable to `true`. -|Default: `false` - -|`OVERRIDE_PV_PLUGINS_WITH_IMAGE_PLUGINS` -|When running this image with an {product-title} PV for the Jenkins configuration directory, the transfer of plugins from the image to the PV is performed only the first time the image starts because the PV is assigned when the PVC is created. If you create a custom image that extends this image and updates plugins in the custom image after the initial startup, the plugins are not copied over unless you set this environment variable to `true`. -|Default: `false` - -|`ENABLE_FATAL_ERROR_LOG_FILE` -|When running this image with an {product-title} PVC for the Jenkins configuration directory, this environment variable allows the fatal error log file to persist when a fatal error occurs. The fatal error file is saved at `/var/lib/jenkins/logs`. -|Default: `false` - -|`AGENT_BASE_IMAGE` -|Setting this value overrides the image used for the `jnlp` container in the sample Kubernetes plugin pod templates provided with this image. Otherwise, the image from the `jenkins-agent-base-rhel8:latest` image stream tag in the `openshift` namespace is used. -|Default: -`image-registry.openshift-image-registry.svc:5000/openshift/jenkins-agent-base-rhel8:latest` - -|`JAVA_BUILDER_IMAGE` -|Setting this value overrides the image used for the `java-builder` container in the `java-builder` sample Kubernetes plugin pod templates provided with this image. Otherwise, the image from the `java:latest` image stream tag in the `openshift` namespace is used. -|Default: -`image-registry.openshift-image-registry.svc:5000/openshift/java:latest` - -|`JAVA_FIPS_OPTIONS` -|Setting this value controls how the JVM operates when running on a FIPS node. For more information, see link:https://access.redhat.com/documentation/en-us/openjdk/11/html-single/configuring_openjdk_11_on_rhel_with_fips/index#config-fips-in-openjdk[Configure OpenJDK 11 in FIPS mode]. -|Default: `-Dcom.redhat.fips=false` - -|=== diff --git a/modules/images-other-jenkins-kubernetes-plugin.adoc b/modules/images-other-jenkins-kubernetes-plugin.adoc deleted file mode 100644 index 843eb4d9af87..000000000000 --- a/modules/images-other-jenkins-kubernetes-plugin.adoc +++ /dev/null @@ -1,159 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins.adoc - -:_content-type: CONCEPT -[id="images-other-jenkins-kubernetes-plugin_{context}"] -= Using the Jenkins Kubernetes plugin - -In the following example, the `openshift-jee-sample` `BuildConfig` object causes a Jenkins Maven agent pod to be dynamically provisioned. The pod clones some Java source code, builds a WAR file, and causes a second `BuildConfig`, `openshift-jee-sample-docker` to run. 
The second `BuildConfig` layers the new WAR file into a container image. - -[IMPORTANT] -==== -{product-title} 4.11 removed the OpenShift Jenkins Maven and NodeJS Agent images from its payload. Red Hat no longer produces these images, and they are not available from the `ocp-tools-4` repository at `registry.redhat.io`. Red Hat maintains the 4.10 and earlier versions of these images for any significant bug fixes or security CVEs, following the link:https://access.redhat.com/support/policy/updates/openshift[{product-title} lifecycle policy]. - -For more information, see the "Important changes to OpenShift Jenkins images" link in the following "Additional resources" section. -==== - -.Sample `BuildConfig` that uses the Jenkins Kubernetes plugin -[source,yaml] ----- -kind: List -apiVersion: v1 -items: -- kind: ImageStream - apiVersion: image.openshift.io/v1 - metadata: - name: openshift-jee-sample -- kind: BuildConfig - apiVersion: build.openshift.io/v1 - metadata: - name: openshift-jee-sample-docker - spec: - strategy: - type: Docker - source: - type: Docker - dockerfile: |- - FROM openshift/wildfly-101-centos7:latest - COPY ROOT.war /wildfly/standalone/deployments/ROOT.war - CMD $STI_SCRIPTS_PATH/run - binary: - asFile: ROOT.war - output: - to: - kind: ImageStreamTag - name: openshift-jee-sample:latest -- kind: BuildConfig - apiVersion: build.openshift.io/v1 - metadata: - name: openshift-jee-sample - spec: - strategy: - type: JenkinsPipeline - jenkinsPipelineStrategy: - jenkinsfile: |- - node("maven") { - sh "git clone https://github.com/openshift/openshift-jee-sample.git ." - sh "mvn -B -Popenshift package" - sh "oc start-build -F openshift-jee-sample-docker --from-file=target/ROOT.war" - } - triggers: - - type: ConfigChange ----- - -It is also possible to override the specification of the dynamically created Jenkins agent pod. The following is a modification to the preceding example, which overrides the container memory and specifies an environment variable. - -.Sample `BuildConfig` that uses the Jenkins Kubernetes plugin, specifying memory limit and environment variable -[source,yaml] ----- -kind: BuildConfig -apiVersion: build.openshift.io/v1 -metadata: - name: openshift-jee-sample -spec: - strategy: - type: JenkinsPipeline - jenkinsPipelineStrategy: - jenkinsfile: |- - podTemplate(label: "mypod", <1> - cloud: "openshift", <2> - inheritFrom: "maven", <3> - containers: [ - containerTemplate(name: "jnlp", <4> - image: "openshift/jenkins-agent-maven-35-centos7:v3.10", <5> - resourceRequestMemory: "512Mi", <6> - resourceLimitMemory: "512Mi", <7> - envVars: [ - envVar(key: "CONTAINER_HEAP_PERCENT", value: "0.25") <8> - ]) - ]) { - node("mypod") { <9> - sh "git clone https://github.com/openshift/openshift-jee-sample.git ." - sh "mvn -B -Popenshift package" - sh "oc start-build -F openshift-jee-sample-docker --from-file=target/ROOT.war" - } - } - triggers: - - type: ConfigChange ----- -<1> A new pod template called `mypod` is defined dynamically. The new pod template name is referenced in the node stanza. -<2> The `cloud` value must be set to `openshift`. -<3> The new pod template can inherit its configuration from an existing pod template. In this case, inherited from the Maven pod template that is pre-defined by {product-title}. -<4> This example overrides values in the pre-existing container, and must be specified by name. All Jenkins agent images shipped with {product-title} use the Container name `jnlp`. -<5> Specify the container image name again. This is a known issue. 
-<6> A memory request of `512 Mi` is specified. -<7> A memory limit of `512 Mi` is specified. -<8> An environment variable `CONTAINER_HEAP_PERCENT`, with value `0.25`, is specified. -<9> The node stanza references the name of the defined pod template. - -// Writer, remove or update jenkins-agent-maven reference in 4.12 - -By default, the pod is deleted when the build completes. This behavior can be modified with the plugin or within a pipeline Jenkinsfile. - -Upstream Jenkins has more recently introduced a YAML declarative format for defining a `podTemplate` pipeline DSL in-line with your pipelines. An example of this format, using the sample `java-builder` pod template that is defined in the {product-title} Jenkins image: - -[source,yaml] ----- -def nodeLabel = 'java-buidler' - -pipeline { - agent { - kubernetes { - cloud 'openshift' - label nodeLabel - yaml """ -apiVersion: v1 -kind: Pod -metadata: - labels: - worker: ${nodeLabel} -spec: - containers: - - name: jnlp - image: image-registry.openshift-image-registry.svc:5000/openshift/jenkins-agent-base-rhel8:latest - args: ['\$(JENKINS_SECRET)', '\$(JENKINS_NAME)'] - - name: java - image: image-registry.openshift-image-registry.svc:5000/openshift/java:latest - command: - - cat - tty: true -""" - } - } - - options { - timeout(time: 20, unit: 'MINUTES') - } - - stages { - stage('Build App') { - steps { - container("java") { - sh "mvn --version" - } - } - } - } -} ----- diff --git a/modules/images-other-jenkins-memory.adoc b/modules/images-other-jenkins-memory.adoc deleted file mode 100644 index d12411d290c5..000000000000 --- a/modules/images-other-jenkins-memory.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins.adoc - -:_content-type: CONCEPT -[id="images-other-jenkins-memory_{context}"] -= Jenkins memory requirements - -When deployed by the provided Jenkins Ephemeral or Jenkins Persistent templates, the default memory limit is `1 Gi`. - -By default, all other process that run in the Jenkins container cannot use more than a total of `512 MiB` of memory. If they require more memory, the container halts. It is therefore highly recommended that pipelines run external commands in an agent container wherever possible. - -And if `Project` quotas allow for it, see recommendations from the Jenkins documentation on what a Jenkins master should have from a memory perspective. Those recommendations proscribe to allocate even more memory for the Jenkins master. - -It is recommended to specify memory request and limit values on agent containers created by the Jenkins Kubernetes plugin. Admin users can set default values on a per-agent image basis through the Jenkins configuration. The memory request and limit parameters can also be overridden on a per-container basis. - -You can increase the amount of memory available to Jenkins by overriding the `MEMORY_LIMIT` parameter when instantiating the Jenkins Ephemeral or Jenkins Persistent template. 
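-
-For example, the following command is a minimal sketch of instantiating the persistent template with a larger memory limit; the `2Gi` value is illustrative:
-
-[source,terminal]
-----
-$ oc new-app jenkins-persistent -p MEMORY_LIMIT=2Gi
-----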
diff --git a/modules/images-other-jenkins-oauth-auth.adoc b/modules/images-other-jenkins-oauth-auth.adoc deleted file mode 100644 index 493d59fca046..000000000000 --- a/modules/images-other-jenkins-oauth-auth.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins.adoc - -:_content-type: CONCEPT -[id="images-other-jenkins-oauth-auth_{context}"] -= {product-title} OAuth authentication - -OAuth authentication is activated by configuring options on the *Configure Global Security* panel in the Jenkins UI, or by setting the `OPENSHIFT_ENABLE_OAUTH` environment variable on the Jenkins *Deployment configuration* to anything other than `false`. This activates the {product-title} Login plugin, which retrieves the configuration information from pod data or by interacting with the {product-title} API server. - -Valid credentials are controlled by the {product-title} identity provider. - -Jenkins supports both browser and non-browser access. - -Valid users are automatically added to the Jenkins authorization matrix at log in, where {product-title} roles dictate the specific Jenkins permissions that users have. The roles used by default are the predefined `admin`, `edit`, and `view`. The login plugin executes self-SAR requests against those roles in the project or namespace that Jenkins is running in. - -Users with the `admin` role have the traditional Jenkins administrative user permissions. Users with the `edit` or `view` role have progressively fewer permissions. - -The default {product-title} `admin`, `edit`, and `view` roles and the Jenkins permissions those roles are assigned in the Jenkins instance are configurable. - -When running Jenkins in an {product-title} pod, the login plugin looks for a config map named `openshift-jenkins-login-plugin-config` in the namespace that Jenkins is running in. - -If this plugin finds and can read in that config map, you can define the role to Jenkins Permission mappings. Specifically: - - * The login plugin treats the key and value pairs in the config map as Jenkins permission to {product-title} role mappings. - * The key is the Jenkins permission group short ID and the Jenkins permission short ID, with those two separated by a hyphen character. - * If you want to add the `Overall Jenkins Administer` permission to an {product-title} role, the key should be `Overall-Administer`. - * To get a sense of which permission groups and permissions IDs are available, go to the matrix authorization page in the Jenkins console and IDs for the groups and individual permissions in the table they provide. - * The value of the key and value pair is the list of {product-title} roles the permission should apply to, with each role separated by a comma. - * If you want to add the `Overall Jenkins Administer` permission to both the default `admin` and `edit` roles, as well as a new Jenkins role you have created, the value for the key `Overall-Administer` would be `admin,edit,jenkins`. - - -[NOTE] -==== -The `admin` user that is pre-populated in the {product-title} Jenkins image with administrative privileges is not given those privileges when {product-title} OAuth is used. To grant these permissions the {product-title} cluster administrator must explicitly define that user in the {product-title} identity provider and assigns the `admin` role to the user. -==== - -Jenkins users' permissions that are stored can be changed after the users are initially established. 
The {product-title} Login plugin polls the {product-title} API server for permissions and updates the permissions stored in Jenkins for each user with the permissions retrieved from {product-title}. If the Jenkins UI is used to update permissions for a Jenkins user, the permission changes are overwritten the next time the plugin polls {product-title}. - -You can control how often the polling occurs with the `OPENSHIFT_PERMISSIONS_POLL_INTERVAL` environment variable. The default polling interval is five minutes. - -The easiest way to create a new Jenkins service using OAuth authentication is to use a template. diff --git a/modules/images-other-jenkins-permissions.adoc b/modules/images-other-jenkins-permissions.adoc deleted file mode 100644 index a28686174b32..000000000000 --- a/modules/images-other-jenkins-permissions.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins.adoc - -:_content-type: CONCEPT -[id="images-other-jenkins-permissions_{context}"] -= Jenkins permissions - -If in the config map the `` element of the pod template XML is the {product-title} service account used for the resulting pod, the service account credentials are mounted into the pod. The permissions are associated with the service account and control which operations against the {product-title} master are allowed from the pod. - -Consider the following scenario with service accounts used for the pod, which is launched by the Kubernetes Plugin that runs in the {product-title} Jenkins image. - -If you use the example template for Jenkins that is provided by {product-title}, the `jenkins` service account is defined with the `edit` role for the project Jenkins runs in, and the master Jenkins pod has that service account mounted. - -The two default Maven and NodeJS pod templates that are injected into the Jenkins configuration are also set to use the same service account as the Jenkins master. - -* Any pod templates that are automatically discovered by the {product-title} sync plugin because their image streams or image stream tags have the required label or annotations are configured to use the Jenkins master service account as their service account. -* For the other ways you can provide a pod template definition into Jenkins and the Kubernetes plugin, you have to explicitly specify the service account to use. Those other ways include the Jenkins console, the `podTemplate` pipeline DSL that is provided by the Kubernetes plugin, or labeling a config map whose data is the XML configuration for a pod template. -* If you do not specify a value for the service account, the `default` service account is used. -* Ensure that whatever service account is used has the necessary permissions, roles, and so on defined within {product-title} to manipulate whatever projects you choose to manipulate from the within the pod. diff --git a/modules/images-pulling-from-private-registries.adoc b/modules/images-pulling-from-private-registries.adoc deleted file mode 100644 index dc0b673a009c..000000000000 --- a/modules/images-pulling-from-private-registries.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/using-image-pull-secrets - -:_content-type: PROCEDURE -[id="images-pulling-from-private-registries_{context}"] -= Pulling from private registries with delegated authentication - -A private registry can delegate authentication to a separate service. 
In these cases, image pull secrets must be defined for both the authentication and registry endpoints. - -.Procedure - -. Create a secret for the delegated authentication server: -+ -[source,terminal] ----- -$ oc create secret docker-registry \ - --docker-server=sso.redhat.com \ - --docker-username=developer@example.com \ - --docker-password=******** \ - --docker-email=unused \ - redhat-connect-sso - -secret/redhat-connect-sso ----- -+ -. Create a secret for the private registry: -+ -[source,terminal] ----- -$ oc create secret docker-registry \ - --docker-server=privateregistry.example.com \ - --docker-username=developer@example.com \ - --docker-password=******** \ - --docker-email=unused \ - private-registry - -secret/private-registry ----- diff --git a/modules/images-referencing-images-imagestreams.adoc b/modules/images-referencing-images-imagestreams.adoc deleted file mode 100644 index c222a57ba9e8..000000000000 --- a/modules/images-referencing-images-imagestreams.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/tagging-images - -:_content-type: PROCEDURE -[id="images-referencing-images-imagestreams_{context}"] -= Referencing images in imagestreams - -You can use tags to reference images in image streams using the following reference types. - -.Imagestream reference types -[width="50%",options="header"] -|=== -|Reference type |Description - -|`ImageStreamTag` -|An `ImageStreamTag` is used to reference or retrieve an image for a given image stream and tag. - -|`ImageStreamImage` -|An `ImageStreamImage` is used to reference or retrieve an image for a given image stream and image `sha` ID. - -|`DockerImage` -|A `DockerImage` is used to reference or retrieve an image for a given external registry. It uses standard Docker `pull specification` for its name. -|=== - -When viewing example image stream definitions you may notice they contain definitions of `ImageStreamTag` and references to `DockerImage`, but nothing related to `ImageStreamImage`. - -This is because the `ImageStreamImage` objects are automatically created in {product-title} when you import or tag an image into the image stream. You should never have to explicitly define an `ImageStreamImage` object in any image stream definition that you use to create image streams. - -.Procedure - -* To reference an image for a given image stream and tag, use `ImageStreamTag`: -+ ----- -: ----- - -* To reference an image for a given image stream and image `sha` ID, use `ImageStreamImage`: -+ ----- -@ ----- -+ -The `` is an immutable identifier for a specific image, also called a -digest. - -* To reference or retrieve an image for a given external registry, use `DockerImage`: -+ ----- -openshift/ruby-20-centos7:2.0 ----- -+ -[NOTE] -==== -When no tag is specified, it is assumed the `latest` tag is used. 
-==== -+ -You can also reference a third-party registry: -+ ----- -registry.redhat.io/rhel7:latest ----- -+ -Or an image with a digest: -+ ----- -centos/ruby-22-centos7@sha256:3a335d7d8a452970c5b4054ad7118ff134b3a6b50a2bb6d0c07c746e8986b28e ----- diff --git a/modules/images-remove-tag-imagestream.adoc b/modules/images-remove-tag-imagestream.adoc deleted file mode 100644 index a97922e3e970..000000000000 --- a/modules/images-remove-tag-imagestream.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/tagging-images - -:_content-type: PROCEDURE -[id="images-remove-tag-imagestream_{context}"] -= Removing tags from image streams - -You can remove tags from an image stream. - -.Procedure - -* To remove a tag completely from an image stream run: -+ -[source,terminal] ----- -$ oc delete istag/ruby:latest ----- -+ -or: -+ -[source,terminal] ----- -$ oc tag -d ruby:latest ----- diff --git a/modules/images-s2i-build-process-overview.adoc b/modules/images-s2i-build-process-overview.adoc deleted file mode 100644 index 006552f1558d..000000000000 --- a/modules/images-s2i-build-process-overview.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc - -:_content-type: CONCEPT -[id="images-s2i-build-process-overview_{context}"] -= Source-to-image build process overview - -Source-to-image (S2I) produces ready-to-run images by injecting source code into a container that prepares that source code to be run. It performs the following steps: - -. Runs the `FROM ` command -. Copies the source code to a defined location in the builder image -. Runs the assemble script in the builder image -. Sets the run script in the builder image as the default command - -Buildah then creates the container image. diff --git a/modules/images-samples-operator-deprecated-image-stream.adoc b/modules/images-samples-operator-deprecated-image-stream.adoc deleted file mode 100644 index a721d87db974..000000000000 --- a/modules/images-samples-operator-deprecated-image-stream.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/configuring-samples-operator.adoc - - -:_content-type: PROCEDURE -[id="images-samples-operator-deprecated-image-stream_{context}"] -= Removing deprecated image stream tags from the Cluster Samples Operator - -The Cluster Samples Operator leaves deprecated image stream tags in an image stream because users can have deployments that use the deprecated image stream tags. - -You can remove deprecated image stream tags by editing the image stream with the `oc tag` command. - -[NOTE] -==== -Deprecated image stream tags that the samples providers have removed from their image streams are not included on initial installations. -==== - -.Prerequisites - -* You installed the `oc` CLI. - -.Procedure - -* Remove deprecated image stream tags by editing the image stream with the `oc tag` command. -+ -[source,terminal] ----- -$ oc tag -d ----- -+ -.Example output -[source,terminal] ----- -Deleted tag default/. 
----- - -//Similar procedure in images-imagestreams-remove-tag.adoc diff --git a/modules/images-tag.adoc b/modules/images-tag.adoc deleted file mode 100644 index c6156c9fbb96..000000000000 --- a/modules/images-tag.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/images-understand.adoc -// * openshift_images/tagging-images.adoc - -[id="images-tag_{context}"] -= Image tags - -An image tag is a label applied to a container image in a repository that distinguishes a specific image from other images in an image stream. Typically, the tag represents a version number of some sort. For example, here `:v3.11.59-2` is the tag: - -[source,text] ----- -registry.access.redhat.com/openshift3/jenkins-2-rhel7:v3.11.59-2 ----- - -You can add additional tags to an image. For example, an image might be assigned the tags `:v3.11.59-2` and `:latest`. - -{product-title} provides the `oc tag` command, which is similar to the `docker tag` command, but operates on image streams instead of directly on images. diff --git a/modules/images-tagging-conventions.adoc b/modules/images-tagging-conventions.adoc deleted file mode 100644 index b346a415d53c..000000000000 --- a/modules/images-tagging-conventions.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/tagging-images - -[id="images-tagging-conventions_{context}"] -= Image tag conventions - -Images evolve over time and their tags reflect this. Generally, an image tag always points to the latest image built. - -If there is too much information embedded in a tag name, like `v2.0.1-may-2019`, the tag points to just one revision of an image and is never updated. Using default image pruning options, such an image is never removed. -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -In very large clusters, the schema of creating new tags for every revised image could eventually fill up the etcd datastore with excess tag metadata for images that are long outdated. -endif::[] - -If the tag is named `v2.0`, image revisions are more likely. This results in longer tag history and, therefore, the image pruner is more likely to remove old and unused images. - -Although tag naming convention is up to you, here are a few examples in the format `:`: - -.Image tag naming conventions -[width="50%",options="header"] -|=== -|Description |Example - -|Revision -|`myimage:v2.0.1` - -|Architecture -|`myimage:v2.0-x86_64` - -|Base image -|`myimage:v1.2-centos7` - -|Latest (potentially unstable) -|`myimage:latest` - -|Latest stable -|`myimage:stable` -|=== - -If you require dates in tag names, periodically inspect old and unsupported images and `istags` and remove them. Otherwise, you can experience increasing resource usage caused by retaining old images. diff --git a/modules/images-test-s2i.adoc b/modules/images-test-s2i.adoc deleted file mode 100644 index 447fde8ce773..000000000000 --- a/modules/images-test-s2i.adoc +++ /dev/null @@ -1,167 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/create-images.adoc - -:_content-type: CONCEPT -[id="images-test-s2i_{context}"] -= About testing source-to-image images - -As an Source-to-Image (S2I) builder image author, you can test your S2I image -locally and use the {product-title} build system for automated testing and -continuous integration. - -S2I requires the -`assemble` and `run` scripts to be present to successfully run -the S2I build. 
Providing the `save-artifacts` script reuses the build -artifacts, and providing the `usage` script ensures that usage information is -printed to console when someone runs the container image outside of the S2I. - -The goal of testing an S2I image is to make sure that all of these described -commands work properly, even if the base container image has changed or the tooling -used by the commands was updated. - -[id="images-test-s2i-testing-requirements_{context}"] -== Understanding testing requirements - -The standard location for the `test` script is `test/run`. This script is -invoked by the {product-title} S2I image builder and it could be a simple Bash -script or a static Go binary. - -The `test/run` script performs the S2I build, so you must have the S2I binary -available in your `$PATH`. If required, follow the installation instructions -in the -https://github.com/openshift/source-to-image/blob/master/README.md#installation[S2I -README]. - -S2I combines the application source code and builder image, so to test -it you need a sample application source to verify that the source successfully -transforms into a runnable container image. The sample application should be simple, -but it should exercise the crucial steps of `assemble` and `run` scripts. - -[id="images-test-s2i-generating-scripts-and-tools_{context}"] -== Generating scripts and tools - -The S2I tooling comes with powerful generation tools to speed up the process of -creating a new S2I image. The `s2i create` command produces all the necessary S2I -scripts and testing tools along with the `Makefile`: - -[source,termnal] ----- -$ s2i create __ __ ----- - -The generated `test/run` script must be adjusted to be -useful, but it provides a good starting point to begin developing. - -[NOTE] -==== -The `test/run` script produced by the `s2i create` command requires that the sample application sources are inside the `test/test-app` directory. -==== -ifndef::openshift-online[] -[id="images-test-s21-testing-locally_{context}"] -== Testing locally -The easiest way to run the S2I image tests locally is to use the generated -`Makefile`. - -If you did not use the `s2i create` command, you can copy the -following `Makefile` template and replace the `IMAGE_NAME` parameter with -your image name. - -.Sample `Makefile` ----- -IMAGE_NAME = openshift/ruby-20-centos7 -CONTAINER_ENGINE := $(shell command -v podman 2> /dev/null | echo docker) - -build: - ${CONTAINER_ENGINE} build -t $(IMAGE_NAME) . - -.PHONY: test -test: - ${CONTAINER_ENGINE} build -t $(IMAGE_NAME)-candidate . - IMAGE_NAME=$(IMAGE_NAME)-candidate test/run ----- - -[id="images-test-s21-basic-testing-workflow_{context}"] -== Basic testing workflow - -The `test` script assumes you have already built the image you want to -test. If required, first build the S2I image. Run one of the following commands: - -* If you use Podman, run the following command: -+ -[source,terminal] ----- -$ podman build -t ----- - -* If you use Docker, run the following command: -+ -[source,terminal] ----- -$ docker build -t ----- - -The following steps describe the default workflow to test S2I image builders: - -. Verify the `usage` script is working: -+ -* If you use Podman, run the following command: -+ -[source,terminal] ----- -$ podman run . ----- - -* If you use Docker, run the following command: -+ -[source,terminal] ----- -$ docker run . ----- - -. Build the image: -+ -[source,terminal] -[options="nowrap"] ----- -$ s2i build file:///path-to-sample-app __ __ ----- - -. 
Optional: if you support `save-artifacts`, run step 2 once again to -verify that saving and restoring artifacts works properly. - -. Run the container: -+ -* If you use Podman, run the following command: -+ -[source,terminal] ----- -$ podman run ----- - -* If you use Docker, run the following command: -+ -[source,terminal] ----- -$ docker run ----- - -. Verify the container is running and the application is responding. - -Running these steps is generally enough to tell if the builder image is -working as expected. - - -[id="images-test-s21-using-openshift-for-building-the-image_{context}"] -== Using {product-title} for building the image - -Once you have a `Dockerfile` and the other artifacts that make up your new -S2I builder image, you can put them in a git repository and use {product-title} -to build and push the image. Define a Docker build that points -to your repository. - -If your {product-title} instance is hosted on a public IP address, the build can -be triggered each time you push into your S2I builder image GitHub repository. - -You can also use the `ImageChangeTrigger` to trigger a rebuild of your applications that are -based on the S2I builder image you updated. -endif::openshift-online[] diff --git a/modules/images-triggering-updates-imagestream-changes-kubernetes-about.adoc b/modules/images-triggering-updates-imagestream-changes-kubernetes-about.adoc deleted file mode 100644 index 246cb30bd97e..000000000000 --- a/modules/images-triggering-updates-imagestream-changes-kubernetes-about.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/triggering-updates-on-imagestream-changes.adoc - - -[id="images-triggering-updates-imagestream-changes-kubernetes-about_{context}"] -= Triggering Kubernetes resources - -Kubernetes resources do not have fields for triggering, unlike deployment and build configurations, which include as part of their API definition a set of fields for controlling triggers. Instead, you can use annotations in {product-title} to request triggering. - -The annotation is defined as follows: - -[source,yaml] ----- -Key: image.openshift.io/triggers -Value: -[ - { - "from": { - "kind": "ImageStreamTag", <1> - "name": "example:latest", <2> - "namespace": "myapp" <3> - }, - "fieldPath": "spec.template.spec.containers[?(@.name==\"web\")].image", <4> - "paused": false <5> - }, - ... -] ----- -<1> Required: `kind` is the resource to trigger from must be `ImageStreamTag`. -<2> Required: `name` must be the name of an image stream tag. -<3> Optional: `namespace` defaults to the namespace of the object. -<4> Required: `fieldPath` is the JSON path to change. This field is limited and accepts only a JSON path expression that precisely matches a container by ID or index. For pods, the JSON path is "spec.containers[?(@.name='web')].image". -<5> Optional: `paused` is whether or not the trigger is paused, and the default value is `false`. Set `paused` to `true` to temporarily disable this trigger. - -When one of the core Kubernetes resources contains both a pod template and this annotation, {product-title} attempts to update the object by using the image currently associated with the image stream tag that is referenced by trigger. The update is performed against the `fieldPath` specified. 
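-
-One way to apply this annotation directly is with the `oc annotate` command. The following command is a sketch that mirrors the annotation shown above for a deployment named `example` with a container named `web`; both names are illustrative:
-
-[source,terminal]
-----
-$ oc annotate deploy/example image.openshift.io/triggers='[{"from":{"kind":"ImageStreamTag","name":"example:latest"},"fieldPath":"spec.template.spec.containers[?(@.name==\"web\")].image"}]'
-----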
- -Examples of core Kubernetes resources that can contain both a pod template and annotation include: - -* `CronJobs` -* `Deployments` -* `StatefulSets` -* `DaemonSets` -* `Jobs` -* `ReplicationControllers` -* `Pods` diff --git a/modules/images-triggering-updates-imagestream-changes-kubernetes-cli.adoc b/modules/images-triggering-updates-imagestream-changes-kubernetes-cli.adoc deleted file mode 100644 index 03e76d91a1e3..000000000000 --- a/modules/images-triggering-updates-imagestream-changes-kubernetes-cli.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/triggering-updates-on-imagestream-changes.adoc - - -:_content-type: PROCEDURE -[id="images-triggering-updates-imagestream-changes-kubernetes-cli_{context}"] -= Setting the image trigger on Kubernetes resources - -When adding an image trigger to deployments, you can use the `oc set triggers` command. For example, the sample command in this procedure adds an image change trigger to the deployment named `example` so that when the `example:latest` image stream tag is updated, the `web` container inside the deployment updates with the new image value. This command sets the correct `image.openshift.io/triggers` annotation on the deployment resource. - -.Procedure - -* Trigger Kubernetes resources by entering the `oc set triggers` command: -+ -[source,terminal] ----- -$ oc set triggers deploy/example --from-image=example:latest -c web ----- - -Unless the deployment is paused, this pod template update automatically causes a deployment to occur with the new image value. diff --git a/modules/images-update-global-pull-secret.adoc b/modules/images-update-global-pull-secret.adoc deleted file mode 100644 index a70466732428..000000000000 --- a/modules/images-update-global-pull-secret.adoc +++ /dev/null @@ -1,82 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/managing_images/using-image-pull-secrets.adoc -// * post_installation_configuration/cluster-tasks.adoc -// * updating/updating_a_cluster/updating_disconnected_cluster/disconnected-update-osus.adoc -// * support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc -// * sd_support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc -// -// Not included, but linked to from: -// * operators/admin/olm-managing-custom-catalogs.adoc - -ifeval::["{context}" == "using-image-pull-secrets"] -:image-pull-secrets: -endif::[] - -:_content-type: PROCEDURE -[id="images-update-global-pull-secret_{context}"] -= Updating the global cluster pull secret - -You can update the global pull secret for your cluster by either replacing the current pull secret or appending a new pull secret. - -ifndef::image-pull-secrets[] -The procedure is required when users use a separate registry to store images than the registry used during installation. -endif::image-pull-secrets[] - -ifdef::image-pull-secrets[] -[IMPORTANT] -==== -To transfer your cluster to another owner, you must first initiate the transfer in {cluster-manager-url}, and then update the pull secret on the cluster. Updating a cluster's pull secret without initiating the transfer in {cluster-manager} causes the cluster to stop reporting Telemetry metrics in {cluster-manager}. 
- -For more information link:https://access.redhat.com/documentation/en-us/openshift_cluster_manager/2021/html/managing_clusters/assembly-managing-clusters#transferring-cluster-ownership_assembly-managing-clusters[about transferring cluster ownership], see "Transferring cluster ownership" in the {cluster-manager-first} documentation. -==== -endif::image-pull-secrets[] - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure -. Optional: To append a new pull secret to the existing pull secret, complete the following steps: - -.. Enter the following command to download the pull secret: -+ -[source,terminal] ----- -$ oc get secret/pull-secret -n openshift-config --template='{{index .data ".dockerconfigjson" | base64decode}}' > <1> ----- -<1> Provide the path to the pull secret file. - -.. Enter the following command to add the new pull secret: -+ -[source,terminal] ----- -$ oc registry login --registry="" \ <1> ---auth-basic=":" \ <2> ---to= <3> ----- -<1> Provide the new registry. You can include multiple repositories within the same registry, for example: `--registry=""`. -<2> Provide the credentials of the new registry. -<3> Provide the path to the pull secret file. -+ -Alternatively, you can perform a manual update to the pull secret file. - -. Enter the following command to update the global pull secret for your cluster: -+ -[source,terminal] ----- -$ oc set data secret/pull-secret -n openshift-config --from-file=.dockerconfigjson= <1> ----- -<1> Provide the path to the new pull secret file. -+ -This update is rolled out to all nodes, which can take some time depending on the size of your cluster. -+ -[NOTE] -==== -As of {product-title} 4.7.4, changes to the global pull secret no longer trigger a node drain or reboot. -==== -//Also referred to as the cluster-wide pull secret. - - -ifeval::["{context}" == "using-image-pull-secrets"] -:!image-pull-secrets: -endif::[] diff --git a/modules/images-using-customizing-s2i-images-scripts-embedded.adoc b/modules/images-using-customizing-s2i-images-scripts-embedded.adoc deleted file mode 100644 index 4c32e6efc5a7..000000000000 --- a/modules/images-using-customizing-s2i-images-scripts-embedded.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/customizing-s2i-images.adoc - -:_content-type: PROCEDURE -[id="images-using-customizing-s2i-images-scripts-embedded_{context}"] -= Invoking scripts embedded in an image - -Builder images provide their own version of the source-to-image (S2I) scripts that cover the most common use-cases. If these scripts do not fulfill your needs, S2I provides a way of overriding them by adding custom ones in the `.s2i/bin` directory. However, by doing this, you are completely replacing the standard scripts. In some cases, replacing the scripts is acceptable, but, in other scenarios, you can run a few commands before or after the scripts while retaining the logic of the script provided in the image. To reuse the standard scripts, you can create a wrapper script that runs custom logic and delegates further work to the default scripts in the image. - -.Procedure - -. 
Look at the value of the `io.openshift.s2i.scripts-url` label to determine the location of the scripts inside of the builder image: -+ -[source,terminal] ----- -$ podman inspect --format='{{ index .Config.Labels "io.openshift.s2i.scripts-url" }}' wildfly/wildfly-centos7 ----- -+ -.Example output -[source,terminal] ----- -image:///usr/libexec/s2i ----- -+ -You inspected the `wildfly/wildfly-centos7` builder image and found out that the scripts are in the `/usr/libexec/s2i` directory. -+ -. Create a script that includes an invocation of one of the standard scripts wrapped in other commands: -+ -.`.s2i/bin/assemble` script -[source,bash] ----- -#!/bin/bash -echo "Before assembling" - -/usr/libexec/s2i/assemble -rc=$? - -if [ $rc -eq 0 ]; then - echo "After successful assembling" -else - echo "After failed assembling" -fi - -exit $rc ----- -+ -This example shows a custom assemble script that prints the message, runs the standard assemble script from the image, and prints another message depending on the exit code of the assemble script. -+ -[IMPORTANT] -==== -When wrapping the run script, you must use `exec` for invoking it to ensure signals are handled properly. The use of `exec` also precludes the ability to run additional commands after invoking the default image run script. -==== -+ -.`.s2i/bin/run` script -[source,bash] ----- -#!/bin/bash -echo "Before running application" -exec /usr/libexec/s2i/run ----- diff --git a/modules/images-using-imagestream-change-triggers.adoc b/modules/images-using-imagestream-change-triggers.adoc deleted file mode 100644 index 3f83a2b8bf47..000000000000 --- a/modules/images-using-imagestream-change-triggers.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -[id="images-using-imagestream-change-triggers_{context}"] -= Image stream change triggers - -Image stream triggers allow your builds and deployments to be automatically -invoked when a new version of an upstream image is available. - -//from FAQ - -For example, builds and deployments can be automatically started when an image -stream tag is modified. This is achieved by monitoring that particular image -stream tag and notifying the build or deployment when a change is detected. diff --git a/modules/images-using-imagestream-images.adoc b/modules/images-using-imagestream-images.adoc deleted file mode 100644 index 11c807396ece..000000000000 --- a/modules/images-using-imagestream-images.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -[id="images-using-imagestream-images_{context}"] -= Image stream images - -An image stream image points from within an image stream to a particular image ID. - -Image stream images allow you to retrieve metadata about an image from a particular image stream where it is tagged. - -Image stream image objects are automatically created in {product-title} whenever you import or tag an image into the image stream. You should never have to explicitly define an image stream image object in any image stream definition that you use to create image streams. 
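For illustration only, you can see the image stream image objects that were created automatically by describing an image stream. The image stream name and project used here are assumptions based on the sample referenced later in this section:

[source,terminal]
----
$ oc describe is/origin-ruby-sample -n test
----

The output lists each tag together with the `sha256` image IDs that the corresponding image stream images point to.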
- -The image stream image consists of the image stream name and image ID from the repository, delimited by an `@` sign: - ----- -@ ----- - -To refer to the image in the `ImageStream` object example, the image stream image looks like: - ----- -origin-ruby-sample@sha256:47463d94eb5c049b2d23b03a9530bf944f8f967a0fe79147dd6b9135bf7dd13d ----- diff --git a/modules/images-using-imagestream-tags.adoc b/modules/images-using-imagestream-tags.adoc deleted file mode 100644 index a0226e7b35e2..000000000000 --- a/modules/images-using-imagestream-tags.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -[id="images-using-imagestream-tags_{context}"] -= Image stream tags - -An image stream tag is a named pointer to an image in an image stream. It is abbreviated as `istag`. An image stream tag is used to reference or retrieve an image for a given image stream and tag. - -Image stream tags can reference any local or externally managed image. It contains a history of images represented as a stack of all images the tag ever pointed to. Whenever a new or existing image is tagged under particular image stream tag, it is placed at the first position in the history stack. The image previously occupying the top position is available at the second position. This allows for easy rollbacks to make tags point to historical images again. - -The following image stream tag is from an `ImageStream` object: - -.Image stream tag with two images in its history - -[source,yaml] ----- - tags: - - items: - - created: 2017-09-02T10:15:09Z - dockerImageReference: 172.30.56.218:5000/test/origin-ruby-sample@sha256:47463d94eb5c049b2d23b03a9530bf944f8f967a0fe79147dd6b9135bf7dd13d - generation: 2 - image: sha256:909de62d1f609a717ec433cc25ca5cf00941545c83a01fb31527771e1fab3fc5 - - created: 2017-09-01T13:40:11Z - dockerImageReference: 172.30.56.218:5000/test/origin-ruby-sample@sha256:909de62d1f609a717ec433cc25ca5cf00941545c83a01fb31527771e1fab3fc5 - generation: 1 - image: sha256:47463d94eb5c049b2d23b03a9530bf944f8f967a0fe79147dd6b9135bf7dd13d - tag: latest ----- - -Image stream tags can be permanent tags or tracking tags. - -* Permanent tags are version-specific tags that point to a particular version of an image, such as Python 3.5. - -* Tracking tags are reference tags that follow another image stream tag and can be updated to change which image they follow, like a symlink. These new levels are not guaranteed to be backwards-compatible. -+ -For example, the `latest` image stream tags that ship with {product-title} are tracking tags. This means consumers of the `latest` image stream tag are updated to the newest level of the framework provided by the image when a new level becomes available. A `latest` image stream tag to `v3.10` can be changed to `v3.11` at any time. It is important to be aware that these `latest` image stream tags behave differently than the Docker `latest` tag. The `latest` image stream tag, in this case, does not point to the latest image in the Docker repository. It points to another image stream tag, which might not be the latest version of an image. For example, if the `latest` image stream tag points to `v3.10` of an image, when the `3.11` version is released, the `latest` tag is not automatically updated to `v3.11`, and remains at `v3.10` until it is manually updated to point to a `v3.11` image stream tag. -+ -[NOTE] -==== -Tracking tags are limited to a single image stream and cannot reference other -image streams. 
====

You can create your own image stream tags for your own needs.

The image stream tag is composed of the name of the image stream and a tag,
separated by a colon:

----
<imagestream_name>:<tag>
----

For example, to refer to the
`sha256:47463d94eb5c049b2d23b03a9530bf944f8f967a0fe79147dd6b9135bf7dd13d` image
in the `ImageStream` object example earlier, the image stream tag
would be:

----
origin-ruby-sample:latest
----
diff --git a/modules/impersonation-project-creation.adoc b/modules/impersonation-project-creation.adoc deleted file mode 100644 index 8dce08473d60..000000000000 --- a/modules/impersonation-project-creation.adoc +++ /dev/null @@ -1,21 +0,0 @@
// Module included in the following assemblies:
//
// * applications/projects/creating-project-other-user.adoc

:_content-type: PROCEDURE
[id="impersonation-project-creation_{context}"]
= Impersonating a user when you create a project

You can impersonate a different user when you create a project request. Because
`system:authenticated:oauth` is the only bootstrap group that can
create project requests, you must impersonate that group.

.Procedure

* To create a project request on behalf of a different user:
+
[source,terminal]
----
$ oc new-project <project> --as=<user> \
    --as-group=system:authenticated --as-group=system:authenticated:oauth
----
diff --git a/modules/impersonation-system-admin-group.adoc b/modules/impersonation-system-admin-group.adoc deleted file mode 100644 index 8d47b654141b..000000000000 --- a/modules/impersonation-system-admin-group.adoc +++ /dev/null @@ -1,22 +0,0 @@
// Module included in the following assemblies:
//
// * users_and_roles/impersonating-system-admin.adoc

:_content-type: PROCEDURE
[id="impersonation-system-admin-group_{context}"]
= Impersonating the system:admin group

When a `system:admin` user is granted cluster administration permissions through a group, you must include the
`--as=<user> --as-group=<group1> --as-group=<group2>` parameters in the command to impersonate the associated groups.

.Procedure

* To grant a user permission to impersonate a `system:admin` by impersonating the associated cluster administration groups,
run the following command:
+
[source,terminal]
----
$ oc create clusterrolebinding <any_valid_name> --clusterrole=sudoer --as=<user> \
--as-group=<group1> --as-group=<group2>
----
diff --git a/modules/impersonation-system-admin-user.adoc b/modules/impersonation-system-admin-user.adoc deleted file mode 100644 index 86bd63c3e363..000000000000 --- a/modules/impersonation-system-admin-user.adoc +++ /dev/null @@ -1,40 +0,0 @@
// Module included in the following assemblies:
//
// * users_and_roles/impersonating-system-admin.adoc

:_content-type: PROCEDURE
[id="impersonation-system-admin-user_{context}"]
= Impersonating the system:admin user

You can grant a user permission to impersonate `system:admin`, which grants them
cluster administrator permissions.
.Procedure

* To grant a user permission to impersonate `system:admin`, run the following command:
+
[source,terminal]
----
$ oc create clusterrolebinding <any_valid_name> --clusterrole=sudoer --user=<username>
----
+
[TIP]
====
You can alternatively apply the following YAML to grant permission to impersonate `system:admin`:

[source,yaml]
----
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: <any_valid_name>
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: sudoer
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: <username>
----
====
diff --git a/modules/importing-manifest-list-through-imagestreamimport.adoc b/modules/importing-manifest-list-through-imagestreamimport.adoc deleted file mode 100644 index f18ebf11ae0b..000000000000 --- a/modules/importing-manifest-list-through-imagestreamimport.adoc +++ /dev/null @@ -1,44 +0,0 @@
// Module included in the following assemblies:
// * openshift_images/image-streams-manage.adoc

:_content-type: PROCEDURE
[id="importing-manifest-list-through-imagestreamimport_{context}"]
= Importing a manifest list through ImageStreamImport

You can use the `ImageStreamImport` resource to find and import image manifests from other container image registries into the cluster. Individual images or an entire image repository can be imported.

Use the following procedure to import a manifest list through the `ImageStreamImport` object with the `importMode` value.

.Procedure

. Create an `ImageStreamImport` YAML file and set the `importMode` parameter to `PreserveOriginal` on the tags that you will import as a manifest list:
+
[source,yaml]
----
apiVersion: image.openshift.io/v1
kind: ImageStreamImport
metadata:
  name: app
  namespace: myapp
spec:
  import: true
  images:
  - from:
      kind: DockerImage
      name: <registry>/<project_name>/<image_name>
    to:
      name: latest
    referencePolicy:
      type: Source
    importPolicy:
      importMode: "PreserveOriginal"
----

. Create the `ImageStreamImport` by running the following command:
+
[source,terminal]
----
$ oc create -f <file_name>
----

diff --git a/modules/importmode-configuration-fields.adoc b/modules/importmode-configuration-fields.adoc deleted file mode 100644 index 078d52eeb64b..000000000000 --- a/modules/importmode-configuration-fields.adoc +++ /dev/null @@ -1,23 +0,0 @@
// Module included in the following assemblies:
// * assembly/openshift_images/managing-image-streams.adoc

:_content-type: CONCEPT
[id="importmode-configuration-fields_{context}"]
= Configuration fields for --import-mode

The following table describes the options available for the `--import-mode=` flag:

[cols="3a,8a",options="header"]
|===
|Parameter |Description

| *Legacy* | The default option for `--import-mode`. When specified, the manifest list is discarded, and a single sub-manifest is imported. The platform is chosen in the following order of priority:

. Tag annotations
. Control plane architecture
. Linux/AMD64
. The first manifest in the list

| *PreserveOriginal* | When specified, the original manifest is preserved. For manifest lists, the manifest list and all of its sub-manifests are imported.
- -|=== \ No newline at end of file diff --git a/modules/infrastructure-components.adoc b/modules/infrastructure-components.adoc deleted file mode 100644 index 2f1a2d60040b..000000000000 --- a/modules/infrastructure-components.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * post_installation_configuration/cluster-tasks.adoc -// * nodes-nodes-creating-infrastructure-nodes.adoc - -[id="infrastructure-components_{context}"] -= {product-title} infrastructure components - -The following infrastructure workloads do not incur {product-title} worker subscriptions: - -* Kubernetes and {product-title} control plane services that run on masters -* The default router -* The integrated container image registry -* The HAProxy-based Ingress Controller -* The cluster metrics collection, or monitoring service, including components for monitoring user-defined projects -* Cluster aggregated logging -* Service brokers -* Red Hat Quay -* {rh-storage-first} -* Red Hat Advanced Cluster Manager -* Red Hat Advanced Cluster Security for Kubernetes -* Red Hat OpenShift GitOps -* Red Hat OpenShift Pipelines - -// Updated the list to match the list under "Red Hat OpenShift control plane and infrastructure nodes" in https://www.redhat.com/en/resources/openshift-subscription-sizing-guide - -Any node that runs any other container, pod, or component is a worker node that your subscription must cover. diff --git a/modules/infrastructure-moving-logging.adoc b/modules/infrastructure-moving-logging.adoc deleted file mode 100644 index 2b1a89a59aa2..000000000000 --- a/modules/infrastructure-moving-logging.adoc +++ /dev/null @@ -1,233 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * logging/cluster-logging-moving.adoc - -:_content-type: PROCEDURE -[id="infrastructure-moving-logging_{context}"] -= Moving OpenShift Logging resources - -You can configure the Cluster Logging Operator to deploy the pods for {logging} components, such as Elasticsearch and Kibana, to different nodes. You cannot move the Cluster Logging Operator pod from its installed location. - -For example, you can move the Elasticsearch pods to a separate node because of high CPU, memory, and disk requirements. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. These features are not installed by default. - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc edit ClusterLogging instance ----- -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging - -... 
- -spec: - collection: - logs: - fluentd: - resources: null - type: fluentd - logStore: - elasticsearch: - nodeCount: 3 - nodeSelector: <1> - node-role.kubernetes.io/infra: '' - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved - redundancyPolicy: SingleRedundancy - resources: - limits: - cpu: 500m - memory: 16Gi - requests: - cpu: 500m - memory: 16Gi - storage: {} - type: elasticsearch - managementState: Managed - visualization: - kibana: - nodeSelector: <1> - node-role.kubernetes.io/infra: '' - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved - proxy: - resources: null - replicas: 1 - resources: null - type: kibana - -... ----- -<1> Add a `nodeSelector` parameter with the appropriate value to the component you want to move. You can use a `nodeSelector` in the format shown or use `: ` pairs, based on the value specified for the node. If you added a taint to the infrasructure node, also add a matching toleration. - -.Verification - -To verify that a component has moved, you can use the `oc get pod -o wide` command. - -For example: - -* You want to move the Kibana pod from the `ip-10-0-147-79.us-east-2.compute.internal` node: -+ -[source,terminal] ----- -$ oc get pod kibana-5b8bdf44f9-ccpq9 -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -kibana-5b8bdf44f9-ccpq9 2/2 Running 0 27s 10.129.2.18 ip-10-0-147-79.us-east-2.compute.internal ----- - -* You want to move the Kibana pod to the `ip-10-0-139-48.us-east-2.compute.internal` node, a dedicated infrastructure node: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-133-216.us-east-2.compute.internal Ready master 60m v1.27.3 -ip-10-0-139-146.us-east-2.compute.internal Ready master 60m v1.27.3 -ip-10-0-139-192.us-east-2.compute.internal Ready worker 51m v1.27.3 -ip-10-0-139-241.us-east-2.compute.internal Ready worker 51m v1.27.3 -ip-10-0-147-79.us-east-2.compute.internal Ready worker 51m v1.27.3 -ip-10-0-152-241.us-east-2.compute.internal Ready master 60m v1.27.3 -ip-10-0-139-48.us-east-2.compute.internal Ready infra 51m v1.27.3 ----- -+ -Note that the node has a `node-role.kubernetes.io/infra: ''` label: -+ -[source,terminal] ----- -$ oc get node ip-10-0-139-48.us-east-2.compute.internal -o yaml ----- -+ -.Example output -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: ip-10-0-139-48.us-east-2.compute.internal - selfLink: /api/v1/nodes/ip-10-0-139-48.us-east-2.compute.internal - uid: 62038aa9-661f-41d7-ba93-b5f1b6ef8751 - resourceVersion: '39083' - creationTimestamp: '2020-04-13T19:07:55Z' - labels: - node-role.kubernetes.io/infra: '' -... ----- - -* To move the Kibana pod, edit the `ClusterLogging` CR to add a node selector: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging - -... - -spec: - -... - - visualization: - kibana: - nodeSelector: <1> - node-role.kubernetes.io/infra: '' - proxy: - resources: null - replicas: 1 - resources: null - type: kibana ----- -<1> Add a node selector to match the label in the node specification. 
- -* After you save the CR, the current Kibana pod is terminated and new pod is deployed: -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cluster-logging-operator-84d98649c4-zb9g7 1/1 Running 0 29m -elasticsearch-cdm-hwv01pf7-1-56588f554f-kpmlg 2/2 Running 0 28m -elasticsearch-cdm-hwv01pf7-2-84c877d75d-75wqj 2/2 Running 0 28m -elasticsearch-cdm-hwv01pf7-3-f5d95b87b-4nx78 2/2 Running 0 28m -fluentd-42dzz 1/1 Running 0 28m -fluentd-d74rq 1/1 Running 0 28m -fluentd-m5vr9 1/1 Running 0 28m -fluentd-nkxl7 1/1 Running 0 28m -fluentd-pdvqb 1/1 Running 0 28m -fluentd-tflh6 1/1 Running 0 28m -kibana-5b8bdf44f9-ccpq9 2/2 Terminating 0 4m11s -kibana-7d85dcffc8-bfpfp 2/2 Running 0 33s ----- - -* The new pod is on the `ip-10-0-139-48.us-east-2.compute.internal` node: -+ -[source,terminal] ----- -$ oc get pod kibana-7d85dcffc8-bfpfp -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -kibana-7d85dcffc8-bfpfp 2/2 Running 0 43s 10.131.0.22 ip-10-0-139-48.us-east-2.compute.internal ----- - -* After a few moments, the original Kibana pod is removed. -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cluster-logging-operator-84d98649c4-zb9g7 1/1 Running 0 30m -elasticsearch-cdm-hwv01pf7-1-56588f554f-kpmlg 2/2 Running 0 29m -elasticsearch-cdm-hwv01pf7-2-84c877d75d-75wqj 2/2 Running 0 29m -elasticsearch-cdm-hwv01pf7-3-f5d95b87b-4nx78 2/2 Running 0 29m -fluentd-42dzz 1/1 Running 0 29m -fluentd-d74rq 1/1 Running 0 29m -fluentd-m5vr9 1/1 Running 0 29m -fluentd-nkxl7 1/1 Running 0 29m -fluentd-pdvqb 1/1 Running 0 29m -fluentd-tflh6 1/1 Running 0 29m -kibana-7d85dcffc8-bfpfp 2/2 Running 0 62s ----- diff --git a/modules/infrastructure-moving-monitoring.adoc b/modules/infrastructure-moving-monitoring.adoc deleted file mode 100644 index 13273b9b8e9a..000000000000 --- a/modules/infrastructure-moving-monitoring.adoc +++ /dev/null @@ -1,127 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc - -:_content-type: PROCEDURE -[id="infrastructure-moving-monitoring_{context}"] -= Moving the monitoring solution - -The monitoring stack includes multiple components, including Prometheus, Thanos Querier, and Alertmanager. -The Cluster Monitoring Operator manages this stack. To redeploy the monitoring stack to infrastructure nodes, you can create and apply a custom config map. - -.Procedure - -. 
Edit the `cluster-monitoring-config` config map and change the `nodeSelector` to use the `infra` label: -+ -[source,terminal] ----- -$ oc edit configmap cluster-monitoring-config -n openshift-monitoring ----- -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: |+ - alertmanagerMain: - nodeSelector: <1> - node-role.kubernetes.io/infra: "" - tolerations: - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoSchedule - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoExecute - prometheusK8s: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoSchedule - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoExecute - prometheusOperator: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoSchedule - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoExecute - k8sPrometheusAdapter: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoSchedule - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoExecute - kubeStateMetrics: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoSchedule - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoExecute - telemeterClient: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoSchedule - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoExecute - openshiftStateMetrics: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoSchedule - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoExecute - thanosQuerier: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoSchedule - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoExecute ----- -<1> Add a `nodeSelector` parameter with the appropriate value to the component you want to move. You can use a `nodeSelector` in the format shown or use `: ` pairs, based on the value specified for the node. If you added a taint to the infrasructure node, also add a matching toleration. - -. Watch the monitoring pods move to the new machines: -+ -[source,terminal] ----- -$ watch 'oc get pod -n openshift-monitoring -o wide' ----- - -. If a component has not moved to the `infra` node, delete the pod with this component: -+ -[source,terminal] ----- -$ oc delete pod -n openshift-monitoring ----- -+ -The component from the deleted pod is re-created on the `infra` node. diff --git a/modules/infrastructure-moving-registry.adoc b/modules/infrastructure-moving-registry.adoc deleted file mode 100644 index 9d70bb2badeb..000000000000 --- a/modules/infrastructure-moving-registry.adoc +++ /dev/null @@ -1,103 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc - -:_content-type: PROCEDURE -[id="infrastructure-moving-registry_{context}"] -= Moving the default registry - -You configure the registry Operator to deploy its pods to different nodes. 
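Before you change the placement, it can help to confirm which nodes currently run the registry pods. This check is illustrative only and is not part of the documented procedure:

[source,terminal]
----
$ oc get pods -n openshift-image-registry -o wide
----

The `NODE` column in the output shows where each registry pod is currently scheduled.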
- -.Prerequisites - -* Configure additional compute machine sets in your {product-title} cluster. - -.Procedure - -. View the `config/instance` object: -+ -[source,terminal] ----- -$ oc get configs.imageregistry.operator.openshift.io/cluster -o yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: imageregistry.operator.openshift.io/v1 -kind: Config -metadata: - creationTimestamp: 2019-02-05T13:52:05Z - finalizers: - - imageregistry.operator.openshift.io/finalizer - generation: 1 - name: cluster - resourceVersion: "56174" - selfLink: /apis/imageregistry.operator.openshift.io/v1/configs/cluster - uid: 36fd3724-294d-11e9-a524-12ffeee2931b -spec: - httpSecret: d9a012ccd117b1e6616ceccb2c3bb66a5fed1b5e481623 - logging: 2 - managementState: Managed - proxy: {} - replicas: 1 - requests: - read: {} - write: {} - storage: - s3: - bucket: image-registry-us-east-1-c92e88cad85b48ec8b312344dff03c82-392c - region: us-east-1 -status: -... ----- - -. Edit the `config/instance` object: -+ -[source,terminal] ----- -$ oc edit configs.imageregistry.operator.openshift.io/cluster ----- -+ -[source,yaml] ----- -spec: - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - namespaces: - - openshift-image-registry - topologyKey: kubernetes.io/hostname - weight: 100 - logLevel: Normal - managementState: Managed - nodeSelector: <1> - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved ----- -<1> Add a `nodeSelector` parameter with the appropriate value to the component you want to move. You can use a `nodeSelector` in the format shown or use `: ` pairs, based on the value specified for the node. If you added a taint to the infrasructure node, also add a matching toleration. - -. Verify the registry pod has been moved to the infrastructure node. -+ -.. Run the following command to identify the node where the registry pod is located: -+ -[source,terminal] ----- -$ oc get pods -o wide -n openshift-image-registry ----- -+ -.. Confirm the node has the label you specified: -+ -[source,terminal] ----- -$ oc describe node ----- -+ -Review the command output and confirm that `node-role.kubernetes.io/infra` is in the `LABELS` list. diff --git a/modules/infrastructure-moving-router.adoc b/modules/infrastructure-moving-router.adoc deleted file mode 100644 index 3e4a8ab0a087..000000000000 --- a/modules/infrastructure-moving-router.adoc +++ /dev/null @@ -1,110 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc - -:_content-type: PROCEDURE -[id="infrastructure-moving-router_{context}"] -= Moving the router - -You can deploy the router pod to a different compute machine set. By default, the pod is deployed to a worker node. - -.Prerequisites - -* Configure additional compute machine sets in your {product-title} cluster. - -.Procedure - -. 
View the `IngressController` custom resource for the router Operator: -+ -[source,terminal] ----- -$ oc get ingresscontroller default -n openshift-ingress-operator -o yaml ----- -+ -The command output resembles the following text: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - creationTimestamp: 2019-04-18T12:35:39Z - finalizers: - - ingresscontroller.operator.openshift.io/finalizer-ingresscontroller - generation: 1 - name: default - namespace: openshift-ingress-operator - resourceVersion: "11341" - selfLink: /apis/operator.openshift.io/v1/namespaces/openshift-ingress-operator/ingresscontrollers/default - uid: 79509e05-61d6-11e9-bc55-02ce4781844a -spec: {} -status: - availableReplicas: 2 - conditions: - - lastTransitionTime: 2019-04-18T12:36:15Z - status: "True" - type: Available - domain: apps..example.com - endpointPublishingStrategy: - type: LoadBalancerService - selector: ingresscontroller.operator.openshift.io/deployment-ingresscontroller=default ----- - -. Edit the `ingresscontroller` resource and change the `nodeSelector` to use the `infra` label: -+ -[source,terminal] ----- -$ oc edit ingresscontroller default -n openshift-ingress-operator ----- -+ -[source,yaml] ----- - spec: - nodePlacement: - nodeSelector: <1> - matchLabels: - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved ----- -<1> Add a `nodeSelector` parameter with the appropriate value to the component you want to move. You can use a `nodeSelector` in the format shown or use `: ` pairs, based on the value specified for the node. If you added a taint to the infrastructure node, also add a matching toleration. - -. Confirm that the router pod is running on the `infra` node. -.. View the list of router pods and note the node name of the running pod: -+ -[source,terminal] ----- -$ oc get pod -n openshift-ingress -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -router-default-86798b4b5d-bdlvd 1/1 Running 0 28s 10.130.2.4 ip-10-0-217-226.ec2.internal -router-default-955d875f4-255g8 0/1 Terminating 0 19h 10.129.2.4 ip-10-0-148-172.ec2.internal ----- -+ -In this example, the running pod is on the `ip-10-0-217-226.ec2.internal` node. - -.. View the node status of the running pod: -+ -[source,terminal] ----- -$ oc get node <1> ----- -<1> Specify the `` that you obtained from the pod list. -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-217-226.ec2.internal Ready infra,worker 17h v1.27.3 ----- -+ -Because the role list includes `infra`, the pod is running on the correct node. diff --git a/modules/infrastructure-node-sizing.adoc b/modules/infrastructure-node-sizing.adoc deleted file mode 100644 index b36e798bab5b..000000000000 --- a/modules/infrastructure-node-sizing.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/recommended-performance-scale-practices/recommended-infrastructure-practices.adoc - -[id="infrastructure-node-sizing_{context}"] -= Infrastructure node sizing - -_Infrastructure nodes_ are nodes that are labeled to run pieces of the {product-title} environment. 
The infrastructure node resource requirements depend on the cluster age, nodes, and objects in the cluster, as these factors can lead to an increase in the number of metrics or time series in Prometheus. The following infrastructure node size recommendations are based on the results observed in cluster-density testing detailed in the *Control plane node sizing* section, where the monitoring stack and the default ingress-controller were moved to these nodes. - -[options="header",cols="4*"] -|=== -| Number of worker nodes |Cluster density, or number of namespaces |CPU cores |Memory (GB) - -| 27 -| 500 -| 4 -| 24 - -| 120 -| 1000 -| 8 -| 48 - -| 252 -| 4000 -| 16 -| 128 - -| 501 -| 4000 -| 32 -| 128 - -|=== - -In general, three infrastructure nodes are recommended per cluster. - -[IMPORTANT] -==== -These sizing recommendations should be used as a guideline. Prometheus is a highly memory intensive application; the resource usage depends on various factors including the number of nodes, objects, the Prometheus metrics scraping interval, metrics or time series, and the age of the cluster. In addition, the router resource usage can also be affected by the number of routes and the amount/type of inbound requests. - -These recommendations apply only to infrastructure nodes hosting Monitoring, Ingress and Registry infrastructure components installed during cluster creation. -==== - -[NOTE] -==== -In {product-title} {product-version}, half of a CPU core (500 millicore) is now reserved by the system by default compared to {product-title} 3.11 and previous versions. This influences the stated sizing recommendations. -==== diff --git a/modules/ingress-liveness-readiness-startup-probes.adoc b/modules/ingress-liveness-readiness-startup-probes.adoc deleted file mode 100644 index 1459013e1e9b..000000000000 --- a/modules/ingress-liveness-readiness-startup-probes.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// * scalability_and_performance/optimization/routing-optimization.adoc -// * post_installation_configuration/network-configuration.adoc - -:_content-type: REFERENCE -[id="ingress-liveness-readiness-startup-probes_{context}"] -= Configuring Ingress Controller liveness, readiness, and startup probes - -Cluster administrators can configure the timeout values for the kubelet's liveness, readiness, and startup probes for router deployments that are managed by the {product-title} Ingress Controller (router). The liveness and readiness probes of the router use the default timeout value -of 1 second, which is too brief when networking or runtime performance is severely degraded. Probe timeouts can cause unwanted router restarts that interrupt application connections. The ability to set larger timeout values can reduce the risk of unnecessary and unwanted restarts. - -You can update the `timeoutSeconds` value on the `livenessProbe`, `readinessProbe`, and `startupProbe` parameters of the router container. - -[cols="3a,8a",options="header"] -|=== - |Parameter |Description - - |`livenessProbe` - |The `livenessProbe` reports to the kubelet whether a pod is dead and needs to be restarted. - - |`readinessProbe` - |The `readinessProbe` reports whether a pod is healthy or unhealthy. When the readiness probe reports an unhealthy pod, then the kubelet marks the pod as not ready to accept traffic. Subsequently, the endpoints for that pod are marked as not ready, and this status propagates to the kube-proxy. 
On cloud platforms with a configured load balancer, the kube-proxy communicates to the cloud load-balancer not to send traffic to the node with that pod. - - |`startupProbe` - |The `startupProbe` gives the router pod up to 2 minutes to initialize before the kubelet begins sending the router liveness and readiness probes. This initialization time can prevent routers with many routes or endpoints from prematurely restarting. -|=== - - -[IMPORTANT] -==== -The timeout configuration option is an advanced tuning technique that can be used to work around issues. However, these issues should eventually be diagnosed and possibly a support case or https://issues.redhat.com/secure/CreateIssueDetails!init.jspa?pid=12332330&summary=Summary&issuetype=1&priority=10200&versions=12385624[Jira issue] opened for any issues that causes probes to time out. -==== - -The following example demonstrates how you can directly patch the default router deployment to set a 5-second timeout for the liveness and readiness probes: - - -[source, terminal] ----- -$ oc -n openshift-ingress patch deploy/router-default --type=strategic --patch='{"spec":{"template":{"spec":{"containers":[{"name":"router","livenessProbe":{"timeoutSeconds":5},"readinessProbe":{"timeoutSeconds":5}}]}}}}' ----- - -.Verification -[source, terminal] ----- -$ oc -n openshift-ingress describe deploy/router-default | grep -e Liveness: -e Readiness: - Liveness: http-get http://:1936/healthz delay=0s timeout=5s period=10s #success=1 #failure=3 - Readiness: http-get http://:1936/healthz/ready delay=0s timeout=5s period=10s #success=1 #failure=3 ----- diff --git a/modules/ingress-operator.adoc b/modules/ingress-operator.adoc deleted file mode 100644 index 595f04371018..000000000000 --- a/modules/ingress-operator.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="ingress-operator_{context}"] -= Ingress Operator - -[discrete] -== Purpose - -The Ingress Operator configures and manages the {product-title} router. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-ingress-operator[openshift-ingress-operator] - -[discrete] -== CRDs - -* `clusteringresses.ingress.openshift.io` -** Scope: Namespaced -** CR: `clusteringresses` -** Validation: No - -[discrete] -== Configuration objects - -* Cluster config -** Type Name: `clusteringresses.ingress.openshift.io` -** Instance Name: `default` -** View Command: -+ -[source,terminal] ----- -$ oc get clusteringresses.ingress.openshift.io -n openshift-ingress-operator default -o yaml ----- - -[discrete] -== Notes - -The Ingress Operator sets up the router in the `openshift-ingress` project and creates the deployment for the router: - -[source,terminal] ----- -$ oc get deployment -n openshift-ingress ----- - -The Ingress Operator uses the `clusterNetwork[].cidr` from the `network/cluster` status to determine what mode (IPv4, IPv6, or dual stack) the managed Ingress Controller (router) should operate in. For example, if `clusterNetwork` contains only a v6 `cidr`, then the Ingress Controller operates in IPv6-only mode. 
- -In the following example, Ingress Controllers managed by the Ingress Operator will run in IPv4-only mode because only one cluster network exists and the network is an IPv4 `cidr`: - -[source,terminal] ----- -$ oc get network/cluster -o jsonpath='{.status.clusterNetwork[*]}' ----- - -.Example output -[source,terminal] ----- -map[cidr:10.128.0.0/14 hostPrefix:23] ----- diff --git a/modules/insights-operator-about.adoc b/modules/insights-operator-about.adoc deleted file mode 100644 index c98ba34c3b4e..000000000000 --- a/modules/insights-operator-about.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/about-remote-health-monitoring.adoc -// * sd_support/remote_health_monitoring/about-remote-health-monitoring.adoc - -:_content-type: CONCEPT -[id="insights-operator-about_{context}"] -= About the Insights Operator - -The Insights Operator periodically gathers configuration and component failure status and, by default, reports that data every two hours to Red Hat. This information enables Red Hat to assess configuration and deeper failure data than is reported through Telemetry. - -Users of {product-title} can display the report of each cluster in the {insights-advisor-url} service on {hybrid-console}. If any issues have been identified, Insights provides further details and, if available, steps on how to solve a problem. - -The Insights Operator does not collect identifying information, such as user names, passwords, or certificates. See link:https://console.redhat.com/security/insights[Red Hat Insights Data & Application Security] for information about Red Hat Insights data collection and controls. - -Red Hat uses all connected cluster information to: - -* Identify potential cluster issues and provide a solution and preventive actions in the {insights-advisor-url} service on {hybrid-console} -* Improve {product-title} by providing aggregated and critical information to product and support teams -* Make {product-title} more intuitive diff --git a/modules/insights-operator-advisor-overview.adoc b/modules/insights-operator-advisor-overview.adoc deleted file mode 100644 index 30fec852260f..000000000000 --- a/modules/insights-operator-advisor-overview.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc -// * sd_support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc - -:_content-type: CONCEPT -[id="insights-operator-advisor-overview_{context}"] -= About Red Hat Insights Advisor for {product-title} - -You can use Insights Advisor to assess and monitor the health of your {product-title} clusters. Whether you are concerned about individual clusters, or with your whole infrastructure, it is important to be aware of your exposure to issues that can affect service availability, fault tolerance, performance, or security. - -Insights repeatedly analyzes the data that Insights Operator sends using a database of _recommendations_, which are sets of conditions that can leave your {product-title} clusters at risk. Your data is then uploaded to the Insights Advisor service on Red Hat Hybrid Cloud Console where you can perform the following actions: - -* See clusters impacted by a specific recommendation. -* Use robust filtering capabilities to refine your results to those recommendations. 
-* Learn more about individual recommendations, details about the risks they present, and get resolutions tailored to your individual clusters. -* Share results with other stakeholders. diff --git a/modules/insights-operator-advisor-recommendations.adoc b/modules/insights-operator-advisor-recommendations.adoc deleted file mode 100644 index 1c2f1a6088cf..000000000000 --- a/modules/insights-operator-advisor-recommendations.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc -// * sd_support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc - -:_content-type: CONCEPT -[id="insights-operator-advisor-recommendations_{context}"] -= Understanding Insights Advisor recommendations - -Insights Advisor bundles information about various cluster states and component configurations that can negatively affect the service availability, fault tolerance, performance, or security of your clusters. This information set is called a recommendation in Insights Advisor and includes the following information: - -* *Name:* A concise description of the recommendation -* *Added:* When the recommendation was published to the Insights Advisor archive -* *Category:* Whether the issue has the potential to negatively affect service availability, fault tolerance, performance, or security -* *Total risk:* A value derived from the _likelihood_ that the condition will negatively affect your infrastructure, and the _impact_ on operations if that were to happen -* *Clusters:* A list of clusters on which a recommendation is detected -* *Description:* A brief synopsis of the issue, including how it affects your clusters -* *Link to associated topics:* More information from Red Hat about the issue diff --git a/modules/insights-operator-configuring-sca.adoc b/modules/insights-operator-configuring-sca.adoc deleted file mode 100644 index 66dc1f5f163e..000000000000 --- a/modules/insights-operator-configuring-sca.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/insights-operator-simple-access.adoc -// * sd_support/remote_health_monitoring/insights-operator-simple-access.adoc - - -:_content-type: PROCEDURE -[id="insights-operator-configuring-sca_{context}"] -= Configuring simple content access import interval - -You can configure how often the Insights Operator imports the simple content access entitlements using the `support` secret in the `openshift-config` namespace. The entitlement import normally occurs every eight hours, but you can shorten this interval if you update your simple content access configuration in Red Hat Subscription Management. - -This procedure describes how to update the import interval to one hour. - -.Prerequisites - -* You are logged in to the {product-title} web console as `cluster-admin`. - -.Procedure - -. Navigate to *Workloads* -> *Secrets*. -. Select the *openshift-config* project. -. Search for the *support* secret using the *Search by name* field. If it does not exist, click *Create* -> *Key/value secret* to create it. -. Click the *Options* menu {kebab}, and then click *Edit Secret*. -. Click *Add Key/Value*. -. Create a key named `scaInterval` with a value of `1h`, and click *Save*. -+ -[NOTE] -==== -The interval `1h` can also be entered as `60m` for 60 minutes. 
-==== diff --git a/modules/insights-operator-configuring.adoc b/modules/insights-operator-configuring.adoc deleted file mode 100644 index 258d044fa211..000000000000 --- a/modules/insights-operator-configuring.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-operator.adoc - - -:_content-type: PROCEDURE -[id="insights-operator-configuring-sca_{context}"] -= Configuring Insights Operator - -You can configure Insights Operator to meet the needs of your organization. The Insights Operator is configured using a combination of the default configurations in the `pod.yaml` file in the Insights Operator `Config` directory and the configurations stored in the `support` secret in the `openshift-config` namespace. The `support` secret does not exist by default and must be created when adding custom configurations for the first time. Configurations in the `support` secret override the defaults set in the `pod.yaml` file. - -The table below describes the available configuration attributes: - -.Insights Operator configurable attributes -[options="header"] -|==== -|Attribute name|Description|Value type|Default value -|`username`|Specifies username for basic authentication with `console.redhat.com` (overrides the default `pull-secret` token authentication when set)|String|Not set -|`password`|Specifies password for basic authentication with `console.redhat.com` (overrides the default `pull-secret` token authentication when set)|String|Not set -|`enableGlobalObfuscation`|Enables the global obfuscation of IP addresses and the cluster domain name|Boolean|`false` -|`scaInterval`|Specifies the frequency of the simple content access entitlements download|Time interval|`8h` -|`scaPullDisabled`|Disables the simple content access entitlements download|Boolean|`false` -|`clusterTransferInterval`|Specifies how often Insights Operator checks OpenShift Cluster Manager for available cluster transfers|Time interval|`24h` -|`disableInsightsAlerts`|Disables Insights Operator alerts to the cluster Prometheus instance|Boolean|`False` -|==== - -This procedure describes how to set custom Insights Operator configurations. - -[IMPORTANT] -==== -Red Hat recommends you consult Red Hat Support before making changes to the default Insights Operator configuration. -==== - -.Prerequisites - -* You are logged in to the {product-title} web console as a user with `cluster-admin` role. - -.Procedure - -. Navigate to *Workloads* -> *Secrets*. -. On the *Secrets* page, select *All Projects* from the *Project* list, and then set *Show default projects* to on. -. Select the *openshift-config* project from the *Project* list. -. Search for the *support* secret using the *Search by name* field. If it does not exist, click *Create* -> *Key/value secret* to create it. -. Click the *Options* menu {kebab} for the secret, and then click *Edit Secret*. -. Click *Add Key/Value*. -. Enter an attribute name with an appropriate value (see table above), and click *Save*. -. Repeat the above steps for any additional configurations. 
diff --git a/modules/insights-operator-disabling-sca.adoc b/modules/insights-operator-disabling-sca.adoc deleted file mode 100644 index 8f6a91e2df09..000000000000 --- a/modules/insights-operator-disabling-sca.adoc +++ /dev/null @@ -1,31 +0,0 @@
// Module included in the following assemblies:
//
// * support/remote_health_monitoring/insights-operator-simple-access.adoc
// * sd_support/remote_health_monitoring/insights-operator-simple-access.adoc

:_content-type: PROCEDURE
[id="insights-operator-disabling-sca_{context}"]
= Disabling simple content access import

You can disable the importing of simple content access entitlements using the `support` secret in the `openshift-config` namespace.

.Prerequisites

* You are logged in to the {product-title} web console as `cluster-admin`.

.Procedure

. Navigate to *Workloads* -> *Secrets*.
. Select the *openshift-config* project.
. Search for the *support* secret using the *Search by name* field. If it does not exist, click *Create* -> *Key/value secret* to create it.
. Click the *Options* menu {kebab}, and then click *Edit Secret*.
. Click *Add Key/Value*.
. Create a key named `scaPullDisabled` with a value of `true`, and click *Save*.
+
The simple content access entitlement import is now disabled.
+
[NOTE]
====
To enable the simple content access import again, edit the `support` secret and delete the `scaPullDisabled` key.
====
diff --git a/modules/insights-operator-downloading-archive.adoc b/modules/insights-operator-downloading-archive.adoc deleted file mode 100644 index 81df8ba614c2..000000000000 --- a/modules/insights-operator-downloading-archive.adoc +++ /dev/null @@ -1,32 +0,0 @@
// Module included in the following assemblies:
//
// * support/remote_health_monitoring/using-insights-operator.adoc

:_content-type: PROCEDURE
[id="insights-operator-downloading-archive_{context}"]
= Downloading your Insights Operator archive

The Insights Operator stores gathered data in an archive located in the `openshift-insights` namespace of your cluster. You can download and review the data that is gathered by the Insights Operator.

.Prerequisites

* Access to the cluster as a user with the `cluster-admin` role.

.Procedure

. Find the name of the running pod for the Insights Operator:
+
[source,terminal]
----
$ oc get pods --namespace=openshift-insights -o custom-columns=:metadata.name --no-headers --field-selector=status.phase=Running
----

. Copy the recent data archives collected by the Insights Operator:
+
[source,terminal]
----
$ oc cp openshift-insights/<pod_name>:/var/lib/insights-operator ./insights-data <1>
----
<1> Replace `<pod_name>` with the pod name output from the preceding command.

The recent Insights Operator archives are now available in the `insights-data` directory.
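The two steps in this procedure can also be combined into a short shell snippet. This is only a sketch and assumes a single running Insights Operator pod:

[source,terminal]
----
$ INSIGHTS_POD=$(oc get pods -n openshift-insights -o custom-columns=:metadata.name \
    --no-headers --field-selector=status.phase=Running | head -n 1)
$ oc cp openshift-insights/$INSIGHTS_POD:/var/lib/insights-operator ./insights-data
----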
diff --git a/modules/insights-operator-enable-obfuscation.adoc b/modules/insights-operator-enable-obfuscation.adoc deleted file mode 100644 index b095cb44ae56..000000000000 --- a/modules/insights-operator-enable-obfuscation.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/remote-health-reporting-from-restricted-network.adoc -// * sd_support/remote_health_monitoring/remote-health-reporting-from-restricted-network.adoc - - - -:_content-type: PROCEDURE -[id="insights-operator-enable-obfuscation_{context}"] -= Enabling Insights Operator data obfuscation - -You can enable obfuscation to mask sensitive and identifiable IPv4 addresses and cluster base domains that the Insights Operator sends to link:https://console.redhat.com[console.redhat.com]. - -[WARNING] -==== -Although this feature is available, Red Hat recommends keeping obfuscation disabled for a more effective support experience. -==== - -Obfuscation assigns non-identifying values to cluster IPv4 addresses, and uses a translation table that is retained in memory to change IP addresses to their obfuscated versions throughout the Insights Operator archive before uploading the data to link:https://console.redhat.com[console.redhat.com]. - -For cluster base domains, obfuscation changes the base domain to a hardcoded substring. For example, `cluster-api.openshift.example.com` becomes `cluster-api.`. - -The following procedure enables obfuscation using the `support` secret in the `openshift-config` namespace. - -.Prerequisites - -* You are logged in to the {product-title} web console as `cluster-admin`. - -.Procedure - -. Navigate to *Workloads* -> *Secrets*. -. Select the *openshift-config* project. -. Search for the *support* secret using the *Search by name* field. If it does not exist, click *Create* -> *Key/value secret* to create it. -. Click the *Options* menu {kebab}, and then click *Edit Secret*. -. Click *Add Key/Value*. -. Create a key named `enableGlobalObfuscation` with a value of `true`, and click *Save*. -. Navigate to *Workloads* -> *Pods* -. Select the `openshift-insights` project. -. Find the `insights-operator` pod. -. To restart the `insights-operator` pod, click the *Options* menu {kebab}, and then click *Delete Pod*. - -.Verification - -. Navigate to *Workloads* -> *Secrets*. -. Select the *openshift-insights* project. -. Search for the *obfuscation-translation-table* secret using the *Search by name* field. - -If the `obfuscation-translation-table` secret exists, then obfuscation is enabled and working. - -Alternatively, you can inspect `/insights-operator/gathers.json` in your Insights Operator archive for the value `"is_global_obfuscation_enabled": true`. diff --git a/modules/insights-operator-gather-duration.adoc b/modules/insights-operator-gather-duration.adoc deleted file mode 100644 index 9159f06550b5..000000000000 --- a/modules/insights-operator-gather-duration.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-operator.adoc - -:_content-type: PROCEDURE -[id="insights-operator-gather-duration_{context}"] -= Viewing Insights Operator gather durations - -You can view the time it takes for the Insights Operator to gather the information contained in the archive. This helps you to understand Insights Operator resource usage and issues with Insights Advisor. - - -.Prerequisites - -* A recent copy of your Insights Operator archive. - -.Procedure - -. 
From your archive, open `/insights-operator/gathers.json`.
+
The file contains a list of Insights Operator gather operations:
+
[source,json]
----
    {
      "name": "clusterconfig/authentication",
      "duration_in_ms": 730, <1>
      "records_count": 1,
      "errors": null,
      "panic": null
    }
----
+
<1> `duration_in_ms` is the amount of time in milliseconds for each gather operation.

. Inspect each gather operation for abnormalities.
diff --git a/modules/insights-operator-manual-upload.adoc b/modules/insights-operator-manual-upload.adoc deleted file mode 100644 index b705a2c1b211..000000000000 --- a/modules/insights-operator-manual-upload.adoc +++ /dev/null @@ -1,74 +0,0 @@
// Module included in the following assemblies:
//
// * support/remote_health_monitoring/remote-health-reporting-from-restricted-network.adoc
// * sd_support/remote_health_monitoring/remote-health-reporting-from-restricted-network.adoc

:_content-type: PROCEDURE
[id="insights-operator-manual-upload_{context}"]
= Uploading an Insights Operator archive

You can manually upload an Insights Operator archive to link:https://console.redhat.com[console.redhat.com] to diagnose potential issues.

.Prerequisites

* You are logged in to {product-title} as `cluster-admin`.
* You have a workstation with unrestricted internet access.
* You have created a copy of the Insights Operator archive.

.Procedure

. Download the `dockerconfig.json` file:
+
[source,terminal]
----
$ oc extract secret/pull-secret -n openshift-config --to=.
----
. Copy your `"cloud.openshift.com"` `"auth"` token from the `dockerconfig.json` file:
+
[source,json,subs="+quotes"]
----
{
  "auths": {
    "cloud.openshift.com": {
      "auth": "_<your_token>_",
      "email": "asd@redhat.com"
    }
}
----

. Upload the archive to link:https://console.redhat.com[console.redhat.com]:
+
[source,terminal,subs="+quotes"]
----
$ curl -v -H "User-Agent: insights-operator/one10time200gather184a34f6a168926d93c330 cluster/_<cluster_id>_" -H "Authorization: Bearer _<your_token>_" -F "upload=@_<path_to_archive>_; type=application/vnd.redhat.openshift.periodic+tar" https://console.redhat.com/api/ingress/v1/upload
----
where `_<cluster_id>_` is your cluster ID, `_<your_token>_` is the token from your pull secret, and `_<path_to_archive>_` is the path to the Insights Operator archive.
+
If the operation is successful, the command returns a `"request_id"` and `"account_number"`:
+
.Example output
+
[source,terminal]
----
* Connection #0 to host console.redhat.com left intact
{"request_id":"393a7cf1093e434ea8dd4ab3eb28884c","upload":{"account_number":"6274079"}}%
----

.Verification steps

. Log in to link:https://console.redhat.com/openshift[].

. Click the *Clusters* menu in the left pane.

. To display the details of the cluster, click the cluster name.

. Open the *Insights Advisor* tab of the cluster.
+
If the upload was successful, the tab displays one of the following:
+
* *Your cluster passed all recommendations*, if Insights Advisor did not identify any issues.

* A list of issues that Insights Advisor has detected, prioritized by risk (low, moderate, important, and critical).
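If you have `jq` available on your workstation, which is an assumption and not a requirement of the documented procedure, you can extract the `cloud.openshift.com` token from the pull secret in one step instead of copying it by hand from the downloaded file:

[source,terminal]
----
$ oc get secret/pull-secret -n openshift-config \
    --template='{{index .data ".dockerconfigjson" | base64decode}}' \
    | jq -r '.auths["cloud.openshift.com"].auth'
----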
diff --git a/modules/insights-operator-new-pull-secret-disabled.adoc b/modules/insights-operator-new-pull-secret-disabled.adoc deleted file mode 100644 index f1078cb7a4c5..000000000000 --- a/modules/insights-operator-new-pull-secret-disabled.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc -// * sd_support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc - -:_content-type: PROCEDURE -[id="insights-operator-new-pull-secret_{context}"] -= Modifying the global cluster pull secret to disable remote health reporting - -You can modify your existing global cluster pull secret to disable remote health reporting. This disables both Telemetry and the Insights Operator. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Download the global cluster pull secret to your local file system. -+ -[source,terminal] ----- -$ oc extract secret/pull-secret -n openshift-config --to=. ----- - -. In a text editor, edit the `.dockerconfigjson` file that was downloaded. - -. Remove the `cloud.openshift.com` JSON entry, for example: -+ -[source,json] ----- -"cloud.openshift.com":{"auth":"","email":""} ----- - -. Save the file. - -You can now update your cluster to use this modified pull secret. diff --git a/modules/insights-operator-new-pull-secret-enable.adoc b/modules/insights-operator-new-pull-secret-enable.adoc deleted file mode 100644 index f563ef1c40e0..000000000000 --- a/modules/insights-operator-new-pull-secret-enable.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/enabling-remote-health-reporting.adoc -// * sd_support/remote_health_monitoring/enabling-remote-health-reporting.adoc - -:_content-type: PROCEDURE -[id="insights-operator-new-pull-secret-enable_{context}"] -= Modifying your global cluster pull secret to enable remote health reporting - -You can modify your existing global cluster pull secret to enable remote health reporting. If you have previously disabled remote health monitoring, you must first download a new pull secret with your `console.openshift.com` access token from {cluster-manager-first}. - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. -* Access to {cluster-manager}. - -.Procedure - -. Navigate to link:https://console.redhat.com/openshift/downloads[https://console.redhat.com/openshift/downloads]. -. From *Tokens* -> *Pull Secret*, click *Download*. -+ -The file `pull-secret.txt` containing your `cloud.openshift.com` access token in JSON format downloads: -+ -[source,json,subs="+quotes"] ----- -{ - "auths": { - "cloud.openshift.com": { - "auth": "__", - "email": "__" - } -} ----- - -. Download the global cluster pull secret to your local file system. -+ -[source,terminal] ----- -$ oc get secret/pull-secret -n openshift-config --template='{{index .data ".dockerconfigjson" | base64decode}}' > pull-secret ----- -. Make a backup copy of your pull secret. -+ -[source,terminal] ----- -$ cp pull-secret pull-secret-backup ----- -. Open the `pull-secret` file in a text editor. -. Append the `cloud.openshift.com` JSON entry from `pull-secret.txt` into `auths`. -. Save the file. -. Update the secret in your cluster. 
-+ -[source,terminal] ----- -oc set data secret/pull-secret -n openshift-config --from-file=.dockerconfigjson=pull-secret ----- - -It may take several minutes for the secret to update and your cluster to begin reporting. - -.Verification - -. Navigate to the {product-title} Web Console Overview page. -. *Insights* in the *Status* tile reports the number of issues found. diff --git a/modules/insights-operator-one-time-gather.adoc b/modules/insights-operator-one-time-gather.adoc deleted file mode 100644 index 0e3119e2926f..000000000000 --- a/modules/insights-operator-one-time-gather.adoc +++ /dev/null @@ -1,88 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/remote-health-reporting-from-restricted-network.adoc -// * sd_support/remote_health_monitoring/remote-health-reporting-from-restricted-network.adoc - - - -:_content-type: PROCEDURE -[id="insights-operator-one-time-gather_{context}"] -= Running an Insights Operator gather operation - -You must run a gather operation to create an Insights Operator archive. - -.Prerequisites - -* You are logged in to {product-title} as `cluster-admin`. - -.Procedure - -. Create a file named `gather-job.yaml` using this template: -+ -[source,yaml] ----- -include::https://raw.githubusercontent.com/openshift/insights-operator/release-4.14/docs/gather-job.yaml[] ----- -. Copy your `insights-operator` image version: -+ -[source,terminal] ----- -$ oc get -n openshift-insights deployment insights-operator -o yaml ----- -. Paste your image version in `gather-job.yaml`: -+ -[source,yaml,subs="+quotes"] ----- -initContainers: - - name: insights-operator - image: __ - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: ----- -. Create the gather job: -+ -[source,terminal] ----- -$ oc apply -n openshift-insights -f gather-job.yaml ----- -. Find the name of the job pod: -+ -[source,terminal] ----- -$ oc describe -n openshift-insights job/insights-operator-job ----- -+ -.Example output -[source,terminal,subs="+quotes"] ----- -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulCreate 7m18s job-controller Created pod: insights-operator-job-__ ----- -where `insights-operator-job-__` is the name of the pod. - -. Verify that the operation has finished: -+ -[source,terminal,subs="+quotes"] ----- -$ oc logs -n openshift-insights insights-operator-job-__ insights-operator ----- -+ -.Example output -[source,terminal] ----- -I0407 11:55:38.192084 1 diskrecorder.go:34] Wrote 108 records to disk in 33ms ----- -. Save the created archive: -+ -[source,terminal,subs="+quotes"] ----- -$ oc cp openshift-insights/insights-operator-job-__:/var/lib/insights-operator ./insights-data ----- -. 
Clean up the job: -+ -[source,terminal] ----- -$ oc delete -n openshift-insights job insights-operator-job ----- diff --git a/modules/insights-operator-showing-data-collected-from-the-cluster.adoc b/modules/insights-operator-showing-data-collected-from-the-cluster.adoc deleted file mode 100644 index 64dbaf590eda..000000000000 --- a/modules/insights-operator-showing-data-collected-from-the-cluster.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_support/remote_health_monitoring/showing-data-collected-by-remote-health-monitoring.adoc -// * support/remote_health_monitoring/showing-data-collected-by-remote-health-monitoring.adoc -// * sd_support/remote_health_monitoring/showing-data-collected-by-remote-health-monitoring.adoc - -:_content-type: PROCEDURE -[id="insights-operator-showing-data-collected-from-the-cluster_{context}"] -= Showing data collected by the Insights Operator - -You can review the data that is collected by the Insights Operator. - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Find the name of the currently running pod for the Insights Operator: -+ -[source,terminal] ----- -$ INSIGHTS_OPERATOR_POD=$(oc get pods --namespace=openshift-insights -o custom-columns=:metadata.name --no-headers --field-selector=status.phase=Running) ----- - -. Copy the recent data archives collected by the Insights Operator: -+ -[source,terminal] ----- -$ oc cp openshift-insights/$INSIGHTS_OPERATOR_POD:/var/lib/insights-operator ./insights-data ----- - -The recent Insights Operator archives are now available in the `insights-data` directory. diff --git a/modules/insights-operator-what-information-is-collected.adoc b/modules/insights-operator-what-information-is-collected.adoc deleted file mode 100644 index 72e0cbcbf9a4..000000000000 --- a/modules/insights-operator-what-information-is-collected.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/about-remote-health-monitoring.adoc -// * sd_support/remote_health_monitoring/about-remote-health-monitoring.adoc - -[id="insights-operator-what-information-is-collected_{context}"] -= Information collected by the Insights Operator - -The following information is collected by the Insights Operator: - -* General information about your cluster and its components to identify issues that are specific to your {product-title} version and environment -* Configuration files, such as the image registry configuration, of your cluster to determine incorrect settings and issues that are specific to parameters you set -* Errors that occur in the cluster components -* Progress information of running updates, and the status of any component upgrades -* Details of the platform that {product-title} is deployed on, such as Amazon Web Services, and the region that the cluster is located in -ifndef::openshift-dedicated[] -* Cluster workload information transformed into discreet Secure Hash Algorithm (SHA) values, which allows Red Hat to assess workloads for security and version vulnerabilities without disclosing sensitive details -endif::openshift-dedicated[] -* If an Operator reports an issue, information is collected about core {product-title} pods in the `openshift-*` and `kube-*` projects. This includes state, resource, security context, volume information, and more. 
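To see what was gathered on your own cluster, you can list the contents of a downloaded archive. The following is a minimal sketch, assuming the archive was copied to `./insights-data` as described in "Showing data collected by the Insights Operator"; the archive file name is illustrative:

[source,terminal]
----
$ ls insights-data
$ tar tf insights-data/<archive_name>.tar.gz | head
----

Each entry in the archive corresponds to one of the data categories listed above.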
diff --git a/modules/insights-operator.adoc b/modules/insights-operator.adoc deleted file mode 100644 index c6b730da4cf7..000000000000 --- a/modules/insights-operator.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc -// * installing/cluster-capabilities.adoc - -ifeval::["{context}" == "cluster-capabilities"] -:cluster-caps: -endif::[] - -ifeval::["{context}" == "cluster-operators-ref"] -:operator-ref: -endif::[] - -:_content-type: REFERENCE -[id="insights-operator_{context}"] -ifdef::operator-ref[= Insights Operator] -ifdef::cluster-caps[= Insights capability] - -ifdef::operator-ref[] - -[NOTE] -==== -The Insights Operator is an optional cluster capability that can be disabled by cluster administrators during installation. For more information about optional cluster capabilities, see "Cluster capabilities" in _Installing_. -==== - -endif::operator-ref[] - -[discrete] -== Purpose - -ifdef::cluster-caps[] - -The Insights Operator provides the features for the `Insights` capability. - -endif::cluster-caps[] - -The Insights Operator gathers {product-title} configuration data and sends it to Red Hat. The data is used to produce proactive insights recommendations about potential issues that a cluster might be exposed to. These insights are communicated to cluster administrators through Insights Advisor on link:https://console.redhat.com/[console.redhat.com]. - -ifdef::operator-ref[] - -[discrete] -== Project - -link:https://github.com/openshift/insights-operator[insights-operator] - -[discrete] -== Configuration - -No configuration is required. - -endif::operator-ref[] - -[discrete] -== Notes - -Insights Operator complements {product-title} Telemetry. - -ifeval::["{context}" == "cluster-operators-ref"] -:!operator-ref: -endif::[] - -ifeval::["{context}" == "cluster-caps"] -:!cluster-caps: -endif::[] \ No newline at end of file diff --git a/modules/inspecting-pod-and-container-logs.adoc b/modules/inspecting-pod-and-container-logs.adoc deleted file mode 100644 index 011170bf8e26..000000000000 --- a/modules/inspecting-pod-and-container-logs.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/investigating-pod-issues.adoc - -:_content-type: PROCEDURE -[id="inspecting-pod-and-container-logs_{context}"] -= Inspecting pod and container logs - -You can inspect pod and container logs for warnings and error messages related to explicit pod failures. Depending on policy and exit code, pod and container logs remain available after pods have been terminated. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* Your API service is still functional. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Query logs for a specific pod: -+ -[source,terminal] ----- -$ oc logs ----- - -. Query logs for a specific container within a pod: -+ -[source,terminal] ----- -$ oc logs -c ----- -+ -Logs retrieved using the preceding `oc logs` commands are composed of messages sent to stdout within pods or containers. - -. Inspect logs contained in `/var/log/` within a pod. -.. List log files and subdirectories contained in `/var/log` within a pod: -+ -[source,terminal] ----- -$ oc exec ls -alh /var/log ----- -+ -.. Query a specific log file contained in `/var/log` within a pod: -+ -[source,terminal] ----- -$ oc exec cat /var/log/ ----- -.. 
List log files and subdirectories contained in `/var/log` within a specific container: -+ -[source,terminal] ----- -$ oc exec -c ls /var/log ----- -+ -.. Query a specific log file contained in `/var/log` within a specific container: -+ -[source,terminal] ----- -$ oc exec -c cat /var/log/ ----- diff --git a/modules/install-booting-from-an-iso-over-http-redfish.adoc b/modules/install-booting-from-an-iso-over-http-redfish.adoc deleted file mode 100644 index 80d732fc82a2..000000000000 --- a/modules/install-booting-from-an-iso-over-http-redfish.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_sno/install-sno-installing-sno.adoc - -:_content-type: PROCEDURE -[id="install-booting-from-an-iso-over-http-redfish_{context}"] -= Booting from an HTTP-hosted ISO image using the Redfish API - -You can provision hosts in your network using ISOs that you install using the Redfish Baseboard Management Controller (BMC) API. - -.Prerequisites - -. Download the installation {op-system-first} ISO. - -.Procedure - -. Copy the ISO file to an HTTP server accessible in your network. - -. Boot the host from the hosted ISO file, for example: - -.. Call the redfish API to set the hosted ISO as the `VirtualMedia` boot media by running the following command: -+ -[source,terminal] ----- -$ curl -k -u : -d '{"Image":"", "Inserted": true}' -H "Content-Type: application/json" -X POST /redfish/v1/Managers/iDRAC.Embedded.1/VirtualMedia/CD/Actions/VirtualMedia.InsertMedia ----- -+ -Where: -+ --- -::: Is the username and password for the target host BMC. -:: Is the URL for the hosted installation ISO, for example: `http://webserver.example.com/rhcos-live-minimal.iso`. The ISO must be accessible from the target host machine. -:: Is the BMC IP address of the target host machine. --- - -.. Set the host to boot from the `VirtualMedia` device by running the following command: -+ -[source,terminal] ----- -$ curl -k -u : -X PATCH -H 'Content-Type: application/json' -d '{"Boot": {"BootSourceOverrideTarget": "Cd", "BootSourceOverrideMode": "UEFI", "BootSourceOverrideEnabled": "Once"}}' /redfish/v1/Systems/System.Embedded.1 ----- - -.. Reboot the host: -+ -[source,terminal] ----- -$ curl -k -u : -d '{"ResetType": "ForceRestart"}' -H 'Content-type: application/json' -X POST /redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset ----- - -.. Optional: If the host is powered off, you can boot it using the `{"ResetType": "On"}` switch. Run the following command: -+ -[source,terminal] ----- -$ curl -k -u : -d '{"ResetType": "On"}' -H 'Content-type: application/json' -X POST /redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset ----- diff --git a/modules/install-creating-install-config-aws-local-zones.adoc b/modules/install-creating-install-config-aws-local-zones.adoc deleted file mode 100644 index 4ea327ea350c..000000000000 --- a/modules/install-creating-install-config-aws-local-zones.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// * installing/installing_aws/installing-aws-localzone.adoc - -:_content-type: PROCEDURE -[id="install-creating-install-config-aws-local-zones_{context}"] -= Modifying an installation configuration file to use AWS Local Zones subnets - -Modify an `install-config.yaml` file to include AWS Local Zones subnets. - -.Prerequisites - -* You created subnets by using the procedure "Creating a subnet in AWS Local Zones". 
-* You created an `install-config.yaml` file by using the procedure "Creating the installation configuration file". - -.Procedure - -* Add the VPC and Local Zone subnets as the values of the `platform.aws.subnets` property. As an example: -+ -[source,yaml] ----- -... -platform: - aws: - region: us-west-2 - subnets: <1> - - publicSubnetId-1 - - publicSubnetId-2 - - publicSubnetId-3 - - privateSubnetId-1 - - privateSubnetId-2 - - privateSubnetId-3 - - publicSubnetId-LocalZone-1 -... ----- -<1> List of subnets created in the Availability and Local Zones. \ No newline at end of file diff --git a/modules/install-ibm-cloud-configuring-the-install-config-file.adoc b/modules/install-ibm-cloud-configuring-the-install-config-file.adoc deleted file mode 100644 index 37b0209b8656..000000000000 --- a/modules/install-ibm-cloud-configuring-the-install-config-file.adoc +++ /dev/null @@ -1,112 +0,0 @@ -// This is included in the following assemblies: -// -// installing_ibm_cloud/install-ibm-cloud-installing-on-ibm-cloud.adoc - -:_content-type: PROCEDURE -[id="configuring-the-install-config-file_{context}"] -= Configuring the install-config.yaml file - -The `install-config.yaml` file requires some additional details. Most of the information is teaching the installer and the resulting cluster enough about the available {ibmcloudBMRegProductName} hardware so that it is able to fully manage it. The material difference between installing on bare metal and installing on {ibmcloudBMProductName} is that you must explicitly set the privilege level for IPMI in the BMC section of the `install-config.yaml` file. - -.Procedure - -. Configure `install-config.yaml`. Change the appropriate variables to match the environment, including `pullSecret` and `sshKey`. -+ -[source,yaml] ----- -apiVersion: v1 -baseDomain: -metadata: - name: -networking: - machineNetwork: - - cidr: - networkType: OVNKubernetes -compute: -- name: worker - replicas: 2 -controlPlane: - name: master - replicas: 3 - platform: - baremetal: {} -platform: - baremetal: - apiVIP: - ingressVIP: - provisioningNetworkInterface: - provisioningNetworkCIDR: - hosts: - - name: openshift-master-0 - role: master - bmc: - address: ipmi://10.196.130.145?privilegelevel=OPERATOR <1> - username: root - password: - bootMACAddress: 00:e0:ed:6a:ca:b4 <2> - rootDeviceHints: - deviceName: "/dev/sda" - - name: openshift-worker-0 - role: worker - bmc: - address: ipmi://?privilegelevel=OPERATOR <1> - username: - password: - bootMACAddress: <2> - rootDeviceHints: - deviceName: "/dev/sda" -pullSecret: '' -sshKey: '' ----- -+ -<1> The `bmc.address` provides a `privilegelevel` configuration setting with the value set to `OPERATOR`. This is required for {ibmcloudBMProductName} infrastructure. -<2> Add the MAC address of the private `provisioning` network NIC for the corresponding node. -+ -[NOTE] -==== -You can use the `ibmcloud` command-line utility to retrieve the password. - -[source,terminal] ----- -$ ibmcloud sl hardware detail --output JSON | \ - jq '"(.networkManagementIpAddress) (.remoteManagementAccounts[0].password)"' ----- - -Replace `` with the ID of the node. -==== - -. Create a directory to store the cluster configuration: -+ -[source,terminal] ----- -$ mkdir ~/clusterconfigs ----- - -. Copy the `install-config.yaml` file into the directory: -+ -[source,terminal] ----- -$ cp install-config.yaml ~/clusterconfig ----- - -. 
Ensure all bare metal nodes are powered off prior to installing the {product-title} cluster: -+ -[source,terminal] ----- -$ ipmitool -I lanplus -U -P -H power off ----- - -. Remove old bootstrap resources if any are left over from a previous deployment attempt: -+ -[source,bash] ----- -for i in $(sudo virsh list | tail -n +3 | grep bootstrap | awk {'print $2'}); -do - sudo virsh destroy $i; - sudo virsh undefine $i; - sudo virsh vol-delete $i --pool $i; - sudo virsh vol-delete $i.ign --pool $i; - sudo virsh pool-destroy $i; - sudo virsh pool-undefine $i; -done ----- diff --git a/modules/install-ibm-cloud-configuring-the-public-subnet.adoc b/modules/install-ibm-cloud-configuring-the-public-subnet.adoc deleted file mode 100644 index 44b819e16fb5..000000000000 --- a/modules/install-ibm-cloud-configuring-the-public-subnet.adoc +++ /dev/null @@ -1,192 +0,0 @@ -// This is included in the following assemblies: -// -// installing_ibm_cloud/install-ibm-cloud-installing-on-ibm-cloud.adoc - -:_content-type: PROCEDURE -[id="configuring-the-public-subnet_{context}"] -= Configuring the public subnet - -All of the {product-title} cluster nodes must be on the public subnet. {ibmcloudBMRegProductName} does not provide a DHCP server on the subnet. Set it up separately on the provisioner node. - -You must reset the BASH variables defined when preparing the provisioner node. Rebooting the provisioner node after preparing it will delete the BASH variables previously set. - -.Procedure - -. Install `dnsmasq`: -+ -[source,terminal] ----- -$ sudo dnf install dnsmasq ----- - -. Open the `dnsmasq` configuration file: -+ -[source,terminal] ----- -$ sudo vi /etc/dnsmasq.conf ----- - -. Add the following configuration to the `dnsmasq` configuration file: -+ -[source,text] ----- -interface=baremetal -except-interface=lo -bind-dynamic -log-dhcp - -dhcp-range=,, <1> -dhcp-option=baremetal,121,0.0.0.0/0,,, <2> - -dhcp-hostsfile=/var/lib/dnsmasq/dnsmasq.hostsfile ----- -+ -<1> Set the DHCP range. Replace both instances of `` with one unused IP address from the public subnet so that the `dhcp-range` for the `baremetal` network begins and ends with the same the IP address. Replace `` with the CIDR of the public subnet. -+ -<2> Set the DHCP option. Replace `` with the IP address of the gateway for the `baremetal` network. Replace `` with the IP address of the provisioner node's private IP address on the `provisioning` network. Replace `` with the IP address of the provisioner node's public IP address on the `baremetal` network. -+ -To retrieve the value for ``, execute: -+ -[source,terminal] ----- -$ ibmcloud sl subnet detail --output JSON | jq .cidr ----- -+ -Replace `` with the ID of the public subnet. -+ -To retrieve the value for ``, execute: -+ -[source,terminal] ----- -$ ibmcloud sl subnet detail --output JSON | jq .gateway -r ----- -+ -Replace `` with the ID of the public subnet. -+ -To retrieve the value for ``, execute: -+ -[source,terminal] ----- -$ ibmcloud sl hardware detail --output JSON | \ - jq .primaryBackendIpAddress -r ----- -+ -Replace `` with the ID of the provisioner node. -+ -To retrieve the value for ``, execute: -+ -[source,terminal] ----- -$ ibmcloud sl hardware detail --output JSON | jq .primaryIpAddress -r ----- -+ -Replace `` with the ID of the provisioner node. - -. Obtain the list of hardware for the cluster: -+ -[source,terminal] ----- -$ ibmcloud sl hardware list ----- - -. 
Obtain the MAC addresses and IP addresses for each node: -+ -[source,terminal] ----- -$ ibmcloud sl hardware detail --output JSON | \ - jq '.networkComponents[] | \ - "\(.primaryIpAddress) \(.macAddress)"' | grep -v null ----- -+ -Replace `` with the ID of the node. -+ -.Example output -[source,terminal] ----- -"10.196.130.144 00:e0:ed:6a:ca:b4" -"141.125.65.215 00:e0:ed:6a:ca:b5" ----- -+ -Make a note of the MAC address and IP address of the public network. Make a separate note of the MAC address of the private network, which you will use later in the `install-config.yaml` file. Repeat this procedure for each node until you have all the public MAC and IP addresses for the public `baremetal` network, and the MAC addresses of the private `provisioning` network. - -. Add the MAC and IP address pair of the public `baremetal` network for each node into the `dnsmasq.hostsfile` file: -+ -[source,terminal] ----- -$ sudo vim /var/lib/dnsmasq/dnsmasq.hostsfile ----- -+ -.Example input -[source,text] ----- -00:e0:ed:6a:ca:b5,141.125.65.215,master-0 -,,master-1 -,,master-2 -,,worker-0 -,,worker-1 -... ----- -+ -Replace `,` with the public MAC address and public IP address of the corresponding node name. - -. Start `dnsmasq`: -+ -[source,terminal] ----- -$ sudo systemctl start dnsmasq ----- - -. Enable `dnsmasq` so that it starts when booting the node: -+ -[source,terminal] ----- -$ sudo systemctl enable dnsmasq ----- - -. Verify `dnsmasq` is running: -+ -[source,terminal] ----- -$ sudo systemctl status dnsmasq ----- -+ -.Example output -[source,terminal] ----- -● dnsmasq.service - DNS caching server. -Loaded: loaded (/usr/lib/systemd/system/dnsmasq.service; enabled; vendor preset: disabled) -Active: active (running) since Tue 2021-10-05 05:04:14 CDT; 49s ago -Main PID: 3101 (dnsmasq) -Tasks: 1 (limit: 204038) -Memory: 732.0K -CGroup: /system.slice/dnsmasq.service -└─3101 /usr/sbin/dnsmasq -k ----- - -. Open ports `53` and `67` with UDP protocol: -+ -[source,terminal] ----- -$ sudo firewall-cmd --add-port 53/udp --permanent ----- -+ -[source,terminal] ----- -$ sudo firewall-cmd --add-port 67/udp --permanent ----- - -. Add `provisioning` to the external zone with masquerade: -+ -[source,terminal] ----- -$ sudo firewall-cmd --change-zone=provisioning --zone=external --permanent ----- -+ -This step ensures network address translation for IPMI calls to the management subnet. - -. Reload the `firewalld` configuration: -+ -[source,terminal] ----- -$ sudo firewall-cmd --reload ----- diff --git a/modules/install-ibm-cloud-preparing-the-provisioner-node.adoc b/modules/install-ibm-cloud-preparing-the-provisioner-node.adoc deleted file mode 100644 index 06e6f3d883ae..000000000000 --- a/modules/install-ibm-cloud-preparing-the-provisioner-node.adoc +++ /dev/null @@ -1,273 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_ibm_cloud/install-ibm-cloud-installing-on-ibm-cloud.adoc - -:_content-type: PROCEDURE -[id="preparing-the-provisioner-node-for-openshift-install-on-ibm-cloud_{context}"] -= Preparing the provisioner node on {ibmcloudBMProductName} infrastructure - -Perform the following steps to prepare the provisioner node. - -.Procedure - -. Log in to the provisioner node via `ssh`. - -. 
Create a non-root user (`kni`) and provide that user with `sudo` privileges: -+ -[source,terminal] ----- -# useradd kni ----- -+ -[source,terminal] ----- -# passwd kni ----- -+ -[source,terminal] ----- -# echo "kni ALL=(root) NOPASSWD:ALL" | tee -a /etc/sudoers.d/kni ----- -+ -[source,terminal] ----- -# chmod 0440 /etc/sudoers.d/kni ----- - -. Create an `ssh` key for the new user: -+ -[source,terminal] ----- -# su - kni -c "ssh-keygen -f /home/kni/.ssh/id_rsa -N ''" ----- - -. Log in as the new user on the provisioner node: -+ -[source,terminal] ----- -# su - kni ----- - -. Use Red Hat Subscription Manager to register the provisioner node: -+ -[source,terminal] ----- -$ sudo subscription-manager register --username= --password= --auto-attach ----- -+ -[source,terminal] ----- -$ sudo subscription-manager repos --enable=rhel-8-for-x86_64-appstream-rpms \ - --enable=rhel-8-for-x86_64-baseos-rpms ----- -+ -[NOTE] -==== -For more information about Red Hat Subscription Manager, see link:https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html-single/rhsm/index[Using and Configuring Red Hat Subscription Manager]. -==== - -. Install the following packages: -+ -[source,terminal] ----- -$ sudo dnf install -y libvirt qemu-kvm mkisofs python3-devel jq ipmitool ----- - -. Modify the user to add the `libvirt` group to the newly created user: -+ -[source,terminal] ----- -$ sudo usermod --append --groups libvirt kni ----- - -. Start `firewalld`: -+ -[source,terminal] ----- -$ sudo systemctl start firewalld ----- - -. Enable `firewalld`: -+ -[source,terminal] ----- -$ sudo systemctl enable firewalld ----- - -. Start the `http` service: -+ -[source,terminal] ----- -$ sudo firewall-cmd --zone=public --add-service=http --permanent ----- -+ -[source,terminal] ----- -$ sudo firewall-cmd --reload ----- - -. Start and enable the `libvirtd` service: -+ -[source,terminal] ----- -$ sudo systemctl enable libvirtd --now ----- - -. Set the ID of the provisioner node: -+ -[source,terminal] ----- -$ PRVN_HOST_ID= ----- -+ -You can view the ID with the following `ibmcloud` command: -+ -[source,terminal] ----- -$ ibmcloud sl hardware list ----- - -. Set the ID of the public subnet: -+ -[source,terminal] ----- -$ PUBLICSUBNETID= ----- -+ -You can view the ID with the following `ibmcloud` command: -+ -[source,terminal] ----- -$ ibmcloud sl subnet list ----- - -. Set the ID of the private subnet: -+ -[source,terminal] ----- -$ PRIVSUBNETID= ----- -+ -You can view the ID with the following `ibmcloud` command: -+ -[source,terminal] ----- -$ ibmcloud sl subnet list ----- - -. Set the provisioner node public IP address: -+ -[source,terminal] ----- -$ PRVN_PUB_IP=$(ibmcloud sl hardware detail $PRVN_HOST_ID --output JSON | jq .primaryIpAddress -r) ----- - -. Set the CIDR for the public network: -+ -[source,terminal] ----- -$ PUBLICCIDR=$(ibmcloud sl subnet detail $PUBLICSUBNETID --output JSON | jq .cidr) ----- - -. Set the IP address and CIDR for the public network: -+ -[source,terminal] ----- -$ PUB_IP_CIDR=$PRVN_PUB_IP/$PUBLICCIDR ----- - -. Set the gateway for the public network: -+ -[source,terminal] ----- -$ PUB_GATEWAY=$(ibmcloud sl subnet detail $PUBLICSUBNETID --output JSON | jq .gateway -r) ----- - -. Set the private IP address of the provisioner node: -+ -[source,terminal] ----- -$ PRVN_PRIV_IP=$(ibmcloud sl hardware detail $PRVN_HOST_ID --output JSON | \ - jq .primaryBackendIpAddress -r) ----- - -. 
Set the CIDR for the private network: -+ -[source,terminal] ----- -$ PRIVCIDR=$(ibmcloud sl subnet detail $PRIVSUBNETID --output JSON | jq .cidr) ----- - -. Set the IP address and CIDR for the private network: -+ -[source,terminal] ----- -$ PRIV_IP_CIDR=$PRVN_PRIV_IP/$PRIVCIDR ----- - -. Set the gateway for the private network: -+ -[source,terminal] ----- -$ PRIV_GATEWAY=$(ibmcloud sl subnet detail $PRIVSUBNETID --output JSON | jq .gateway -r) ----- - -. Set up the bridges for the `baremetal` and `provisioning` networks: -+ -[source,terminal] ----- -$ sudo nohup bash -c " - nmcli --get-values UUID con show | xargs -n 1 nmcli con delete - nmcli connection add ifname provisioning type bridge con-name provisioning - nmcli con add type bridge-slave ifname eth1 master provisioning - nmcli connection add ifname baremetal type bridge con-name baremetal - nmcli con add type bridge-slave ifname eth2 master baremetal - nmcli connection modify baremetal ipv4.addresses $PUB_IP_CIDR ipv4.method manual ipv4.gateway $PUB_GATEWAY - nmcli connection modify provisioning ipv4.addresses 172.22.0.1/24,$PRIV_IP_CIDR ipv4.method manual - nmcli connection modify provisioning +ipv4.routes \"10.0.0.0/8 $PRIV_GATEWAY\" - nmcli con down baremetal - nmcli con up baremetal - nmcli con down provisioning - nmcli con up provisioning - init 6 -" ----- -+ -[NOTE] -==== -For `eth1` and `eth2`, substitute the appropriate interface name, as needed. -==== - -. If required, SSH back into the `provisioner` node: -+ -[source,terminal] ----- -# ssh kni@provisioner.. ----- - -. Verify the connection bridges have been properly created: -+ -[source,terminal] ----- -$ sudo nmcli con show ----- -+ -.Example output -[source,terminal] ----- -NAME UUID TYPE DEVICE -baremetal 4d5133a5-8351-4bb9-bfd4-3af264801530 bridge baremetal -provisioning 43942805-017f-4d7d-a2c2-7cb3324482ed bridge provisioning -virbr0 d9bca40f-eee1-410b-8879-a2d4bb0465e7 bridge virbr0 -bridge-slave-eth1 76a8ed50-c7e5-4999-b4f6-6d9014dd0812 ethernet eth1 -bridge-slave-eth2 f31c3353-54b7-48de-893a-02d2b34c4736 ethernet eth2 ----- - -. Create a `pull-secret.txt` file: -+ -[source,terminal] ----- -$ vim pull-secret.txt ----- -+ -In a web browser, navigate to link:https://console.redhat.com/openshift/install/metal/user-provisioned[Install on Bare Metal with user-provisioned infrastructure]. In step 1, click **Download pull secret**. Paste the contents into the `pull-secret.txt` file and save the contents in the `kni` user's home directory. diff --git a/modules/install-ibm-cloud-setting-up-ibm-cloud-infrastructure.adoc b/modules/install-ibm-cloud-setting-up-ibm-cloud-infrastructure.adoc deleted file mode 100644 index f998f454f09f..000000000000 --- a/modules/install-ibm-cloud-setting-up-ibm-cloud-infrastructure.adoc +++ /dev/null @@ -1,187 +0,0 @@ -// This is included in the following assemblies: -// -// installing_ibm_cloud/install-ibm-cloud-installing-on-ibm-cloud.adoc - -[id="setting-up-ibm-cloud-infrastructure_{context}"] -= Setting up IBM Cloud Bare Metal (Classic) infrastructure - -To deploy an {product-title} cluster on {ibmcloudBMRegProductName} infrastructure, you must first provision the IBM Cloud nodes. - -[IMPORTANT] -==== -Red Hat supports IPMI and PXE on the `provisioning` network only. Red Hat has not tested Red Fish, virtual media, or other complementary technologies such as Secure Boot on IBM Cloud deployments. The `provisioning` network is required. -==== - -You can customize IBM Cloud nodes using the IBM Cloud API. 
When creating IBM Cloud nodes, you must consider the following requirements. - -[discrete] -== Use one data center per cluster - -All nodes in the {product-title} cluster must run in the same IBM Cloud data center. - -[discrete] -== Create public and private VLANs - -Create all nodes with a single public VLAN and a single private VLAN. - -[discrete] -== Ensure subnets have sufficient IP addresses - -IBM Cloud public VLAN subnets use a `/28` prefix by default, which provides 16 IP addresses. That is sufficient for a cluster consisting of three control plane nodes, four worker nodes, and two IP addresses for the API VIP and Ingress VIP on the `baremetal` network. For larger clusters, you might need a smaller prefix. - -IBM Cloud private VLAN subnets use a `/26` prefix by default, which provides 64 IP addresses. {ibmcloudBMProductName} uses private network IP addresses to access the Baseboard Management Controller (BMC) of each node. {product-title} creates an additional subnet for the `provisioning` network. Network traffic for the `provisioning` network subnet routes through the private VLAN. For larger clusters, you might need a smaller prefix. - -.IP addresses per prefix -[options="header"] -|==== -|IP addresses |Prefix -|32| `/27` -|64| `/26` -|128| `/25` -|256| `/24` -|==== - -[discrete] -== Configuring NICs - -{product-title} deploys with two networks: - -- `provisioning`: The `provisioning` network is a non-routable network used for provisioning the underlying operating system on each node that is a part of the {product-title} cluster. - -- `baremetal`: The `baremetal` network is a routable network. You can use any NIC order to interface with the `baremetal` network, provided it is not the NIC specified in the `provisioningNetworkInterface` configuration setting or the NIC associated to a node's `bootMACAddress` configuration setting for the `provisioning` network. - -While the cluster nodes can contain more than two NICs, the installation process only focuses on the first two NICs. For example: - -[options="header"] -|=== -|NIC |Network |VLAN -| NIC1 | `provisioning` | -| NIC2 | `baremetal` | -|=== - -In the previous example, NIC1 on all control plane and worker nodes connects to the non-routable network (`provisioning`) that is only used for the installation of the {product-title} cluster. NIC2 on all control plane and worker nodes connects to the routable `baremetal` network. - -[options="header"] -|=== -|PXE |Boot order -| NIC1 PXE-enabled `provisioning` network | 1 -| NIC2 `baremetal` network. | 2 -|=== - -[NOTE] -==== -Ensure PXE is enabled on the NIC used for the `provisioning` network and is disabled on all other NICs. -==== - -[discrete] -== Configuring canonical names - -Clients access the {product-title} cluster nodes over the `baremetal` network. Configure IBM Cloud subdomains or subzones where the canonical name extension is the cluster name. - ----- -. ----- - -For example: - ----- -test-cluster.example.com ----- - -[discrete] -== Creating DNS entries - -You must create DNS `A` record entries resolving to unused IP addresses on the public subnet for the following: - -[width="100%", options="header"] -|===== -| Usage | Host Name | IP -| API | api.. | -| Ingress LB (apps) | *.apps.. | -|===== - -Control plane and worker nodes already have DNS entries after provisioning. - -The following table provides an example of fully qualified domain names. The API and Nameserver addresses begin with canonical name extensions. 
The host names of the control plane and worker nodes are examples, so you can use any host naming convention you prefer. - -[width="100%", options="header"] -|===== -| Usage | Host Name | IP -| API | api.. | -| Ingress LB (apps) | *.apps.. | -| Provisioner node | provisioner.. | -| Master-0 | openshift-master-0.. | -| Master-1 | openshift-master-1.. | -| Master-2 | openshift-master-2.. | -| Worker-0 | openshift-worker-0.. | -| Worker-1 | openshift-worker-1.. | -| Worker-n | openshift-worker-n.. | -|===== - -{product-title} includes functionality that uses cluster membership information to generate `A` records. This resolves the node names to their IP addresses. After the nodes are registered with the API, the cluster can disperse node information without using CoreDNS-mDNS. This eliminates the network traffic associated with multicast DNS. - -[IMPORTANT] -==== -After provisioning the IBM Cloud nodes, you must create a DNS entry for the `api..` domain name on the external DNS because removing CoreDNS causes the local entry to disappear. Failure to create a DNS record for the `api..` domain name in the external DNS server prevents worker nodes from joining the cluster. -==== - -[discrete] -== Network Time Protocol (NTP) - -Each {product-title} node in the cluster must have access to an NTP server. {product-title} nodes use NTP to synchronize their clocks. For example, cluster nodes use SSL certificates that require validation, which might fail if the date and time between the nodes are not in sync. - -[IMPORTANT] -==== -Define a consistent clock date and time format in each cluster node's BIOS settings, or installation might fail. -==== - -[discrete] -== Configure a DHCP server - -{ibmcloudBMProductName} does not run DHCP on the public or private VLANs. After provisioning IBM Cloud nodes, you must set up a DHCP server for the public VLAN, which corresponds to {product-title}'s `baremetal` network. - -[NOTE] -==== -The IP addresses allocated to each node do not need to match the IP addresses allocated by the {ibmcloudBMProductName} provisioning system. -==== - -See the "Configuring the public subnet" section for details. - -[discrete] -== Ensure BMC access privileges - -The "Remote management" page for each node on the dashboard contains the node's intelligent platform management interface (IPMI) credentials. The default IPMI privileges prevent the user from making certain boot target changes. You must change the privilege level to `OPERATOR` so that Ironic can make those changes. - -In the `install-config.yaml` file, add the `privilegelevel` parameter to the URLs used to configure each BMC. See the "Configuring the install-config.yaml file" section for additional details. For example: - -[source,yaml] ----- -ipmi://:?privilegelevel=OPERATOR ----- - -Alternatively, contact IBM Cloud support and request that they increase the IPMI privileges to `ADMINISTRATOR` for each node. - -[discrete] -== Create bare metal servers - -Create bare metal servers in the link:https://cloud.ibm.com[IBM Cloud dashboard] by navigating to *Create resource* -> *Bare Metal Servers for Classic*. - -Alternatively, you can create bare metal servers with the `ibmcloud` CLI utility. For example: - -[source,terminal] ----- -$ ibmcloud sl hardware create --hostname \ - --domain \ - --size \ - --os \ - --datacenter \ - --port-speed \ - --billing ----- - -See link:https://cloud.ibm.com/docs/cli?topic=cli-install-ibmcloud-cli[Installing the stand-alone IBM Cloud CLI] for details on installing the IBM Cloud CLI. 
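Because provisioning new servers can take several hours, you might want to poll for them from the provisioner node. The following is a minimal sketch, assuming the `ibmcloud` CLI is installed and authenticated; the five-minute interval is arbitrary:

[source,terminal]
----
$ watch -n 300 'ibmcloud sl hardware list'
----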
- -[NOTE] -==== -IBM Cloud servers might take 3-5 hours to become available. -==== diff --git a/modules/install-openshift-common-terms.adoc b/modules/install-openshift-common-terms.adoc deleted file mode 100644 index 427185853c4f..000000000000 --- a/modules/install-openshift-common-terms.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/index.adoc - -:_content-type: REFERENCE -[id="install-openshift-common-terms_{context}"] -= Glossary of common terms for {product-title} installing - -This glossary defines common terms that are used in the installation content. These terms help you understand installation effectively. - -{ai-full}:: -An installer hosted at link:https://access.redhat.com/documentation/en-us/assisted_installer_for_openshift_container_platform/2022/html-single/assisted_installer_for_openshift_container_platform/index[console.redhat.com] that provides a web user interface or a RESTful API for creating a cluster configuration. The {ai-full} generates a discovery image. Cluster machines boot with the discovery image, which installs {op-system} and an agent. Together, the {ai-full} and agent provide pre-installation validation and installation for the cluster. - -Agent-based installer:: -An installer similar to the {ai-full}, but you must download the link:https://console.redhat.com/openshift/install/metal/agent-based[agent-based installer] first. The agent-based installer is ideal for air-gapped/restricted networks. - -Bootstrap node:: -A temporary machine that runs a minimal Kubernetes configuration to deploy the {product-title} control plane. - -Control plane:: -A container orchestration layer that exposes the API and interfaces to define, deploy, and manage the lifecycle of containers. Also known as control plane machines. - -Compute node:: -Nodes that are responsible for executing workloads for cluster users. Also known as worker nodes. - -Disconnected installation:: -There are situations where parts of a data center might not have access to the internet, even through proxy servers. You can still install the {product-title} in these environments, but you must download the required software and images and make them available to the disconnected environment. - -The {product-title} installation program:: -A program that provisions the infrastructure and deploys a cluster. - -Installer-provisioned infrastructure:: -The installation program deploys and configures the infrastructure that the cluster runs on. - -Ignition config files:: -A file that Ignition uses to configure {op-system-first} during operating system initialization. The installation program generates different Ignition config files to initialize bootstrap, control plane, and worker nodes. - -Kubernetes manifests:: -Specifications of a Kubernetes API object in a JSON or YAML format. A configuration file can include deployments, config maps, secrets, daemonsets etc. - -Kubelet:: -A **primary node** agent that runs on each node in the cluster to ensure that containers are running in a pod. - -Load balancers:: -A load balancer serves as the single point of contact for clients. Load balancers for the API distribute incoming traffic across control plane nodes. - -Machine Config Operator:: -An Operator that manages and applies configuration and updates of the base operating system and container runtime, including everything between the kernel and kubelet for the nodes in the cluster. 
- -Operators:: -The preferred method of packaging, deploying, and managing a Kubernetes application in an {product-title} cluster. An operator takes human operational knowledge and encodes it into software that is easily packaged and shared with customers. - -User-provisioned infrastructure:: -You can install {product-title} on infrastructure that you provide. You can use the installation program to generate the assets required to provision the cluster infrastructure, create the cluster infrastructure, and then deploy the cluster to the infrastructure that you provided. diff --git a/modules/install-sno-about-installing-on-a-single-node.adoc b/modules/install-sno-about-installing-on-a-single-node.adoc deleted file mode 100644 index aba00569f8d6..000000000000 --- a/modules/install-sno-about-installing-on-a-single-node.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// This is included in the following assemblies: -// -// installing_sno/install-sno-preparing-to-install-sno.adoc - -:_content-type: CONCEPT -[id="install-sno-about-installing-on-a-single-node_{context}"] -= About OpenShift on a single node - -You can create a single-node cluster with standard installation methods. {product-title} on a single node is a specialized installation that requires the creation of a special ignition configuration ISO. The primary use case is for edge computing workloads, including intermittent connectivity, portable clouds, and 5G radio access networks (RAN) close to a base station. The major tradeoff with an installation on a single node is the lack of high availability. - -[IMPORTANT] -==== -The use of OpenShiftSDN with {sno} is not supported. OVN-Kubernetes is the default network plugin for {sno} deployments. -==== diff --git a/modules/install-sno-generating-the-discovery-iso-with-the-assisted-installer.adoc b/modules/install-sno-generating-the-discovery-iso-with-the-assisted-installer.adoc deleted file mode 100644 index 5a507f007ae5..000000000000 --- a/modules/install-sno-generating-the-discovery-iso-with-the-assisted-installer.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// This is included in the following assemblies: -// -// installing_sno/install-sno-installing-sno.adoc - -:_content-type: PROCEDURE -[id="install-sno-generating-the-discovery-iso-with-the-assisted-installer_{context}"] -= Generating the discovery ISO with the Assisted Installer - -Installing {product-title} on a single node requires a discovery ISO, which the Assisted Installer can generate. - -.Procedure - -. On the administration host, open a browser and navigate to link:https://console.redhat.com/openshift/assisted-installer/clusters[{cluster-manager-first}]. - -. Click *Create Cluster* to create a new cluster. - -. In the *Cluster name* field, enter a name for the cluster. - -. In the *Base domain* field, enter a base domain. For example: -+ ----- -example.com ----- -+ -All DNS records must be subdomains of this base domain and include the cluster name, for example: -+ ----- -.example.com ----- -+ -[NOTE] -==== -You cannot change the base domain or cluster name after cluster installation. -==== - -. Select *Install single node OpenShift (SNO)* and complete the rest of the wizard steps. Download the discovery ISO. - -. Make a note of the discovery ISO URL for installing with virtual media. - -[NOTE] -===== -If you enable {VirtProductName} during this process, you must have a second local storage device of at least 50GiB for your virtual machines. 
-===== diff --git a/modules/install-sno-generating-the-install-iso-manually.adoc b/modules/install-sno-generating-the-install-iso-manually.adoc deleted file mode 100644 index b57ba3120096..000000000000 --- a/modules/install-sno-generating-the-install-iso-manually.adoc +++ /dev/null @@ -1,152 +0,0 @@ -// This is included in the following assemblies: -// -// installing_sno/install-sno-installing-sno.adoc - -:_content-type: PROCEDURE -[id="generating-the-install-iso-manually_{context}"] -= Generating the installation ISO with coreos-installer - -Installing {product-title} on a single node requires an installation ISO, which you can generate with the following procedure. - -.Prerequisites - -* Install `podman`. - -.Procedure - -. Set the {product-title} version: -+ -[source,terminal] ----- -$ OCP_VERSION= <1> ----- -+ -<1> Replace `` with the current version, for example, `latest-{product-version}` - -. Set the host architecture: -+ -[source,terminal] ----- -$ ARCH= <1> ----- -<1> Replace `` with the target host architecture, for example, `aarch64` or `x86_64`. - -. Download the {product-title} client (`oc`) and make it available for use by entering the following commands: -+ -[source,terminal] ----- -$ curl -k https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$OCP_VERSION/openshift-client-linux.tar.gz -o oc.tar.gz ----- -+ -[source,terminal] ----- -$ tar zxf oc.tar.gz ----- -+ -[source,terminal] ----- -$ chmod +x oc ----- - -. Download the {product-title} installer and make it available for use by entering the following commands: -+ -[source,terminal] ----- -$ curl -k https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$OCP_VERSION/openshift-install-linux.tar.gz -o openshift-install-linux.tar.gz ----- -+ -[source,terminal] ----- -$ tar zxvf openshift-install-linux.tar.gz ----- -+ -[source,terminal] ----- -$ chmod +x openshift-install ----- - -. Retrieve the {op-system} ISO URL by running the following command: -+ -[source,terminal] ----- -$ ISO_URL=$(./openshift-install coreos print-stream-json | grep location | grep $ARCH | grep iso | cut -d\" -f4) ----- - -. Download the {op-system} ISO: -+ -[source,terminal] ----- -$ curl -L $ISO_URL -o rhcos-live.iso ----- - -. Prepare the `install-config.yaml` file: -+ -[source,yaml] ----- -apiVersion: v1 -baseDomain: <1> -compute: -- name: worker - replicas: 0 <2> -controlPlane: - name: master - replicas: 1 <3> -metadata: - name: <4> -networking: <5> - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 <6> - networkType: OVNKubernetes - serviceNetwork: - - 172.30.0.0/16 -platform: - none: {} -bootstrapInPlace: - installationDisk: /dev/disk/by-id/ <7> -pullSecret: '' <8> -sshKey: | - <9> ----- -<1> Add the cluster domain name. -<2> Set the `compute` replicas to `0`. This makes the control plane node schedulable. -<3> Set the `controlPlane` replicas to `1`. In conjunction with the previous `compute` setting, this setting ensures the cluster runs on a single node. -<4> Set the `metadata` name to the cluster name. -<5> Set the `networking` details. OVN-Kubernetes is the only allowed network plugin type for single-node clusters. -<6> Set the `cidr` value to match the subnet of the {sno} cluster. -<7> Set the path to the installation disk drive, for example, `/dev/disk/by-id/wwn-0x64cd98f04fde100024684cf3034da5c2`. -<8> Copy the {cluster-manager-url-pull} and add the contents to this configuration setting. 
-<9> Add the public SSH key from the administration host so that you can log in to the cluster after installation. - -. Generate {product-title} assets by running the following commands: -+ -[source,terminal] ----- -$ mkdir ocp ----- -+ -[source,terminal] ----- -$ cp install-config.yaml ocp ----- -+ -[source,terminal] ----- -$ ./openshift-install --dir=ocp create single-node-ignition-config ----- - -. Embed the ignition data into the {op-system} ISO by running the following commands: -+ -[source,terminal] ----- -$ alias coreos-installer='podman run --privileged --pull always --rm \ - -v /dev:/dev -v /run/udev:/run/udev -v $PWD:/data \ - -w /data quay.io/coreos/coreos-installer:release' ----- -+ -[source,terminal] ----- -$ coreos-installer iso ignition embed -fi ocp/bootstrap-in-place-for-live-iso.ign rhcos-live.iso ----- diff --git a/modules/install-sno-installing-with-the-assisted-installer.adoc b/modules/install-sno-installing-with-the-assisted-installer.adoc deleted file mode 100644 index ee83eca56a83..000000000000 --- a/modules/install-sno-installing-with-the-assisted-installer.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// This is included in the following assemblies: -// -// installing_sno/install-sno-installing-sno.adoc - -:_content-type: PROCEDURE -[id="install-sno-installing-with-the-assisted-installer_{context}"] -= Installing {sno} with the Assisted Installer - -Use the Assisted Installer to install the single-node cluster. - -.Procedure - -. Attach the {op-system} discovery ISO to the target host. - -. Configure the boot drive order in the server BIOS settings to boot from the attached discovery ISO and then reboot the server. - -. On the administration host, return to the browser. Wait for the host to appear in the list of discovered hosts. If necessary, reload the link:https://console.redhat.com/openshift/assisted-installer/clusters[*Assisted Clusters*] page and select the cluster name. - -. Complete the install wizard steps. Add networking details, including a subnet from the available subnets. Add the SSH public key if necessary. - -. Monitor the installation's progress. Watch the cluster events. After the installation process finishes writing the operating system image to the server's hard disk, the server restarts. - -. Remove the discovery ISO, and reset the server to boot from the installation drive. -+ -The server restarts several times automatically, deploying the control plane. diff --git a/modules/install-sno-installing-with-usb-media.adoc b/modules/install-sno-installing-with-usb-media.adoc deleted file mode 100644 index 30da15ddf754..000000000000 --- a/modules/install-sno-installing-with-usb-media.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// This is included in the following assemblies: -// -// installing_sno/install-sno-installing-sno.adoc - -:_content-type: PROCEDURE -[id="installing-with-usb-media_{context}"] -= Creating a bootable ISO image on a USB drive - -You can install software using a bootable USB drive that contains an ISO image. Booting the server with the USB drive prepares the server for the software installation. - -.Procedure - -. On the administration host, insert a USB drive into a USB port. - -. Create a bootable USB drive, for example: -+ -[source,terminal] ----- -# dd if= of= status=progress ----- -+ -where: -+ --- -:: is the relative path to the downloaded ISO file, for example, `rhcos-live.iso`. -:: is the location of the connected USB drive, for example, `/dev/sdb`. 
--- -+ -After the ISO is copied to the USB drive, you can use the USB drive to install software on the server. diff --git a/modules/install-sno-monitoring-the-installation-manually.adoc b/modules/install-sno-monitoring-the-installation-manually.adoc deleted file mode 100644 index 4908ecdddcfc..000000000000 --- a/modules/install-sno-monitoring-the-installation-manually.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// This is included in the following assemblies: -// -// installing_sno/install-sno-installing-sno.adoc - -:_content-type: PROCEDURE -[id="install-sno-monitoring-the-installation-manually_{context}"] -= Monitoring the cluster installation using openshift-install - -Use `openshift-install` to monitor the progress of the single-node cluster installation. - -.Procedure - -. Attach the modified {op-system} installation ISO to the target host. - -. Configure the boot drive order in the server BIOS settings to boot from the attached discovery ISO and then reboot the server. - -. On the administration host, monitor the installation by running the following command: -+ -[source,terminal] ----- -$ ./openshift-install --dir=ocp wait-for install-complete ----- -+ -The server restarts several times while deploying the control plane. - -.Verification - -* After the installation is complete, check the environment by running the following command: -+ -[source,terminal] ----- -$ export KUBECONFIG=ocp/auth/kubeconfig ----- -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -control-plane.example.com Ready master,worker 10m v1.27.3 ----- diff --git a/modules/install-sno-requirements-for-installing-on-a-single-node.adoc b/modules/install-sno-requirements-for-installing-on-a-single-node.adoc deleted file mode 100644 index cf9fa67dd016..000000000000 --- a/modules/install-sno-requirements-for-installing-on-a-single-node.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// This is included in the following assemblies: -// -// installing_sno/install-sno-preparing-to-install-sno.adoc -:_content-type: CONCEPT - -[id="install-sno-requirements-for-installing-on-a-single-node_{context}"] -= Requirements for installing OpenShift on a single node - -Installing {product-title} on a single node alleviates some of the requirements for high availability and large scale clusters. However, you must address the following requirements: - -* *Administration host:* You must have a computer to prepare the ISO, to create the USB boot drive, and to monitor the installation. - -* *CPU Architecture:* Installing {product-title} on a single node supports `x86_64` and `arm64` CPU architectures. - -* *Supported platforms:* Installing {product-title} on a single node is supported on bare metal, vSphere, AWS, Red Hat OpenStack, and {rh-virtualization-first} platforms. - -* *Production-grade server:* Installing {product-title} on a single node requires a server with sufficient resources to run {product-title} services and a production workload. -+ -.Minimum resource requirements -[options="header"] -|==== -|Profile|vCPU|Memory|Storage -|Minimum|8 vCPU cores|16GB of RAM| 120GB -|==== -+ -[NOTE] -==== -* One vCPU is equivalent to one physical core when simultaneous multithreading (SMT), or hyperthreading, is not enabled. When enabled, use the following formula to calculate the corresponding ratio: -+ -(threads per core × cores) × sockets = vCPUs - -* Adding Operators during the installation process might increase the minimum resource requirements. 
-==== -+ -The server must have a Baseboard Management Controller (BMC) when booting with virtual media. - -* *Networking:* The server must have access to the internet or access to a local registry if it is not connected to a routable network. The server must have a DHCP reservation or a static IP address for the Kubernetes API, ingress route, and cluster node domain names. You must configure the DNS to resolve the IP address to each of the following fully qualified domain names (FQDN): -+ -.Required DNS records -[options="header"] -|==== -|Usage|FQDN|Description -|Kubernetes API|`api..`| Add a DNS A/AAAA or CNAME record. This record must be resolvable by clients external to the cluster. -|Internal API|`api-int..`| Add a DNS A/AAAA or CNAME record when creating the ISO manually. This record must be resolvable by nodes within the cluster. -|Ingress route|`*.apps..`| Add a wildcard DNS A/AAAA or CNAME record that targets the node. This record must be resolvable by clients external to the cluster. -|==== -+ -Without persistent IP addresses, communications between the `apiserver` and `etcd` might fail. diff --git a/modules/install-sno_additional-requirements-for-installing-on-a-single-node-on-aws.adoc b/modules/install-sno_additional-requirements-for-installing-on-a-single-node-on-aws.adoc deleted file mode 100644 index 69b6038d43ca..000000000000 --- a/modules/install-sno_additional-requirements-for-installing-on-a-single-node-on-aws.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// This module is included in the following assemblies: -// -// installing/installing_sno/install-sno-preparing-to-install-sno.adoc - -:_content-type: CONCEPT -[id="additional-requirements-for-installing-on-a-single-node-on-aws_{context}"] -= Additional requirements for installing on a single node on AWS - -The AWS documentation for installer-provisioned installation is written with a high availability cluster consisting of three control plane nodes. When referring to the AWS documentation, consider the differences between the requirements for a {sno} cluster and a high availability cluster. - -* The required machines for cluster installation in AWS documentation indicates a temporary bootstrap machine, three control plane machines, and at least two compute machines. You require only a temporary bootstrap machine and one AWS instance for the control plane node and no worker nodes. - -* The minimum resource requirements for cluster installation in the AWS documentation indicates a control plane node with 4 vCPUs and 100GB of storage. For a single node cluster, you must have a minimum of 8 vCPU cores and 120GB of storage. - -* The `controlPlane.replicas` setting in the `install-config.yaml` file should be set to `1`. - -* The `compute.replicas` setting in the `install-config.yaml` file should be set to `0`. -This makes the control plane node schedulable. 
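For reference, a minimal sketch of how the two `replicas` settings described above might look in an `install-config.yaml` excerpt follows. Everything other than the `replicas` fields is a placeholder for illustration and is not taken from the original procedure.

[source,yaml]
----
# Excerpt only; other required install-config.yaml fields are omitted.
controlPlane:
  name: master
  platform:
    aws: {}
  replicas: 1 # one control plane node for the single-node cluster
compute:
- name: worker
  platform:
    aws: {}
  replicas: 0 # no compute nodes, so the control plane node is schedulable
----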
diff --git a/modules/installation-about-custom-azure-vnet.adoc b/modules/installation-about-custom-azure-vnet.adoc deleted file mode 100644 index 00c4bf45794d..000000000000 --- a/modules/installation-about-custom-azure-vnet.adoc +++ /dev/null @@ -1,131 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_azure/installing-azure-vnet.adoc - -ifeval::["{context}" == "installing-azure-government-region"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-private"] -:azure-private: -endif::[] -ifeval::["{context}" == "installing-azure-vnet"] -:azure: -endif::[] - -:_content-type: CONCEPT -[id="installation-about-custom-azure-vnet_{context}"] -= About reusing a VNet for your {product-title} cluster - -In {product-title} {product-version}, you can deploy a cluster into an existing Azure Virtual Network (VNet) in Microsoft Azure. If you do, you must also use existing subnets within the VNet and routing rules. - -By deploying {product-title} into an existing Azure VNet, you might be able to avoid service limit constraints in new accounts or more easily abide by the operational constraints that your company's guidelines set. This is a good option to use if you cannot obtain the infrastructure creation permissions that are required to create the VNet. - -[id="installation-about-custom-azure-vnet-requirements_{context}"] -== Requirements for using your VNet - -When you deploy a cluster by using an existing VNet, you must perform additional network configuration before you install the cluster. In installer-provisioned infrastructure clusters, the installer usually creates the following components, but it does not create them when you install into an existing VNet: - -* Subnets -* Route tables -* VNets -* Network Security Groups - -include::snippets/custom-dns-server.adoc[] - -If you use a custom VNet, you must correctly configure it and its subnets for the installation program and the cluster to use. The installation program cannot subdivide network ranges for the cluster to use, set route tables for the subnets, or set VNet options like DHCP, so you must do so before you install the cluster. - -The cluster must be able to access the resource group that contains the existing VNet and subnets. While all of the resources that the cluster creates are placed in a separate resource group that it creates, some network resources are used from a separate group. Some cluster Operators must be able to access resources in both resource groups. For example, the Machine API controller attaches NICS for the virtual machines that it creates to subnets from the networking resource group. - -Your VNet must meet the following characteristics: - -* The VNet's CIDR block must contain the `Networking.MachineCIDR` range, which is the IP address pool for cluster machines. -* The VNet and its subnets must belong to the same resource group, and the subnets must be configured to use Azure-assigned DHCP IP addresses instead of static IP addresses. - -You must provide two subnets within your VNet, one for the control plane machines and one for the compute machines. Because Azure distributes machines in different availability zones within the region that you specify, your cluster will have high availability by default. 
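One way to review an existing VNet before installation is with the Azure CLI. The following sketch assumes that the `az` CLI is installed and authenticated; `<resource_group>` and `<vnet_name>` are placeholders for your own values, not names used by the installation program.

[source,terminal]
----
# Show the VNet address space to confirm that it contains the machine CIDR.
$ az network vnet show --resource-group <resource_group> --name <vnet_name> \
    --query "addressSpace.addressPrefixes"

# List the subnets and their CIDR blocks: one subnet for the control plane
# machines and one for the compute machines.
$ az network vnet subnet list --resource-group <resource_group> --vnet-name <vnet_name> \
    --query "[].{name:name, cidr:addressPrefix}" --output table
----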
- -To ensure that the subnets that you provide are suitable, the installation program confirms the following data: - -* All the specified subnets exist. -* There are two private subnets, one for the control plane machines and one for the compute machines. -* The subnet CIDRs belong to the machine CIDR that you specified. Machines are not provisioned in availability zones that you do not provide private subnets for. -ifdef::azure[] -If required, the installation program creates public load balancers that manage the control plane and worker nodes, and Azure allocates a public IP address to them. -endif::[] - -[NOTE] -==== -If you destroy a cluster that uses an existing VNet, the VNet is not deleted. -==== - -[id="installation-about-custom-azure-vnet-nsg-requirements_{context}"] -=== Network security group requirements - -The network security groups for the subnets that host the compute and control plane machines require specific access to ensure that the cluster communication is correct. You must create rules to allow access to the required cluster communication ports. - -[IMPORTANT] -==== -The network security group rules must be in place before you install the cluster. If you attempt to install a cluster without the required access, the installation program cannot reach the Azure APIs, and installation fails. -==== - -.Required ports -[options="header",cols="1,3,1,1"] -|=== - -|Port -|Description -|Control plane -|Compute - -|`80` -|Allows HTTP traffic -| -|x - -|`443` -|Allows HTTPS traffic -| -|x - -|`6443` -|Allows communication to the control plane machines -|x -| - -|`22623` -|Allows internal communication to the machine config server for provisioning machines -|x -| -|=== - -include::snippets/mcs-endpoint-limitation.adoc[] - -Because cluster components do not modify the user-provided network security groups, which the Kubernetes controllers update, a pseudo-network security group is created for the Kubernetes controller to modify without impacting the rest of the environment. - -.Additional resources - -* xref:../../networking/openshift_sdn/about-openshift-sdn.adoc#about-openshift-sdn[About the OpenShift SDN network plugin] - - -[id="installation-about-custom-azure-permissions_{context}"] -== Division of permissions - -Starting with {product-title} 4.3, you do not need all of the permissions that are required for an installation program-provisioned infrastructure cluster to deploy a cluster. This change mimics the division of permissions that you might have at your company: some individuals can create different resources in your clouds than others. For example, you might be able to create application-specific items, like instances, storage, and load balancers, but not networking-related components such as VNets, subnet, or ingress rules. - -The Azure credentials that you use when you create your cluster do not need the networking permissions that are required to make VNets and core networking components within the VNet, such as subnets, routing tables, internet gateways, NAT, and VPN. You still need permission to make the application resources that the machines within the cluster require, such as load balancers, security groups, storage accounts, and nodes. - -[id="installation-about-custom-azure-vnet-isolation_{context}"] -== Isolation between clusters - -Because the cluster is unable to modify network security groups in an existing subnet, there is no way to isolate clusters from each other on the VNet. 
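As an illustration of the network security group requirements in the table above, the following sketch shows how an inbound rule for the Kubernetes API port might be added with the Azure CLI. The resource group, network security group name, rule name, and priority are assumed placeholder values, not values that the installation program requires.

[source,terminal]
----
# Allow inbound TCP 6443 to the network security group that protects the
# control plane subnet.
$ az network nsg rule create \
    --resource-group <resource_group> \
    --nsg-name <control_plane_nsg> \
    --name allow-kube-apiserver \
    --priority 100 \
    --direction Inbound \
    --access Allow \
    --protocol Tcp \
    --destination-port-ranges 6443
----

A similar rule would be needed for each of the other required ports, scoped according to the table.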
-//// -These are some of the details from the AWS version, and if any of them are relevant to Azure, they can be included. -If you deploy {product-title} to an existing network, the isolation of cluster services is reduced in the following ways: - -* You can install multiple {product-title} clusters in the same VNet. -* ICMP ingress is allowed to entire network. -* TCP 22 ingress (SSH) is allowed to the entire network. -* Control plane TCP 6443 ingress (Kubernetes API) is allowed to the entire network. -* Control plane TCP 22623 ingress (MCS) is allowed to the entire network. -//// diff --git a/modules/installation-about-custom-gcp-vpc.adoc b/modules/installation-about-custom-gcp-vpc.adoc deleted file mode 100644 index 380524acd4ee..000000000000 --- a/modules/installation-about-custom-gcp-vpc.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/ - -:_content-type: CONCEPT -[id="installation-about-custom-gcp-vpc_{context}"] -= About using a custom VPC - -In {product-title} {product-version}, you can deploy a cluster into an existing VPC in Google Cloud Platform (GCP). If you do, you must also use existing subnets within the VPC and routing rules. - -By deploying {product-title} into an existing GCP VPC, you might be able to avoid limit constraints in new accounts or more easily abide by the operational constraints that your company's guidelines set. This is a good option to use if you cannot obtain the infrastructure creation permissions that are required to create the VPC yourself. - -[id="installation-about-custom-gcp-vpcs-requirements_{context}"] -== Requirements for using your VPC - -The installation program will no longer create the following components: - -* VPC -* Subnets -* Cloud router -* Cloud NAT -* NAT IP addresses - -If you use a custom VPC, you must correctly configure it and its subnets for the installation program and the cluster to use. The installation program cannot subdivide network ranges for the cluster to use, set route tables for the subnets, or set VPC options like DHCP, so you must do so before you install the cluster. - -Your VPC and subnets must meet the following characteristics: - -* The VPC must be in the same GCP project that you deploy the {product-title} cluster to. -* To allow access to the internet from the control plane and compute machines, you must configure cloud NAT on the subnets to allow egress to it. These machines do not have a public address. Even if you do not require access to the internet, you must allow egress to the VPC network to obtain the installation program and images. Because multiple cloud NATs cannot be configured on the shared subnets, the installation program cannot configure it. - -To ensure that the subnets that you provide are suitable, the installation program confirms the following data: - -* All the subnets that you specify exist and belong to the VPC that you specified. -* The subnet CIDRs belong to the machine CIDR. -* You must provide a subnet to deploy the cluster control plane and compute machines to. You can use the same subnet for both machine types. - -If you destroy a cluster that uses an existing VPC, the VPC is not deleted. - -[id="installation-about-custom-gcp-permissions_{context}"] -== Division of permissions - -Starting with {product-title} 4.3, you do not need all of the permissions that are required for an installation program-provisioned infrastructure cluster to deploy a cluster. 
This change mimics the division of permissions that you might have at your company: some individuals can create different resources in your clouds than others. For example, you might be able to create application-specific items, like instances, buckets, and load balancers, but not networking-related components such as VPCs, subnets, or Ingress rules. - -The GCP credentials that you use when you create your cluster do not need the networking permissions that are required to make VPCs and core networking components within the VPC, such as subnets, routing tables, internet gateways, NAT, and VPN. You still need permission to make the application resources that the machines within the cluster require, such as load balancers, security groups, storage, and nodes. - -[id="installation-about-custom-gcp-vpcs-isolation_{context}"] -== Isolation between clusters - -If you deploy {product-title} to an existing network, the isolation of cluster services is preserved by firewall rules that reference the machines in your cluster by the cluster's infrastructure ID. Only traffic within the cluster is allowed. - -If you deploy multiple clusters to the same VPC, the following components might share access between clusters: - -* The API, which is globally available with an external publishing strategy or available throughout the network in an internal publishing strategy -* Debugging tools, such as ports on VM instances that are open to the machine CIDR for SSH and ICMP access diff --git a/modules/installation-about-mirror-registry.adoc b/modules/installation-about-mirror-registry.adoc deleted file mode 100644 index fbaaf1e1f91b..000000000000 --- a/modules/installation-about-mirror-registry.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-installation-images.adoc -// * openshift_images/samples-operator-alt-registry.adoc -// * scalability_and_performance/ztp-deploying-disconnected.adoc -// * updating/updating_a_cluster/updating_disconnected_cluster/mirroring-image-repository.adoc - -ifeval::["{context}" == "installing-mirroring-disconnected"] -:oc-mirror: -endif::[] - -ifeval::["{context}" == "mirroring-ocp-image-repository"] -:oc-mirror: -endif::[] - -:_content-type: CONCEPT -[id="installation-about-mirror-registry_{context}"] -= About the mirror registry - -ifndef::oc-mirror[] -You can mirror the images that are required for {product-title} installation and subsequent product updates to a container mirror registry such as Red Hat Quay, JFrog Artifactory, Sonatype Nexus Repository, or Harbor. If you do not have access to a large-scale container registry, you can use the _mirror registry for Red Hat OpenShift_, a small-scale container registry included with {product-title} subscriptions. - -You can use any container registry that supports link:https://docs.docker.com/registry/spec/manifest-v2-2[Docker v2-2], such as Red Hat Quay, the _mirror registry for Red Hat OpenShift_, Artifactory, Sonatype Nexus Repository, or Harbor. Regardless of your chosen registry, the procedure to mirror content from Red Hat hosted sites on the internet to an isolated image registry is the same. After you mirror the content, you configure each cluster to retrieve this content from your mirror registry. 
-endif::[] -ifdef::oc-mirror[] -You can mirror the images that are required for {product-title} installation and subsequent product updates to a container mirror registry that supports link:https://docs.docker.com/registry/spec/manifest-v2-2[Docker v2-2], such as Red Hat Quay. If you do not have access to a large-scale container registry, you can use the _mirror registry for Red Hat OpenShift_, which is a small-scale container registry included with {product-title} subscriptions. - -Regardless of your chosen registry, the procedure to mirror content from Red Hat hosted sites on the internet to an isolated image registry is the same. After you mirror the content, you configure each cluster to retrieve this content from your mirror registry. -endif::[] - -[IMPORTANT] -==== -The {product-registry} cannot be used as the target registry because it does not support pushing without a tag, which is required during the mirroring process. -==== - -If choosing a container registry that is not the _mirror registry for Red Hat OpenShift_, it must be reachable by every machine in the clusters that you provision. If the registry is unreachable, installation, updating, or normal operations such as workload relocation might fail. For that reason, you must run mirror registries in a highly available way, and the mirror registries must at least match the production availability of your {product-title} clusters. - -When you populate your mirror registry with {product-title} images, you can follow two scenarios. If you have a host that can access both the internet and your mirror registry, but not your cluster nodes, you can directly mirror the content from that machine. This process is referred to as _connected mirroring_. If you have no such host, you must mirror the images to a file system and then bring that host or removable media into your restricted environment. This process is referred to as _disconnected mirroring_. - -For mirrored registries, to view the source of pulled images, you must review the `Trying to access` log entry in the CRI-O logs. Other methods to view the image pull source, such as using the `crictl images` command on a node, show the non-mirrored image name, even though the image is pulled from the mirrored location. - -[NOTE] -==== -Red Hat does not test third party registries with {product-title}. 
-==== - -ifeval::["{context}" == "installing-mirroring-disconnected"] -:!oc-mirror: -endif::[] - -ifeval::["{context}" == "mirroring-ocp-image-repository"] -:!oc-mirror: -endif::[] \ No newline at end of file diff --git a/modules/installation-about-restricted-network.adoc b/modules/installation-about-restricted-network.adoc deleted file mode 100644 index 9c6f4379f9ed..000000000000 --- a/modules/installation-about-restricted-network.adoc +++ /dev/null @@ -1,104 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc -// * installing/installing_openstack/installing-openstack-installer-restricted.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc -// * installing/installing-restricted-networks-nutanix-installer-provisioned.adoc - -ifeval::["{context}" == "installing-ibm-power"] -:ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp-installer-provisioned"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-nutanix-installer-provisioned"] -:ipi: -endif::[] - -:_content-type: CONCEPT -[id="installation-about-restricted-networks_{context}"] -= About installations in restricted networks - -In {product-title} {product-version}, you can perform an installation that does not -require an active connection to the internet to obtain software components. Restricted network installations can be completed using installer-provisioned infrastructure or user-provisioned infrastructure, depending on the cloud platform to which you are installing the cluster. - -ifndef::ibm-power[] -If you choose to perform a restricted network installation on a cloud platform, you -still require access to its cloud APIs. Some cloud functions, like -Amazon Web Service's Route 53 DNS and IAM services, require internet access. -//behind a proxy -Depending on your network, you might require less internet -access for an installation on bare metal hardware, Nutanix, or on VMware vSphere. -endif::ibm-power[] - -To complete a restricted network installation, you must create a registry that -mirrors the contents of the {product-registry} and contains the -installation media. You can create this registry on a mirror host, which can -access both the internet and your closed network, or by using other methods -that meet your restrictions. 
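Before you begin mirroring, a quick sanity check from the mirror host can confirm that both the mirror registry and the Red Hat hosted registry are reachable and that your credentials work. This is only a sketch; `mirror.example.com:8443` is a placeholder host name, and the commands assume that `podman` is installed on the mirror host.

[source,terminal]
----
# Confirm that the mirror host can authenticate to your mirror registry.
$ podman login mirror.example.com:8443

# Confirm that the mirror host can reach the Red Hat hosted registry.
$ podman login registry.redhat.io
----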
- -ifndef::ipi[] -[IMPORTANT] -==== -Because of the complexity of the configuration for user-provisioned installations, consider completing a standard user-provisioned infrastructure installation before you attempt a restricted network installation using user-provisioned infrastructure. Completing this test installation might make it easier to isolate and troubleshoot any issues that might arise during your installation in a restricted network. -==== -endif::ipi[] - -[id="installation-restricted-network-limits_{context}"] -== Additional limits - -Clusters in restricted networks have the following additional limitations and restrictions: - -* The `ClusterVersion` status includes an `Unable to retrieve available updates` -error. -//* The authentication Operator might randomly fail. -* By default, you cannot use the contents of the Developer Catalog because - you cannot access the required image stream tags. -//* The `TelemeterClientDown` and `Watchdog` alerts from the monitoring Operator always display. - -ifeval::["{context}" == "installing-ibm-power"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp-installer-provisioned"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-nutanix-installer-provisioned"] -:!ipi: -endif::[] diff --git a/modules/installation-adding-nutanix-root-certificates.adoc b/modules/installation-adding-nutanix-root-certificates.adoc deleted file mode 100644 index d225d2834406..000000000000 --- a/modules/installation-adding-nutanix-root-certificates.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc -// * installing/installing-restricted-networks-nutanix-installer-provisioned.adoc - -:_content-type: PROCEDURE -[id="installation-adding-nutanix-root-certificates_{context}"] -= Adding Nutanix root CA certificates to your system trust - -Because the installation program requires access to the Prism Central API, you must add your Nutanix trusted root CA certificates to your system trust before you install an {product-title} cluster. - -.Procedure - -. From the Prism Central web console, download the Nutanix root CA certificates. -. Extract the compressed file that contains the Nutanix root CA certificates. -. Add the files for your operating system to the system trust. For example, on a Fedora operating system, run the following command: -+ -[source,terminal] ----- -# cp certs/lin/* /etc/pki/ca-trust/source/anchors ----- - -. Update your system trust. 
For example, on a Fedora operating system, run the following command: -+ -[source,terminal] ----- -# update-ca-trust extract ----- diff --git a/modules/installation-adding-registry-pull-secret.adoc b/modules/installation-adding-registry-pull-secret.adoc deleted file mode 100644 index 6d56f022679c..000000000000 --- a/modules/installation-adding-registry-pull-secret.adoc +++ /dev/null @@ -1,203 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-installation-images.adoc -// * installing/disconnected_install/installing-mirroring-disconnected.adoc -// * openshift_images/samples-operator-alt-registry.adoc -// * scalability_and_performance/ztp_far_edge/ztp-deploying-far-edge-clusters-at-scale.adoc -// * updating/updating_a_cluster/updating_disconnected_cluster/mirroring-image-repository.adoc - -ifeval::["{context}" == "mirroring-ocp-image-repository"] -:restricted: -:update-oc-mirror: -endif::[] - -ifeval::["{context}" == "installing-mirroring-installation-images"] -:restricted: -endif::[] - -ifeval::["{context}" == "installing-mirroring-disconnected"] -:restricted: -:oc-mirror: -endif::[] - -:_content-type: PROCEDURE -[id="installation-adding-registry-pull-secret_{context}"] -= Configuring credentials that allow images to be mirrored - -Create a container image registry credentials file that allows mirroring -images from Red Hat to your mirror. - -ifdef::restricted[] -[WARNING] -==== -Do not use this image registry credentials file as the pull secret when you install a cluster. If you provide this file when you install cluster, all of the machines in the cluster will have write access to your mirror registry. -==== -endif::restricted[] - -ifdef::restricted[] -[WARNING] -==== -This process requires that you have write access to a container image registry on the mirror registry and adds the credentials to a registry pull secret. -==== - -endif::restricted[] - -.Prerequisites - -* You configured a mirror registry to use in your disconnected environment. -ifdef::restricted[] -* You identified an image repository location on your mirror registry to mirror images into. -* You provisioned a mirror registry account that allows images to be uploaded to that image repository. -endif::restricted[] - -.Procedure - -Complete the following steps on the installation host: - -ifndef::openshift-origin[] -. Download your `registry.redhat.io` {cluster-manager-url-pull}. - -. Make a copy of your pull secret in JSON format: -+ -[source,terminal] ----- -$ cat ./pull-secret | jq . > / <1> ----- -<1> Specify the path to the folder to store the pull secret in and a name for the JSON file that you create. -+ -The contents of the file resemble the following example: -+ -[source,json] ----- -{ - "auths": { - "cloud.openshift.com": { - "auth": "b3BlbnNo...", - "email": "you@example.com" - }, - "quay.io": { - "auth": "b3BlbnNo...", - "email": "you@example.com" - }, - "registry.connect.redhat.com": { - "auth": "NTE3Njg5Nj...", - "email": "you@example.com" - }, - "registry.redhat.io": { - "auth": "NTE3Njg5Nj...", - "email": "you@example.com" - } - } -} ----- -// An additional step for following this procedure when using oc-mirror as part of the disconnected install process. -ifdef::oc-mirror[] -. Save the file either as `~/.docker/config.json` or `$XDG_RUNTIME_DIR/containers/auth.json`. 
-endif::[] -// Similar to the additional step above, except it is framed as optional because it is included in a disconnected update page (where users may or may not use oc-mirror for their process) -ifdef::update-oc-mirror[] -. Optional: If using the oc-mirror plugin, save the file either as `~/.docker/config.json` or `$XDG_RUNTIME_DIR/containers/auth.json`. -endif::[] -endif::[] - -. Generate the base64-encoded user name and password or token for your mirror registry: -+ -[source,terminal] ----- -$ echo -n ':' | base64 -w0 <1> -BGVtbYk3ZHAtqXs= ----- -<1> For `` and ``, specify the user name and password that you configured for your registry. - -ifndef::openshift-origin[] -. Edit the JSON -endif::[] -ifdef::openshift-origin[] -. Create a `.json` -endif::[] -file and add a section that describes your registry to it: -+ -[source,json] ----- -ifndef::openshift-origin[] - "auths": { - "": { <1> - "auth": "", <2> - "email": "you@example.com" - } - }, -endif::[] -ifdef::openshift-origin[] -{ - "auths": { - "": { <1> - "auth": "", <2> - "email": "you@example.com" - } - } -} -endif::[] ----- -<1> For ``, specify the registry domain name, and optionally the -port, that your mirror registry uses to serve content. For example, -`registry.example.com` or `registry.example.com:8443` -<2> For ``, specify the base64-encoded user name and password for -the mirror registry. -+ -ifndef::openshift-origin[] -The file resembles the following example: -+ -[source,json] ----- -{ - "auths": { - "registry.example.com": { - "auth": "BGVtbYk3ZHAtqXs=", - "email": "you@example.com" - }, - "cloud.openshift.com": { - "auth": "b3BlbnNo...", - "email": "you@example.com" - }, - "quay.io": { - "auth": "b3BlbnNo...", - "email": "you@example.com" - }, - "registry.connect.redhat.com": { - "auth": "NTE3Njg5Nj...", - "email": "you@example.com" - }, - "registry.redhat.io": { - "auth": "NTE3Njg5Nj...", - "email": "you@example.com" - } - } -} ----- -endif::[] - -//// -This is not currently working as intended. -. Log in to your registry by using the following command: -+ -[source,terminal] ----- -$ oc registry login --to ./pull-secret.json --registry "" --auth-basic=: ----- -+ -Provide both the registry details and a valid user name and password for the registry. 
-//// - -ifeval::["{context}" == "installing-mirroring-installation-images"] -:!restricted: -endif::[] - -ifeval::["{context}" == "mirroring-ocp-image-repository"] -:!restricted: -:!update-oc-mirror: -endif::[] - -ifeval::["{context}" == "installing-mirroring-disconnected"] -:!restricted: -:!oc-mirror: -endif::[] diff --git a/modules/installation-adding-vcenter-root-certificates.adoc b/modules/installation-adding-vcenter-root-certificates.adoc deleted file mode 100644 index e38ed94989dd..000000000000 --- a/modules/installation-adding-vcenter-root-certificates.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc - -:_content-type: PROCEDURE -[id="installation-adding-vcenter-root-certificates_{context}"] -= Adding vCenter root CA certificates to your system trust - -Because the installation program requires access to your vCenter's API, you must add your vCenter's trusted root CA certificates to your system trust before you install an {product-title} cluster. - -.Procedure - -. From the vCenter home page, download the vCenter's root CA certificates. Click *Download trusted root CA certificates* in the vSphere Web Services SDK section. The `/certs/download.zip` file downloads. - -. Extract the compressed file that contains the vCenter root CA certificates. The contents of the compressed file resemble the following file structure: -+ ----- -certs -├── lin -│ ├── 108f4d17.0 -│ ├── 108f4d17.r1 -│ ├── 7e757f6a.0 -│ ├── 8e4f8471.0 -│ └── 8e4f8471.r0 -├── mac -│ ├── 108f4d17.0 -│ ├── 108f4d17.r1 -│ ├── 7e757f6a.0 -│ ├── 8e4f8471.0 -│ └── 8e4f8471.r0 -└── win - ├── 108f4d17.0.crt - ├── 108f4d17.r1.crl - ├── 7e757f6a.0.crt - ├── 8e4f8471.0.crt - └── 8e4f8471.r0.crl - -3 directories, 15 files ----- - -. Add the files for your operating system to the system trust. For example, on a Fedora operating system, run the following command: -+ -[source,terminal] ----- -# cp certs/lin/* /etc/pki/ca-trust/source/anchors ----- - -. Update your system trust. For example, on a Fedora operating system, run the following command: -+ -[source,terminal] ----- -# update-ca-trust extract ----- diff --git a/modules/installation-alibaba-config-yaml.adoc b/modules/installation-alibaba-config-yaml.adoc deleted file mode 100644 index c3fb7aa75be1..000000000000 --- a/modules/installation-alibaba-config-yaml.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_alibaba/installing-alibaba-customizations.adoc - -:_content-type: REFERENCE -[id="installation-alibaba-config-yaml_{context}"] -= Sample customized install-config.yaml file for Alibaba Cloud - -You can customize the installation configuration file (`install-config.yaml`) to specify more details about -your cluster's platform or modify the values of the required -parameters. 
- -[source,yaml] ----- -apiVersion: v1 -baseDomain: alicloud-dev.devcluster.openshift.com -credentialsMode: Manual -compute: -- architecture: amd64 - hyperthreading: Enabled - name: worker - platform: {} - replicas: 3 -controlPlane: - architecture: amd64 - hyperthreading: Enabled - name: master - platform: {} - replicas: 3 -metadata: - creationTimestamp: null - name: test-cluster <1> - networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 - networkType: OVNKubernetes <2> - serviceNetwork: - - 172.30.0.0/16 -platform: - alibabacloud: - defaultMachinePlatform: <3> - instanceType: ecs.g6.xlarge - systemDiskCategory: cloud_efficiency - systemDiskSize: 200 - region: ap-southeast-1 <4> - resourceGroupID: rg-acfnw6j3hyai <5> - vpcID: vpc-0xifdjerdibmaqvtjob2b <8> - vswitchIDs: <8> - - vsw-0xi8ycgwc8wv5rhviwdq5 - - vsw-0xiy6v3z2tedv009b4pz2 -publish: External -pullSecret: '{"auths": {"cloud.openshift.com": {"auth": ... }' <6> -sshKey: | - ssh-rsa AAAA... <7> ----- -<1> Required. The installation program prompts you for a cluster name. -<2> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<3> Optional. Specify parameters for machine pools that do not define their own platform configuration. -<4> Required. The installation program prompts you for the region to deploy the cluster to. -<5> Optional. Specify an existing resource group where the cluster should be installed. -<6> Required. The installation program prompts you for the pull secret. -<7> Optional. The installation program prompts you for the SSH key value that you use to access the machines in your cluster. -<8> Optional. These are example vswitchID values. diff --git a/modules/installation-alibaba-dns.adoc b/modules/installation-alibaba-dns.adoc deleted file mode 100644 index 4af3415ddf36..000000000000 --- a/modules/installation-alibaba-dns.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_alibaba/installing-alibaba-account.adoc - -:_content-type: PROCEDURE -[id="installation-alibaba-dns_{context}"] -= Registering and Configuring Alibaba Cloud Domain - -To install {product-title}, the Alibaba Cloud account you use must have a dedicated public hosted zone in your account. This zone must be authoritative for the domain. This service provides cluster DNS resolution and name lookup for external connections to the cluster. - -.Procedure - -. Identify your domain, or subdomain, and registrar. You can transfer an existing domain and registrar or obtain a new one through Alibaba Cloud or another source. -+ -[NOTE] -==== -If you purchase a new domain through Alibaba Cloud, it takes time for the relevant DNS changes to propagate. For more information about purchasing domains through Alibaba Cloud, see link:https://www.alibabacloud.com/domain[Alibaba Cloud domains]. -==== - -. If you are using an existing domain and registrar, migrate its DNS to Alibaba Cloud. See link:https://www.alibabacloud.com/help/en/doc-detail/42479.htm[Domain name transfer] -in the Alibaba Cloud documentation. - -. Configure DNS for your domain. This includes: -* link:https://partners-intl.aliyun.com/help/en/doc-detail/54068.htm?spm=a2c63.p38356.0.0.427d2054k5gZOr#task-1830383[Registering a generic domain name]. 
-* link:https://partners-intl.aliyun.com/help/en/doc-detail/108953.htm?spm=a2c63.p38356.0.0.3c62433fjUrdZG#section-qyn-s41-ygb[Completing real-name verification for your domain name]. -* link:https://account.alibabacloud.com/login/login.htm[Applying for an Internet Content Provider (ICP) filing]. -* link:https://www.alibabacloud.com/product/dns/pricing?spm=a3c0i.23458820.2359477120.2.36ca7d3fe0b5KL[Enabling domain name resolution]. -+ -Use an appropriate root domain, such as `openshiftcorp.com`, or subdomain, such as `clusters.openshiftcorp.com`. - -. If you are using a subdomain, follow the procedures of your company to add its delegation records to the parent domain. - -//// -.Question - -Can Alibaba provide a link(s) to their doc on how to complete each task under step 3 in their doc? Could not find content in their help. -//// diff --git a/modules/installation-alibaba-regions.adoc b/modules/installation-alibaba-regions.adoc deleted file mode 100644 index 0667c6b4e912..000000000000 --- a/modules/installation-alibaba-regions.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_alibaba/installing-alibaba-account.adoc - -:_content-type: REFERENCE -[id="installation-alibaba-regions_{context}"] -= Supported Alibaba regions - -You can deploy an {product-title} cluster to the regions listed in the link:https://www.alibabacloud.com/help/en/doc-detail/188196.htm[Alibaba _Regions and zones_ documentation]. - -//// -Answer from Gaurav Singh (PM) - -All of the regions (in mainland china and outside mainland china ) listed in this doc https://www.alibabacloud.com/help/doc-detail/188196.htm[Alibaba doc] will be shown as option to the customer to deploy openshift . We might need to test all of them. -//// diff --git a/modules/installation-applying-aws-security-groups.adoc b/modules/installation-applying-aws-security-groups.adoc deleted file mode 100644 index a63a5fb7ac51..000000000000 --- a/modules/installation-applying-aws-security-groups.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-vpc.adoc -// * installing/installing_aws/installing-aws-private.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-secret-region.adoc -// * installing/installing_aws/installing-aws-china.adoc -// * installing/installing_aws/installing-aws-outposts-remote-workers.adoc - -:_content-type: PROCEDURE -[id="installation-aws-vpc-security-groups_{context}"] -= Applying existing AWS security groups to the cluster - -Applying existing AWS security groups to your control plane and compute machines can help you meet the security needs of your organization, in such cases where you need to control the incoming or outgoing traffic of these machines. - -.Prerequisites -* You have created the security groups in AWS. For more information, see the AWS documentation about working with link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html[security groups]. -* The security groups must be associated with the existing VPC that you are deploying the cluster to. The security groups cannot be associated with another VPC. -* You have an existing `install-config.yaml` file. - -.Procedure - -. In the `install-config.yaml` file, edit the `compute.platform.aws.additionalSecurityGroupIDs` parameter to specify one or more custom security groups for your compute machines. -. 
Edit the `controlPlane.platform.aws.additionalSecurityGroupIDs` parameter to specify one or more custom security groups for your control plane machines. -. Save the file and reference it when deploying the cluster. - -.Sample `install-config.yaml` file that specifies custom security groups -[source,yaml] ----- -# ... -compute: -- hyperthreading: Enabled - name: worker - platform: - aws: - additionalSecurityGroupIDs: - - sg-1 <1> - - sg-2 - replicas: 3 -controlPlane: - hyperthreading: Enabled - name: master - platform: - aws: - additionalSecurityGroupIDs: - - sg-3 - - sg-4 - replicas: 3 -platform: - aws: - region: us-east-1 - subnets: <2> - - subnet-1 - - subnet-2 - - subnet-3 ----- -<1> Specify the name of the security group as it appears in the Amazon EC2 console, including the `sg` prefix. -<2> Specify subnets for each availability zone that your cluster uses. diff --git a/modules/installation-approve-csrs.adoc b/modules/installation-approve-csrs.adoc deleted file mode 100644 index 990a81903c7d..000000000000 --- a/modules/installation-approve-csrs.adoc +++ /dev/null @@ -1,199 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-gcp-restricted-networks.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * machine_management/adding-rhel-compute.adoc -// * machine_management/more-rhel-compute.adoc -// * machine_management/user_provisioned/adding-aws-compute-user-infra.adoc -// * machine_management/user_provisioned/adding-bare-metal-compute-user-infra.adoc -// * machine_management/user_provisioned/adding-vsphere-compute-user-infra.adoc -// * post_installation_configuration/node-tasks.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-ibm-power.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-power.adoc - - -ifeval::["{context}" == "installing-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:ibm-z-kvm: -endif::[] - -:_content-type: PROCEDURE -[id="installation-approve-csrs_{context}"] -= Approving the certificate signing requests for your machines - -When you add machines to a cluster, two pending certificate signing requests (CSRs) are generated for each machine that you added. You must confirm that these CSRs are approved or, if necessary, approve them yourself. The client requests must be approved first, followed by the server requests. - -.Prerequisites - -* You added machines to your cluster. - -.Procedure - -. 
Confirm that the cluster recognizes the machines: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -master-0 Ready master 63m v1.27.3 -master-1 Ready master 63m v1.27.3 -master-2 Ready master 64m v1.27.3 ----- -+ -The output lists all of the machines that you created. -+ -[NOTE] -==== -The preceding output might not include the compute nodes, also known as worker nodes, until some CSRs are approved. -==== - -. Review the pending CSRs and ensure that you see the client requests with the `Pending` or `Approved` status for each machine that you added to the cluster: -+ -ifndef::ibm-z,ibm-z-kvm[] -[source,terminal] ----- -$ oc get csr ----- -+ -.Example output -[source,terminal] ----- -NAME AGE REQUESTOR CONDITION -csr-8b2br 15m system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending -csr-8vnps 15m system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending -... ----- -+ -In this example, two machines are joining the cluster. You might see more approved CSRs in the list. -endif::ibm-z,ibm-z-kvm[] -ifdef::ibm-z,ibm-z-kvm[] -[source,terminal] ----- -$ oc get csr ----- -+ -[source,terminal] -.Example output ----- -NAME AGE REQUESTOR CONDITION -csr-mddf5 20m system:node:master-01.example.com Approved,Issued -csr-z5rln 16m system:node:worker-21.example.com Approved,Issued ----- -endif::ibm-z,ibm-z-kvm[] - -. If the CSRs were not approved, after all of the pending CSRs for the machines you added are in `Pending` status, approve the CSRs for your cluster machines: -+ -[NOTE] -==== -Because the CSRs rotate automatically, approve your CSRs within an hour of adding the machines to the cluster. If you do not approve them within an hour, the certificates will rotate, and more than two certificates will be present for each node. You must approve all of these certificates. After the client CSR is approved, the Kubelet creates a secondary CSR for the serving certificate, which requires manual approval. Then, subsequent serving certificate renewal requests are automatically approved by the `machine-approver` if the Kubelet requests a new certificate with identical parameters. -==== -+ -[NOTE] -==== -For clusters running on platforms that are not machine API enabled, such as bare metal and other user-provisioned infrastructure, you must implement a method of automatically approving the kubelet serving certificate requests (CSRs). If a request is not approved, then the `oc exec`, `oc rsh`, and `oc logs` commands cannot succeed, because a serving certificate is required when the API server connects to the kubelet. Any operation that contacts the Kubelet endpoint requires this certificate approval to be in place. The method must watch for new CSRs, confirm that the CSR was submitted by the `node-bootstrapper` service account in the `system:node` or `system:admin` groups, and confirm the identity of the node. -==== - -** To approve them individually, run the following command for each valid CSR: -+ -[source,terminal] ----- -$ oc adm certificate approve <1> ----- -<1> `` is the name of a CSR from the list of current CSRs. - -** To approve all pending CSRs, run the following command: -+ -[source,terminal] ----- -$ oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve ----- -+ -[NOTE] -==== -Some Operators might not become available until some CSRs are approved. -==== - -. 
Now that your client requests are approved, you must review the server requests for each machine that you added to the cluster: -+ -[source,terminal] ----- -$ oc get csr ----- -+ -.Example output -[source,terminal] ----- -NAME AGE REQUESTOR CONDITION -csr-bfd72 5m26s system:node:ip-10-0-50-126.us-east-2.compute.internal Pending -csr-c57lv 5m26s system:node:ip-10-0-95-157.us-east-2.compute.internal Pending -... ----- - -. If the remaining CSRs are not approved, and are in the `Pending` status, approve the CSRs for your cluster machines: - -** To approve them individually, run the following command for each valid CSR: -+ -[source,terminal] ----- -$ oc adm certificate approve <1> ----- -<1> `` is the name of a CSR from the list of current CSRs. - -** To approve all pending CSRs, run the following command: -+ -[source,terminal] ----- -$ oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs oc adm certificate approve ----- - -. After all client and server CSRs have been approved, the machines have the `Ready` status. Verify this by running the following command: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -master-0 Ready master 73m v1.27.3 -master-1 Ready master 73m v1.27.3 -master-2 Ready master 74m v1.27.3 -worker-0 Ready worker 11m v1.27.3 -worker-1 Ready worker 11m v1.27.3 ----- -+ -[NOTE] -==== -It can take a few minutes after approval of the server CSRs for the machines to transition to the `Ready` status. -==== - -.Additional information -* For more information on CSRs, see link:https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/[Certificate Signing Requests]. - -ifeval::["{context}" == "installing-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:!ibm-z-kvm: -endif::[] diff --git a/modules/installation-arm-bootstrap.adoc b/modules/installation-arm-bootstrap.adoc deleted file mode 100644 index 0840532634cd..000000000000 --- a/modules/installation-arm-bootstrap.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] - -[id="installation-arm-bootstrap_{context}"] -= ARM template for the bootstrap machine - -You can use the following Azure Resource Manager (ARM) template to deploy the -bootstrap machine that you need for your {product-title} cluster: - -.`04_bootstrap.json` ARM template -[%collapsible] -==== -[source,json] ----- -ifndef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azure/04_bootstrap.json[] -endif::ash[] -ifdef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azurestack/04_bootstrap.json[] -endif::ash[] ----- -==== - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] diff --git a/modules/installation-arm-control-plane.adoc b/modules/installation-arm-control-plane.adoc deleted file mode 100644 index 59b8127da2c0..000000000000 --- a/modules/installation-arm-control-plane.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * 
installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] - -[id="installation-arm-control-plane_{context}"] -= ARM template for control plane machines - -You can use the following Azure Resource Manager (ARM) template to deploy the -control plane machines that you need for your {product-title} cluster: - -.`05_masters.json` ARM template -[%collapsible] -==== -[source,json] ----- -ifndef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azure/05_masters.json[] -endif::ash[] -ifdef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azurestack/05_masters.json[] -endif::ash[] ----- -==== - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] diff --git a/modules/installation-arm-dns.adoc b/modules/installation-arm-dns.adoc deleted file mode 100644 index f15f4cd08ab0..000000000000 --- a/modules/installation-arm-dns.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] - -[id="installation-arm-dns_{context}"] -= ARM template for the network and load balancers - -You can use the following Azure Resource Manager (ARM) template to deploy the -networking objects and load balancers that you need for your {product-title} -cluster: - -.`03_infra.json` ARM template -[%collapsible] -==== -[source,json] ----- -ifndef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azure/03_infra.json[] -endif::ash[] -ifdef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azurestack/03_infra.json[] -endif::ash[] ----- -==== - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] diff --git a/modules/installation-arm-image-storage.adoc b/modules/installation-arm-image-storage.adoc deleted file mode 100644 index b22d620d6c42..000000000000 --- a/modules/installation-arm-image-storage.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] - -[id="installation-arm-image-storage_{context}"] -= ARM template for image storage - -You can use the following Azure Resource Manager (ARM) template to deploy the -stored {op-system-first} image that you need for your {product-title} cluster: - -.`02_storage.json` ARM template -[%collapsible] -==== -[source,json] ----- -ifndef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azure/02_storage.json[] -endif::ash[] -ifdef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azurestack/02_storage.json[] -endif::ash[] ----- -==== - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] diff --git a/modules/installation-arm-vnet.adoc b/modules/installation-arm-vnet.adoc deleted file mode 100644 index 8c1f64237b4c..000000000000 --- a/modules/installation-arm-vnet.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following 
assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] - -[id="installation-arm-vnet_{context}"] -= ARM template for the VNet - -You can use the following Azure Resource Manager (ARM) template to deploy the -VNet that you need for your {product-title} cluster: - -.`01_vnet.json` ARM template -[%collapsible] -==== -[source,json] ----- -ifndef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azure/01_vnet.json[] -endif::ash[] -ifdef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azurestack/01_vnet.json[] -endif::ash[] ----- -==== - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] diff --git a/modules/installation-arm-worker.adoc b/modules/installation-arm-worker.adoc deleted file mode 100644 index 6586d94624ff..000000000000 --- a/modules/installation-arm-worker.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] - -[id="installation-arm-worker_{context}"] -= ARM template for worker machines - -You can use the following Azure Resource Manager (ARM) template to deploy the -worker machines that you need for your {product-title} cluster: - -.`06_workers.json` ARM template -[%collapsible] -==== -[source,json] ----- -ifndef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azure/06_workers.json[] -endif::ash[] -ifdef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azurestack/06_workers.json[] -endif::ash[] ----- -==== - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] diff --git a/modules/installation-aws-about-government-region.adoc b/modules/installation-aws-about-government-region.adoc deleted file mode 100644 index 03ca50398f50..000000000000 --- a/modules/installation-aws-about-government-region.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-secret-region.adoc - -ifeval::["{context}" == "installing-aws-government-region"] -:aws-gov: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:aws-secret: -endif::[] - -[id="installation-aws-about-gov-secret-region_{context}"] -ifdef::aws-gov[] -= AWS government regions -endif::aws-gov[] -ifdef::aws-secret[] -= AWS secret regions -endif::aws-secret[] - -ifdef::aws-gov[] -{product-title} supports deploying a cluster to an link:https://aws.amazon.com/govcloud-us[AWS GovCloud (US)] region. -endif::aws-gov[] - -ifdef::aws-gov[] -The following AWS GovCloud partitions are supported: - -* `us-gov-east-1` -* `us-gov-west-1` -endif::aws-gov[] - -ifdef::aws-secret[] -The following AWS secret partitions are supported: - -* `us-isob-east-1` (SC2S) -* `us-iso-east-1` (C2S) - -[NOTE] -==== -The maximum supported MTU in an AWS SC2S and C2S Regions is not the same as -AWS commercial. 
For more information about configuring MTU during installation, -see the _Cluster Network Operator configuration object_ section in _Installing -a cluster on AWS with network customizations_. -==== -endif::aws-secret[] - -ifeval::["{context}" == "installing-aws-government-region"] -:!aws-gov: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:!aws-secret: -endif::[] diff --git a/modules/installation-aws-access-analyzer.adoc b/modules/installation-aws-access-analyzer.adoc deleted file mode 100644 index 2ec78ad2588d..000000000000 --- a/modules/installation-aws-access-analyzer.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: PROCEDURE -[id="create-custom-permissions-for-iam-instance-profiles_{context}"] -= Using AWS IAM Analyzer to create policy templates - -The minimal set of permissions that the control plane and compute instance profiles require depends on how the cluster is configured for its daily operation. - -One way to determine which permissions the cluster instances require is to use the AWS Identity and Access Management Access Analyzer (IAM Access Analyzer) to create a policy template: - -* A policy template contains the permissions the cluster has used over a specified period of time. -* You can then use the template to create policies with fine-grained permissions. - -.Procedure - -The overall process is as follows: - -. Ensure that CloudTrail is enabled. CloudTrail records all of the actions and events in your AWS account, including the API calls that are required to create a policy template. For more information, see the AWS documentation for https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-getting-started.html[working with CloudTrail]. -. Create an instance profile for control plane instances and an instance profile for compute instances. Be sure to assign each role a permissive policy, such as `PowerUserAccess`. For more information, see the AWS documentation for -https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html[creating instance profile roles]. -. Install the cluster in a development environment and configure it as required. Be sure to deploy all of the applications that the cluster will host in a production environment. -. Test the cluster thoroughly. Testing the cluster ensures that all of the required API calls are logged. -. Use the IAM Access Analyzer to create a policy template for each instance profile. For more information, see the AWS documentation for https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-policy-generation.html[generating policies based on the CloudTrail logs]. -. Create and add a fine-grained policy to each instance profile. -. Remove the permissive policy from each instance profile. -. Deploy a production cluster using the existing instance profiles with the new policies. - -[NOTE] -==== -You can add https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition.html[IAM Conditions] to your policy to make it more restrictive and compliant with your organization's security requirements.
-==== diff --git a/modules/installation-aws-add-iam-roles.adoc b/modules/installation-aws-add-iam-roles.adoc deleted file mode 100644 index 0e7ce88aa415..000000000000 --- a/modules/installation-aws-add-iam-roles.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-account.adoc - -:_content-type: PROCEDURE -[id="specify-an-existing-iam-role_{context}"] -= Specifying an existing IAM role - -Instead of allowing the installation program to create IAM instance profiles with the default permissions, you can use the `install-config.yaml` file to specify an existing IAM role for control plane and compute instances. - -.Prerequisites - -* You have an existing `install-config.yaml` file. - -.Procedure - -. Update `compute.platform.aws.iamRole` with an existing role for the compute machines. -+ -.Sample `install-config.yaml` file with an IAM role for compute instances -[source,yaml] ----- -compute: -- hyperthreading: Enabled - name: worker - platform: - aws: - iamRole: ExampleRole ----- -. Update `controlPlane.platform.aws.iamRole` with an existing role for the control plane machines. -+ -.Sample `install-config.yaml` file with an IAM role for control plane instances -[source,yaml] ----- -controlPlane: - hyperthreading: Enabled - name: master - platform: - aws: - iamRole: ExampleRole ----- -. Save the file and reference it when installing the {product-title} cluster. diff --git a/modules/installation-aws-add-local-zone-locations.adoc b/modules/installation-aws-add-local-zone-locations.adoc deleted file mode 100644 index 691e858b509c..000000000000 --- a/modules/installation-aws-add-local-zone-locations.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-localzone.adoc - -:_content-type: PROCEDURE -[id="installation-aws-add-local-zone-locations_{context}"] -= Opting into AWS Local Zones - -If you plan to create the subnets in AWS Local Zones, you must opt in to each zone group separately. - -.Prerequisites - -* You have installed the AWS CLI. -* You have determined into which region you will deploy your {product-title} cluster. - -.Procedure - -. Export a variable to contain the name of the region in which you plan to deploy your {product-title} cluster by running the following command: -+ -[source,terminal] ----- -$ export CLUSTER_REGION="" <1> ----- -<1> For ``, specify a valid AWS region name, such as `us-east-1`. - -. List the zones that are available in your region by running the following command: -+ -[source,terminal] ----- -$ aws --region ${CLUSTER_REGION} ec2 describe-availability-zones \ - --query 'AvailabilityZones[].[{ZoneName: ZoneName, GroupName: GroupName, Status: OptInStatus}]' \ - --filters Name=zone-type,Values=local-zone \ - --all-availability-zones ----- -+ -Depending on the region, the list of available zones can be long. The command returns the following fields: -+ -`ZoneName`:: The name of the Local Zone. -`GroupName`:: The group that the zone is part of. You need to save this name to opt in. -`Status`:: The status of the Local Zone group. If the status is `not-opted-in`, you must opt in to the `GroupName` by running the commands that follow. - -. Export a variable to contain the name of the Local Zone to host your VPC by running the following command: -+ -[source,terminal] ----- -$ export ZONE_GROUP_NAME="" <1> ----- -+ -where: - -:: Specifies the name of the group of the Local Zone you want to create subnets on.
For example, specify `us-east-1-nyc-1` to use the zone `us-east-1-nyc-1a`, US East (New York). - -. Opt in to the zone group on your AWS account by running the following command: -+ -[source,terminal] ----- -$ aws ec2 modify-availability-zone-group \ - --group-name "${ZONE_GROUP_NAME}" \ - --opt-in-status opted-in ----- diff --git a/modules/installation-aws-ami-stream-metadata.adoc b/modules/installation-aws-ami-stream-metadata.adoc deleted file mode 100644 index f08a0b2202f8..000000000000 --- a/modules/installation-aws-ami-stream-metadata.adoc +++ /dev/null @@ -1,61 +0,0 @@ -//TODO: Add the module include to the following assemblies -//TODO: Create related modules for OpenStack (QCOW2) and Bare Metal (ISO) - -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -:_content-type: PROCEDURE -[id="installation-aws-ami-stream-metadata_{context}"] -= Accessing {op-system} AMIs with stream metadata - -In {product-title}, _stream metadata_ provides standardized metadata about {op-system} in the JSON format and injects the metadata into the cluster. Stream metadata is a stable format that supports multiple architectures and is intended to be self-documenting for maintaining automation. - -You can use the `coreos print-stream-json` sub-command of `openshift-install` to access information about the boot images in the stream metadata format. This command provides a method for printing stream metadata in a scriptable, machine-readable format. - -For user-provisioned installations, the `openshift-install` binary contains references to the version of {op-system} boot images that are tested for use with {product-title}, such as the AWS AMI. - -.Procedure - -To parse the stream metadata, use one of the following methods: - -* From a Go program, use the official `stream-metadata-go` library at https://github.com/coreos/stream-metadata-go. You can also view example code in the library. - -* From another programming language, such as Python or Ruby, use the JSON library of your preferred programming language. - -* From a command-line utility that handles JSON data, such as `jq`: - -** Print the current `x86_64` -ifndef::openshift-origin[] -or `aarch64` -endif::openshift-origin[] -AMI for an AWS region, such as `us-west-1`: -+ -.For x86_64 -[source,terminal] ----- -$ openshift-install coreos print-stream-json | jq -r '.architectures.x86_64.images.aws.regions["us-west-1"].image' ----- -+ -.Example output -[source,terminal] ----- -ami-0d3e625f84626bbda ----- -+ -ifndef::openshift-origin[] -.For aarch64 -[source,terminal] ----- -$ openshift-install coreos print-stream-json | jq -r '.architectures.aarch64.images.aws.regions["us-west-1"].image' ----- -+ -.Example output -[source,terminal] ----- -ami-0af1d3b7fa5be2131 ----- -+ -endif::openshift-origin[] -The output of this command is the AWS AMI ID for your designated architecture and the `us-west-1` region. The AMI must belong to the same region as the cluster. 
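If you automate a user-provisioned installation, you can also capture the AMI ID in an environment variable and reuse it later, for example as a CloudFormation parameter value. The following is a minimal sketch that reuses the `x86_64` query and the `us-west-1` region from the previous example; the `AWS_AMI_ID` variable name is only illustrative.

[source,terminal]
----
$ export AWS_AMI_ID=$(openshift-install coreos print-stream-json | jq -r '.architectures.x86_64.images.aws.regions["us-west-1"].image')

$ echo "${AWS_AMI_ID}"
----

You can then pass `${AWS_AMI_ID}` to your provisioning tooling instead of copying the value by hand.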
diff --git a/modules/installation-aws-arm-tested-machine-types.adoc b/modules/installation-aws-arm-tested-machine-types.adoc deleted file mode 100644 index 31e27b899e0c..000000000000 --- a/modules/installation-aws-arm-tested-machine-types.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// installing/installing_aws/installing-aws-china.adoc -// installing/installing_aws/installing-aws-customizations.adoc -// installing/installing_aws/installing-aws-government-region.adoc -// installing/installing_aws/installing-aws-network-customizations.adoc -// installing/installing_aws/installing-aws-private.adoc -// installing/installing_aws/installing-aws-user-infra.adoc -// installing/installing_aws/installing-aws-vpc.adoc -// installing/installing_aws/installing-restricted-networks-aws.adoc - -[id="installation-aws-arm-tested-machine-types_{context}"] -= Tested instance types for AWS on 64-bit ARM infrastructures - -The following Amazon Web Services (AWS) 64-bit ARM instance types have been tested with {product-title}. - -[NOTE] -==== -Use the machine types included in the following charts for your AWS ARM instances. If you use an instance type that is not listed in the chart, ensure that the instance size you use matches the minimum resource requirements that are listed in "Minimum resource requirements for cluster installation". -==== - -.Machine types based on 64-bit ARM architecture -[%collapsible] -==== -include::https://raw.githubusercontent.com/openshift/installer/master/docs/user/aws/tested_instance_types_aarch64.md[] -==== diff --git a/modules/installation-aws-config-yaml.adoc b/modules/installation-aws-config-yaml.adoc deleted file mode 100644 index 1b9401aee4bc..000000000000 --- a/modules/installation-aws-config-yaml.adoc +++ /dev/null @@ -1,467 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-customizations.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-secret-region.adoc -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_aws/installing-aws-private.adoc -// * installing/installing_aws/installing-aws-vpc.adoc -// * installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc -// * installing/installing_aws/installing-aws-outposts-remote-workers.adoc - -ifeval::["{context}" == "installing-aws-network-customizations"] -:with-networking: -endif::[] -ifeval::["{context}" != "installing-aws-network-customizations"] -:without-networking: -endif::[] -ifeval::["{context}" == "installing-aws-vpc"] -:vpc: -endif::[] -ifeval::["{context}" == "installing-aws-private"] -:vpc: -:private: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:vpc: -:private: -:gov: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:vpc: -:private: -:secret: -endif::[] -ifeval::["{context}" == "installing-aws-china-region"] -:vpc: -:private: -:china: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-aws-outposts-remote-workers"] -:aws-outposts: -endif::[] - -:_content-type: REFERENCE -[id="installation-aws-config-yaml_{context}"] -= Sample customized install-config.yaml file for AWS - -You can customize the installation configuration file (`install-config.yaml`) to specify more details about your {product-title} cluster's 
platform or modify the values of the required parameters. - -ifndef::china,gov,secret[] -[IMPORTANT] -==== -This sample YAML file is provided for reference only. You must obtain your -`install-config.yaml` file by using the installation program and modify it. -==== -endif::china,gov,secret[] - -ifdef::china,gov,secret[] -[IMPORTANT] -==== -This sample YAML file is provided for reference only. Use it as a resource to enter parameter values into the installation configuration file that you created manually. -==== -endif::china,gov,secret[] - -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com <1> -credentialsMode: Mint <2> -controlPlane: <3> <4> - hyperthreading: Enabled <5> - name: master -ifndef::aws-outposts[] - platform: - aws: -ifndef::openshift-origin[] - lbType: NLB -endif::openshift-origin[] - zones: -ifdef::china[] - - cn-north-1a - - cn-north-1b -endif::china[] -ifdef::gov[] - - us-gov-west-1a - - us-gov-west-1b -endif::gov[] -ifdef::secret[] - - us-iso-east-1a - - us-iso-east-1b -endif::secret[] -ifndef::gov,china,secret[] - - us-west-2a - - us-west-2b -endif::gov,china,secret[] - rootVolume: - iops: 4000 - size: 500 - type: io1 <6> - metadataService: - authentication: Optional <7> - type: m6i.xlarge -endif::aws-outposts[] -ifdef::aws-outposts[] - platform: {} -endif::aws-outposts[] - replicas: 3 -compute: <3> -- hyperthreading: Enabled <5> - name: worker - platform: - aws: -ifndef::aws-outposts[] - rootVolume: - iops: 2000 - size: 500 - type: io1 <6> - metadataService: - authentication: Optional <7> - type: c5.4xlarge - zones: -ifdef::china[] - - cn-north-1a -endif::china[] -ifdef::gov[] - - us-gov-west-1c -endif::gov[] -ifdef::secret[] - - us-iso-east-1a - - us-iso-east-1b -endif::secret[] -ifndef::gov,china,secret[] - - us-west-2c -endif::gov,china,secret[] -endif::aws-outposts[] -ifdef::aws-outposts[] - type: m5.large <6> - zones: - - us-east-1a <7> - rootVolume: - type: gp2 <8> - size: 120 -endif::aws-outposts[] - replicas: 3 -metadata: - name: test-cluster <1> -ifdef::without-networking[] -networking: -endif::[] -ifdef::with-networking[] -networking: <3> -endif::[] - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 -ifndef::aws-outposts[] - networkType: OVNKubernetes <8> -endif::aws-outposts[] -ifdef::aws-outposts[] - networkType: OVNKubernetes <9> -endif::aws-outposts[] - serviceNetwork: - - 172.30.0.0/16 -platform: - aws: -ifndef::gov,china,secret[] - region: us-west-2 <1> - propagateUserTags: true <3> -endif::gov,china,secret[] -ifdef::china[] - region: cn-north-1 <1> - propagateUserTags: true <3> -endif::china[] -ifdef::gov[] - region: us-gov-west-1 <1> - propagateUserTags: true <3> -endif::gov[] -ifdef::secret[] - region: us-iso-east-1 <1> - propagateUserTags: true <3> -endif::secret[] - userTags: - adminContact: jdoe - costCenter: 7536 -ifdef::vpc,restricted[] - subnets: <9> - - subnet-1 - - subnet-2 - - subnet-3 -endif::vpc,restricted[] -ifdef::aws-outposts[] - subnets: <10> - - subnet-1 - - subnet-2 - - subnet-3 -endif::aws-outposts[] -ifdef::vpc,restricted[] -ifndef::secret,china[] - amiID: ami-96c6f8f7 <10> -endif::secret,china[] -ifdef::secret,china[] - amiID: ami-96c6f8f7 <1> <10> -endif::secret,china[] - serviceEndpoints: <11> - - name: ec2 -ifndef::china[] - url: https://vpce-id.ec2.us-west-2.vpce.amazonaws.com -endif::china[] -ifdef::china[] - url: https://vpce-id.ec2.cn-north-1.vpce.amazonaws.com.cn -endif::china[] - hostedZone: Z3URY6TWQ91KVV <12> -endif::vpc,restricted[] 
-ifndef::vpc,restricted,aws-outposts[] - amiID: ami-96c6f8f7 <9> - serviceEndpoints: <10> - - name: ec2 - url: https://vpce-id.ec2.us-west-2.vpce.amazonaws.com -endif::vpc,restricted,aws-outposts[] -ifdef::vpc,restricted[] -ifndef::openshift-origin[] -fips: false <13> -sshKey: ssh-ed25519 AAAA... <14> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <13> -endif::openshift-origin[] -endif::vpc,restricted[] -ifndef::vpc,restricted[] -ifndef::openshift-origin,aws-outposts[] -fips: false <11> -sshKey: ssh-ed25519 AAAA... <12> -endif::openshift-origin,aws-outposts[] -ifdef::openshift-origin,aws-outposts[] -sshKey: ssh-ed25519 AAAA... <11> -endif::openshift-origin,aws-outposts[] -endif::vpc,restricted[] -ifdef::private[] -ifndef::openshift-origin[] -publish: Internal <15> -endif::openshift-origin[] -endif::private[] -ifndef::restricted[] -pullSecret: '{"auths": ...}' <1> -endif::restricted[] -ifdef::restricted[] -ifndef::openshift-origin[] -pullSecret: '{"auths":{"": {"auth": "","email": "you@example.com"}}}' <15> -endif::openshift-origin[] -ifdef::openshift-origin[] -pullSecret: '{"auths":{"": {"auth": "","email": "you@example.com"}}}' <14> -endif::openshift-origin[] -endif::restricted[] -ifdef::secret[] -ifndef::openshift-origin[] -additionalTrustBundle: | <16> - -----BEGIN CERTIFICATE----- - - -----END CERTIFICATE----- -endif::openshift-origin[] -endif::secret[] -ifdef::private[] -ifdef::openshift-origin[] -publish: Internal <14> -endif::openshift-origin[] -endif::private[] -ifdef::secret[] -ifdef::openshift-origin[] -additionalTrustBundle: | <15> - -----BEGIN CERTIFICATE----- - - -----END CERTIFICATE----- -endif::openshift-origin[] -endif::secret[] -ifdef::restricted[] -ifndef::openshift-origin[] -additionalTrustBundle: | <16> - -----BEGIN CERTIFICATE----- - - -----END CERTIFICATE----- -imageContentSources: <17> -- mirrors: - - //release - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - //release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -endif::openshift-origin[] -ifdef::openshift-origin[] -additionalTrustBundle: | <15> - -----BEGIN CERTIFICATE----- - - -----END CERTIFICATE----- -imageContentSources: <16> -- mirrors: - - //release - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - //release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -endif::openshift-origin[] -endif::restricted[] ----- -ifndef::gov,secret,china[] -<1> Required. The installation program prompts you for this value. -endif::gov,secret,china[] -ifdef::gov,secret,china[] -<1> Required. -endif::gov,secret,china[] -<2> Optional: Add this parameter to force the Cloud Credential Operator (CCO) to use the specified mode, instead of having the CCO dynamically try to determine the capabilities of the credentials. For details about CCO modes, see the _Cloud Credential Operator_ entry in the _Red Hat Operators reference_ content. -<3> If you do not provide these parameters and values, the installation program -provides the default value. -<4> The `controlPlane` section is a single mapping, but the `compute` section is a -sequence of mappings. To meet the requirements of the different data structures, -the first line of the `compute` section must begin with a hyphen, `-`, and the -first line of the `controlPlane` section must not. Only one control plane pool is used. -<5> Whether to enable or disable simultaneous multithreading, or -`hyperthreading`. 
By default, simultaneous multithreading is enabled -to increase the performance of your machines' cores. You can disable it by -setting the parameter value to `Disabled`. If you disable simultaneous -multithreading in some cluster machines, you must disable it in all cluster -machines. -+ -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning -accounts for the dramatically decreased machine performance. Use larger -instance types, such as `m4.2xlarge` or `m5.2xlarge`, for your machines if you -disable simultaneous multithreading. -==== -ifndef::aws-outposts[] -<6> To configure faster storage for etcd, especially for larger clusters, set the storage type as `io1` and set `iops` to `2000`. -<7> Whether to require the link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html[Amazon EC2 Instance Metadata Service v2] (IMDSv2). To require IMDSv2, set the parameter value to `Required`. To allow the use of both IMDSv1 and IMDSv2, set the parameter value to `Optional`. If no value is specified, both IMDSv1 and IMDSv2 are allowed. -+ -[NOTE] -==== -The IMDS configuration for control plane machines that is set during cluster installation can only be changed by using the AWS CLI. The IMDS configuration for compute machines can be changed by using compute machine sets. -==== -<8> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -endif::aws-outposts[] -ifdef::aws-outposts[] -<6> For compute instances running in an AWS Outpost instance, specify a supported instance type in the AWS Outpost instance. -<7> For compute instances running in AWS Outpost instance, specify the Availability Zone where the Outpost instance is located. -<8> For compute instances running in AWS Outpost instance, specify volume type gp2, to avoid using gp3 volume type which is not supported. -<9> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<10> If you provide your own VPC, specify subnets for each availability zone that your cluster uses. -endif::aws-outposts[] -ifdef::vpc,restricted[] -<9> If you provide your own VPC, specify subnets for each availability zone that your cluster uses. -<10> The ID of the AMI used to boot machines for the cluster. If set, the AMI -must belong to the same region as the cluster. -<11> The AWS service endpoints. Custom endpoints are required when installing to -an unknown AWS region. The endpoint URL must use the `https` protocol and the -host must trust the certificate. -<12> The ID of your existing Route 53 private hosted zone. Providing an existing hosted zone requires that you supply your own VPC and the hosted zone is already associated with the VPC prior to installing your cluster. If undefined, the installation program creates a new hosted zone. -ifndef::openshift-origin[] -<13> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. 
-==== -<14> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<13> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -endif::vpc,restricted[] -ifndef::vpc,restricted,aws-outposts[] -<9> The ID of the AMI used to boot machines for the cluster. If set, the AMI must belong to the same region as the cluster. -<10> The AWS service endpoints. Custom endpoints are required when installing to an unknown AWS region. The endpoint URL must use the `https` protocol and the host must trust the certificate. -ifndef::openshift-origin[] -<11> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<12> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<11> You can optionally provide the `sshKey` value that you use to access the -machines in your cluster. -endif::openshift-origin[] -endif::vpc,restricted,aws-outposts[] -ifdef::aws-outposts[] -<11> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::aws-outposts[] -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -ifdef::private[] -ifndef::openshift-origin[] -<15> How to publish the user-facing endpoints of your cluster. Set `publish` to `Internal` to deploy a private cluster, which cannot be accessed from the internet. The default value is `External`. -endif::openshift-origin[] -ifdef::openshift-origin[] -<14> How to publish the user-facing endpoints of your cluster. Set `publish` to `Internal` to deploy a private cluster, which cannot be accessed from the internet. The default value is `External`. -endif::openshift-origin[] -endif::private[] -ifdef::secret[] -ifndef::openshift-origin[] -<16> The custom CA certificate. This is required when deploying to the SC2S or C2S Regions because the AWS API requires a custom CA trust bundle. -endif::openshift-origin[] -ifdef::openshift-origin[] -<15> The custom CA certificate. This is required when deploying to the SC2S or C2S Regions because the AWS API requires a custom CA trust bundle. -endif::openshift-origin[] -endif::secret[] -ifdef::restricted[] -ifndef::openshift-origin[] -<15> For ``, specify the registry domain name, and optionally the -port, that your mirror registry uses to serve content. For example -`registry.example.com` or `registry.example.com:5000`. For ``, -specify the base64-encoded user name and password for your mirror registry. -<16> Provide the contents of the certificate file that you used for your mirror registry. -<17> Provide the `imageContentSources` section from the output of the command to mirror the repository. -endif::openshift-origin[] -ifdef::openshift-origin[] -<14> For ``, specify the registry domain name, and optionally the -port, that your mirror registry uses to serve content. 
For example -`registry.example.com` or `registry.example.com:5000`. For ``, -specify the base64-encoded user name and password for your mirror registry. -<15> Provide the contents of the certificate file that you used for your mirror registry. -<16> Provide the `imageContentSources` section from the output of the command to mirror the repository. -endif::openshift-origin[] -endif::restricted[] - -ifeval::["{context}" == "installing-aws-network-customizations"] -:!with-networking: -endif::[] -ifeval::["{context}" != "installing-aws-network-customizations"] -:!without-networking: -endif::[] -ifeval::["{context}" == "installing-aws-vpc"] -:!vpc: -endif::[] -ifeval::["{context}" == "installing-aws-private"] -:!vpc: -:!private: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:!vpc: -:!private: -:!gov: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:!vpc: -:!private: -:!secret: -endif::[] -ifeval::["{context}" == "installing-aws-china-region"] -:!vpc: -:!private: -:!china: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-aws-outposts-remote-workers"] -:!aws-outposts: -endif::[] diff --git a/modules/installation-aws-delete-cluster.adoc b/modules/installation-aws-delete-cluster.adoc deleted file mode 100644 index 1fc057646abc..000000000000 --- a/modules/installation-aws-delete-cluster.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/uninstalling-cluster-aws.adoc - -:_content-type: PROCEDURE -[id="installation-aws-delete-cluster"] -= Deleting a cluster with a configured AWS Local Zone infrastructure - -After you install a cluster on Amazon Web Services (AWS) into an existing Virtual Private Cloud (VPC), and you set subnets for each Local Zone location, you can delete the cluster and any AWS resources associated with it. - -The example in the procedure assumes that you created a VPC and its subnets by using a CloudFormation template. - -.Prerequisites - -* You know the name of the CloudFormation stacks, `` and ``, that were used during the creation of the network. You need the name of the stack to delete the cluster. -* You have access rights to the directory that contains the installation files that were created by the installation program. -* Your account includes a policy that provides you with permissions to delete the CloudFormation stack. - -.Procedure - -. Change to the directory that contains the stored installation program, and delete the cluster by using the `destroy cluster` command: -+ -[source,terminal] ----- -$ ./openshift-install destroy cluster --dir \//<1> - --log-level=debug <2> ----- -<1> For ``, specify the directory that stored any files created by the installation program. -<2> To view different log details, specify `error`, `info`, or `warn` instead of `debug`. - -. Delete the CloudFormation stack for the Local Zone subnet: -+ -[source,terminal] ----- -$ aws cloudformation delete-stack --stack-name ----- - -. Delete the stack of resources that represent the VPC: -+ -[source,terminal] ----- -$ aws cloudformation delete-stack --stack-name ----- - -.Verification - -* Check that you removed the stack resources by issuing the following commands in the AWS CLI. The AWS CLI outputs that no template component exists. 
-+ -[source,terminal] ----- -$ aws cloudformation describe-stacks --stack-name ----- -+ -[source,terminal] ----- -$ aws cloudformation describe-stacks --stack-name ----- \ No newline at end of file diff --git a/modules/installation-aws-editing-manifests.adoc b/modules/installation-aws-editing-manifests.adoc deleted file mode 100644 index 44c36e1ed291..000000000000 --- a/modules/installation-aws-editing-manifests.adoc +++ /dev/null @@ -1,124 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-outposts-remote-workers.adoc - -:_content-type: PROCEDURE -[id="installation-aws-creating-manifests_{context}"] -= Generating manifest files - -Use the installation program to generate a set of manifest files in the assets directory. Manifest files are required to specify the AWS Outposts subnets to use for worker machines, and to specify settings required by the network provider. - -If you plan to reuse the `install-config.yaml` file, create a backup file before you generate the manifest files. - -.Procedure - -. Optional: Create a backup copy of the `install-config.yaml` file: -+ -[source,terminal] ----- -$ cp install-config.yaml install-config.yaml.backup ----- - -. Generate a set of manifests in your assets directory: -+ -[source,terminal] ----- -$ openshift-install create manifests --dir ----- -+ -This command displays the following messages. -+ -.Example output -[source,terminal] ----- -INFO Consuming Install Config from target directory -INFO Manifests created in: /manifests and /openshift ----- -+ -The command generates the following manifest files: -+ -.Example output -[source,terminal] ----- -$ tree -. -├── manifests -│  ├── cluster-config.yaml -│  ├── cluster-dns-02-config.yml -│  ├── cluster-infrastructure-02-config.yml -│  ├── cluster-ingress-02-config.yml -│  ├── cluster-network-01-crd.yml -│  ├── cluster-network-02-config.yml -│  ├── cluster-proxy-01-config.yaml -│  ├── cluster-scheduler-02-config.yml -│  ├── cvo-overrides.yaml -│  ├── kube-cloud-config.yaml -│  ├── kube-system-configmap-root-ca.yaml -│  ├── machine-config-server-tls-secret.yaml -│  └── openshift-config-secret-pull-secret.yaml -└── openshift - ├── 99_cloud-creds-secret.yaml - ├── 99_kubeadmin-password-secret.yaml - ├── 99_openshift-cluster-api_master-machines-0.yaml - ├── 99_openshift-cluster-api_master-machines-1.yaml - ├── 99_openshift-cluster-api_master-machines-2.yaml - ├── 99_openshift-cluster-api_master-user-data-secret.yaml - ├── 99_openshift-cluster-api_worker-machineset-0.yaml - ├── 99_openshift-cluster-api_worker-user-data-secret.yaml - ├── 99_openshift-machineconfig_99-master-ssh.yaml - ├── 99_openshift-machineconfig_99-worker-ssh.yaml - ├── 99_role-cloud-creds-secret-reader.yaml - └── openshift-install-manifests.yaml - ----- - -[id="installation-aws-editing-manifests_{context}"] -== Modifying manifest files - -[NOTE] -==== -The AWS Outposts environments has the following limitations which require manual modification in the manifest generated files: - -* The maximum transmission unit (MTU) of a network connection is the size, in bytes, of the largest permissible packet that can be passed over the connection. The Outpost service link supports a maximum packet size of 1300 bytes. For more information about the service link, see link:https://docs.aws.amazon.com/outposts/latest/userguide/region-connectivity.html[Outpost connectivity to AWS Regions] - -You will find more information about how to change these values below. 
-==== - -* Use Outpost Subnet for workers `machineset` -+ -Modify the following file: -/openshift/99_openshift-cluster-api_worker-machineset-0.yaml -Find the subnet ID and replace it with the ID of the private subnet created in the Outpost. As a result, all the worker machines will be created in the Outpost. - -* Specify MTU value for the Network Provider -+ -Outpost service links support a maximum packet size of 1300 bytes. It's required to modify the MTU of the Network Provider to follow this requirement. -Create a new file under manifests directory, named cluster-network-03-config.yml -+ -If OpenShift SDN network provider is used, set the MTU value to 1250 -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - defaultNetwork: - openshiftSDNConfig: - mtu: 1250 ----- -+ -If OVN-Kubernetes network provider is used, set the MTU value to 1200 -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - defaultNetwork: - ovnKubernetesConfig: - mtu: 1200 ----- diff --git a/modules/installation-aws-iam-policies-about.adoc b/modules/installation-aws-iam-policies-about.adoc deleted file mode 100644 index e604cf5ec274..000000000000 --- a/modules/installation-aws-iam-policies-about.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-account.adoc - -:_content-type: CONCEPT -[id="iam-policies-and-aws-authentication_{context}"] -= IAM Policies and AWS authentication - -By default, the installation program creates instance profiles for the bootstrap, control plane, and compute instances with the necessary permissions for the cluster to operate. - -However, you can create your own IAM roles and specify them as part of the installation process. You might need to specify your own roles to deploy the cluster or to manage the cluster after installation. For example: - -* Your organization's security policies require that you use a more restrictive set of permissions to install the cluster. -* After the installation, the cluster is configured with an Operator that requires access to additional services. - -If you choose to specify your own IAM roles, you can take the following steps: - -* Begin with the default policies and adapt as required. For more information, see "Default permissions for IAM instance profiles". -* Use the AWS Identity and Access Management Access Analyzer (IAM Access Analyzer) to create a policy template that is based on the cluster's activity. For more information see, "Using AWS IAM Analyzer to create policy templates". diff --git a/modules/installation-aws-iam-user.adoc b/modules/installation-aws-iam-user.adoc deleted file mode 100644 index ebd7cc101fff..000000000000 --- a/modules/installation-aws-iam-user.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-account.adoc - -:_content-type: PROCEDURE -[id="installation-aws-iam-user_{context}"] -= Creating an IAM user - -Each Amazon Web Services (AWS) account contains a root user account that is -based on the email address you used to create the account. This is a -highly-privileged account, and it is recommended to use it for only initial -account and billing configuration, creating an initial set of users, and -securing the account. - -Before you install {product-title}, create a secondary IAM -administrative user. 
As you complete the -link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html[Creating an IAM User in Your AWS Account] -procedure in the AWS documentation, set the following options: - -.Procedure - -. Specify the IAM user name and select `Programmatic access`. - -. Attach the `AdministratorAccess` policy to ensure that the account has -sufficient permission to create the cluster. This policy provides the cluster -with the ability to grant credentials to each {product-title} component. The -cluster grants the components only the credentials that they require. -+ -[NOTE] -==== -While it is possible to create a policy that grants the all of the required -AWS permissions and attach it to the user, this is not the preferred option. -The cluster will not have the ability to grant additional credentials to -individual components, so the same credentials are used by all components. -==== - -. Optional: Add metadata to the user by attaching tags. - -. Confirm that the user name that you specified is granted the -`AdministratorAccess` policy. - -. Record the access key ID and secret access key values. You must use these -values when you configure your local machine to run the installation program. -+ -[IMPORTANT] -==== -You cannot use a temporary session token that you generated while using a -multi-factor authentication device to authenticate to AWS when you deploy a -cluster. The cluster continues to use your current AWS credentials to -create AWS resources for the entire life of the cluster, so you must -use key-based, long-lived credentials. -==== diff --git a/modules/installation-aws-limits.adoc b/modules/installation-aws-limits.adoc deleted file mode 100644 index 1db3b48fe331..000000000000 --- a/modules/installation-aws-limits.adoc +++ /dev/null @@ -1,112 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-account.adoc - -[id="installation-aws-limits_{context}"] -= AWS account limits - -The {product-title} cluster uses a number of Amazon Web Services (AWS) -components, and the default -link:https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html[Service Limits] -affect your ability to install {product-title} clusters. If you use certain -cluster configurations, deploy your cluster in certain AWS regions, or -run multiple clusters from your account, you might need -to request additional resources for your AWS account. - -The following table summarizes the AWS components whose limits can impact your -ability to install and run {product-title} clusters. - -[cols="2a,3a,3a,8a",options="header"] -|=== -|Component |Number of clusters available by default| Default AWS limit |Description - -|Instance Limits -|Varies -|Varies -|By default, each cluster creates the following instances: - -* One bootstrap machine, which is removed after installation -* Three control plane nodes -* Three worker nodes - -These instance type counts are within a new account's default limit. To deploy -more worker nodes, enable autoscaling, deploy large workloads, or use a -different instance type, review your account limits to ensure that your cluster -can deploy the machines that you need. - -In most regions, the worker machines use an `m6i.large` instance -and the bootstrap and control plane machines use `m6i.xlarge` instances. In some regions, including -all regions that do not support these instance types, `m5.large` and `m5.xlarge` -instances are used instead. 
- -|Elastic IPs (EIPs) -|0 to 1 -|5 EIPs per account -|To provision the cluster in a highly available configuration, the installation program -creates a public and private subnet for each -link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html[availability zone within a region]. -Each private subnet requires a -link:https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html[NAT Gateway], -and each NAT gateway requires a separate -link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html[elastic IP]. -Review the -link:https://aws.amazon.com/about-aws/global-infrastructure/[AWS region map] to -determine how many availability zones are in each region. To take advantage of -the default high availability, install the cluster in a region with at least -three availability zones. To install a cluster in a region with more than five -availability zones, you must increase the EIP limit. -[IMPORTANT] -==== -To use the `us-east-1` region, you must increase the EIP limit for your account. -==== - -|Virtual Private Clouds (VPCs) -|5 -|5 VPCs per region -|Each cluster creates its own VPC. - -|Elastic Load Balancing (ELB/NLB) -|3 -|20 per region -|By default, each cluster creates internal and external network load balancers for the master -API server and a single Classic Load Balancer for the router. Deploying -more Kubernetes `Service` objects with type `LoadBalancer` will create additional -link:https://aws.amazon.com/elasticloadbalancing/[load balancers]. - - -|NAT Gateways -|5 -|5 per availability zone -|The cluster deploys one NAT gateway in each availability zone. - -|Elastic Network Interfaces (ENIs) -|At least 12 -|350 per region -|The default installation creates 21 ENIs and an ENI for each availability zone -in your region. For example, the `us-east-1` region contains six availability -zones, so a cluster that is deployed in that zone uses 27 ENIs. Review the -link:https://aws.amazon.com/about-aws/global-infrastructure/[AWS region map] to -determine how many availability zones are in each region. - -Additional ENIs are created for additional machines and ELB load balancers -that are created by cluster usage and deployed workloads. - -|VPC Gateway -|20 -|20 per account -|Each cluster creates a single VPC Gateway for S3 access. - - -|S3 buckets -|99 -|100 buckets per account -|Because the installation process creates a temporary bucket and the registry -component in each cluster creates a bucket, you can create only 99 -{product-title} clusters per AWS account. - -|Security Groups -|250 -|2,500 per account -|Each cluster creates 10 distinct security groups. 
- | Fail, optionally surfacing response body to the user -|=== diff --git a/modules/installation-aws-marketplace-subscribe.adoc b/modules/installation-aws-marketplace-subscribe.adoc deleted file mode 100644 index aba198cb16bf..000000000000 --- a/modules/installation-aws-marketplace-subscribe.adoc +++ /dev/null @@ -1,77 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-customizations.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-user-infra.adoc - -ifeval::["{context}" == "installing-aws-customizations"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-aws-localzone"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-aws-user-infra"] -:upi: -endif::[] - -:_content-type: PROCEDURE -[id="installation-aws-marketplace-subscribe_{context}"] -= Obtaining an AWS Marketplace image -If you are deploying an {product-title} cluster using an AWS Marketplace image, you must first subscribe through AWS. Subscribing to the offer provides you with the AMI ID that the installation program uses to deploy worker nodes. - -.Prerequisites - -* You have an AWS account to purchase the offer. This account does not have to be the same account that is used to install the cluster. - -.Procedure - -. Complete the {product-title} subscription from the link:https://aws.amazon.com/marketplace/fulfillment?productId=59ead7de-2540-4653-a8b0-fa7926d5c845[AWS Marketplace]. -ifdef::ipi[] -. Record the AMI ID for your specific region. As part of the installation process, you must update the `install-config.yaml` file with this value before deploying the cluster. -endif::ipi[] -ifdef::upi[] -. Record the AMI ID for your specific region. If you use the CloudFormation template to deploy your worker nodes, you must update the `worker0.type.properties.ImageID` parameter with this value. -endif::upi[] - -ifdef::ipi[] -.Sample `install-config.yaml` file with AWS Marketplace worker nodes - -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com -compute: -- hyperthreading: Enabled - name: worker - platform: - aws: - amiID: ami-06c4d345f7c207239 <1> - type: m5.4xlarge - replicas: 3 -metadata: - name: test-cluster -platform: - aws: - region: us-east-2 <2> -sshKey: ssh-ed25519 AAAA... -pullSecret: '{"auths": ...}' ----- -<1> The AMI ID from your AWS Marketplace subscription. -<2> Your AMI ID is associated with a specific AWS region. When creating the installation configuration file, ensure that you select the same AWS region that you specified when configuring your subscription. 
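As an optional check before you run the installation program, you can confirm that the AMI from your subscription is visible in the region that you plan to use. The following sketch reuses the example AMI ID and region from the sample file above; substitute the values from your own subscription.

[source,terminal]
----
$ aws ec2 describe-images --region us-east-2 --image-ids ami-06c4d345f7c207239
----

If the AMI is not available in the specified region, the command returns an error instead of an image description.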
-endif::ipi[] - -ifeval::["{context}" == "installing-aws-customizations"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-aws-localzone"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-aws-user-infra"] -:!upi: -endif::[] diff --git a/modules/installation-aws-marketplace.adoc b/modules/installation-aws-marketplace.adoc deleted file mode 100644 index 295c20428b48..000000000000 --- a/modules/installation-aws-marketplace.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-account.adoc - -:_content-type: CONCEPT -[id="installation-aws-marketplace_{context}"] -= Supported AWS Marketplace regions - -Installing an {product-title} cluster using an AWS Marketplace image is available to customers who purchase the offer in North America. - -While the offer must be purchased in North America, you can deploy the cluster to any of the following supported partitions: - -* Public -* GovCloud - -[NOTE] -==== -Deploying a {product-title} cluster using an AWS Marketplace image is not supported for the AWS secret regions or China regions. -==== diff --git a/modules/installation-aws-permissions-iam-roles.adoc b/modules/installation-aws-permissions-iam-roles.adoc deleted file mode 100644 index 478df830636f..000000000000 --- a/modules/installation-aws-permissions-iam-roles.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-account.adoc - -[id="installation-aws-permissions-iam-roles_{context}"] -= Default permissions for IAM instance profiles - -By default, the installation program creates IAM instance profiles for the bootstrap, control plane, and worker instances with the necessary permissions for the cluster to operate.
- -The following lists specify the default permissions for control plane and compute machines: - -.Default IAM role permissions for control plane instance profiles -[%collapsible] -==== -* `ec2:AttachVolume` -* `ec2:AuthorizeSecurityGroupIngress` -* `ec2:CreateSecurityGroup` -* `ec2:CreateTags` -* `ec2:CreateVolume` -* `ec2:DeleteSecurityGroup` -* `ec2:DeleteVolume` -* `ec2:Describe*` -* `ec2:DetachVolume` -* `ec2:ModifyInstanceAttribute` -* `ec2:ModifyVolume` -* `ec2:RevokeSecurityGroupIngress` -* `elasticloadbalancing:AddTags` -* `elasticloadbalancing:AttachLoadBalancerToSubnets` -* `elasticloadbalancing:ApplySecurityGroupsToLoadBalancer` -* `elasticloadbalancing:CreateListener` -* `elasticloadbalancing:CreateLoadBalancer` -* `elasticloadbalancing:CreateLoadBalancerPolicy` -* `elasticloadbalancing:CreateLoadBalancerListeners` -* `elasticloadbalancing:CreateTargetGroup` -* `elasticloadbalancing:ConfigureHealthCheck` -* `elasticloadbalancing:DeleteListener` -* `elasticloadbalancing:DeleteLoadBalancer` -* `elasticloadbalancing:DeleteLoadBalancerListeners` -* `elasticloadbalancing:DeleteTargetGroup` -* `elasticloadbalancing:DeregisterInstancesFromLoadBalancer` -* `elasticloadbalancing:DeregisterTargets` -* `elasticloadbalancing:Describe*` -* `elasticloadbalancing:DetachLoadBalancerFromSubnets` -* `elasticloadbalancing:ModifyListener` -* `elasticloadbalancing:ModifyLoadBalancerAttributes` -* `elasticloadbalancing:ModifyTargetGroup` -* `elasticloadbalancing:ModifyTargetGroupAttributes` -* `elasticloadbalancing:RegisterInstancesWithLoadBalancer` -* `elasticloadbalancing:RegisterTargets` -* `elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer` -* `elasticloadbalancing:SetLoadBalancerPoliciesOfListener` -* `kms:DescribeKey` -==== - -.Default IAM role permissions for compute instance profiles -[%collapsible] -==== -* `ec2:DescribeInstances` -* `ec2:DescribeRegions` -==== diff --git a/modules/installation-aws-permissions.adoc b/modules/installation-aws-permissions.adoc deleted file mode 100644 index 8e0baa5d11a8..000000000000 --- a/modules/installation-aws-permissions.adoc +++ /dev/null @@ -1,293 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-aws-account.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -[id="installation-aws-permissions_{context}"] -= Required AWS permissions for the IAM user - -[NOTE] -==== -Your IAM user must have the permission `tag:GetResources` in the region `us-east-1` to delete the base cluster resources. As part of the AWS API requirement, the {product-title} installation program performs various actions in this region. -==== - -When you attach the `AdministratorAccess` policy to the IAM user that you create in Amazon Web Services (AWS), -you grant that user all of the required permissions. 
To deploy all components of an {product-title} -cluster, the IAM user requires the following permissions: - -.Required EC2 permissions for installation -[%collapsible] -==== -* `ec2:AuthorizeSecurityGroupEgress` -* `ec2:AuthorizeSecurityGroupIngress` -* `ec2:CopyImage` -* `ec2:CreateNetworkInterface` -* `ec2:AttachNetworkInterface` -* `ec2:CreateSecurityGroup` -* `ec2:CreateTags` -* `ec2:CreateVolume` -* `ec2:DeleteSecurityGroup` -* `ec2:DeleteSnapshot` -* `ec2:DeleteTags` -* `ec2:DeregisterImage` -* `ec2:DescribeAccountAttributes` -* `ec2:DescribeAddresses` -* `ec2:DescribeAvailabilityZones` -* `ec2:DescribeDhcpOptions` -* `ec2:DescribeImages` -* `ec2:DescribeInstanceAttribute` -* `ec2:DescribeInstanceCreditSpecifications` -* `ec2:DescribeInstances` -* `ec2:DescribeInstanceTypes` -* `ec2:DescribeInternetGateways` -* `ec2:DescribeKeyPairs` -* `ec2:DescribeNatGateways` -* `ec2:DescribeNetworkAcls` -* `ec2:DescribeNetworkInterfaces` -* `ec2:DescribePrefixLists` -* `ec2:DescribeRegions` -* `ec2:DescribeRouteTables` -* `ec2:DescribeSecurityGroups` -* `ec2:DescribeSubnets` -* `ec2:DescribeTags` -* `ec2:DescribeVolumes` -* `ec2:DescribeVpcAttribute` -* `ec2:DescribeVpcClassicLink` -* `ec2:DescribeVpcClassicLinkDnsSupport` -* `ec2:DescribeVpcEndpoints` -* `ec2:DescribeVpcs` -* `ec2:GetEbsDefaultKmsKeyId` -* `ec2:ModifyInstanceAttribute` -* `ec2:ModifyNetworkInterfaceAttribute` -* `ec2:RevokeSecurityGroupEgress` -* `ec2:RevokeSecurityGroupIngress` -* `ec2:RunInstances` -* `ec2:TerminateInstances` -==== - -.Required permissions for creating network resources during installation -[%collapsible] -==== -* `ec2:AllocateAddress` -* `ec2:AssociateAddress` -* `ec2:AssociateDhcpOptions` -* `ec2:AssociateRouteTable` -* `ec2:AttachInternetGateway` -* `ec2:CreateDhcpOptions` -* `ec2:CreateInternetGateway` -* `ec2:CreateNatGateway` -* `ec2:CreateRoute` -* `ec2:CreateRouteTable` -* `ec2:CreateSubnet` -* `ec2:CreateVpc` -* `ec2:CreateVpcEndpoint` -* `ec2:ModifySubnetAttribute` -* `ec2:ModifyVpcAttribute` - -[NOTE] -===== -If you use an existing VPC, your account does not require these permissions for creating network resources. 
-===== -==== - -.Required Elastic Load Balancing permissions (ELB) for installation -[%collapsible] -==== -* `elasticloadbalancing:AddTags` -* `elasticloadbalancing:ApplySecurityGroupsToLoadBalancer` -* `elasticloadbalancing:AttachLoadBalancerToSubnets` -* `elasticloadbalancing:ConfigureHealthCheck` -* `elasticloadbalancing:CreateLoadBalancer` -* `elasticloadbalancing:CreateLoadBalancerListeners` -* `elasticloadbalancing:DeleteLoadBalancer` -* `elasticloadbalancing:DeregisterInstancesFromLoadBalancer` -* `elasticloadbalancing:DescribeInstanceHealth` -* `elasticloadbalancing:DescribeLoadBalancerAttributes` -* `elasticloadbalancing:DescribeLoadBalancers` -* `elasticloadbalancing:DescribeTags` -* `elasticloadbalancing:ModifyLoadBalancerAttributes` -* `elasticloadbalancing:RegisterInstancesWithLoadBalancer` -* `elasticloadbalancing:SetLoadBalancerPoliciesOfListener` -==== - -.Required Elastic Load Balancing permissions (ELBv2) for installation -[%collapsible] -==== -* `elasticloadbalancing:AddTags` -* `elasticloadbalancing:CreateListener` -* `elasticloadbalancing:CreateLoadBalancer` -* `elasticloadbalancing:CreateTargetGroup` -* `elasticloadbalancing:DeleteLoadBalancer` -* `elasticloadbalancing:DeregisterTargets` -* `elasticloadbalancing:DescribeListeners` -* `elasticloadbalancing:DescribeLoadBalancerAttributes` -* `elasticloadbalancing:DescribeLoadBalancers` -* `elasticloadbalancing:DescribeTargetGroupAttributes` -* `elasticloadbalancing:DescribeTargetHealth` -* `elasticloadbalancing:ModifyLoadBalancerAttributes` -* `elasticloadbalancing:ModifyTargetGroup` -* `elasticloadbalancing:ModifyTargetGroupAttributes` -* `elasticloadbalancing:RegisterTargets` -==== - -.Required IAM permissions for installation -[%collapsible] -==== -* `iam:AddRoleToInstanceProfile` -* `iam:CreateInstanceProfile` -* `iam:CreateRole` -* `iam:DeleteInstanceProfile` -* `iam:DeleteRole` -* `iam:DeleteRolePolicy` -* `iam:GetInstanceProfile` -* `iam:GetRole` -* `iam:GetRolePolicy` -* `iam:GetUser` -* `iam:ListInstanceProfilesForRole` -* `iam:ListRoles` -* `iam:ListUsers` -* `iam:PassRole` -* `iam:PutRolePolicy` -* `iam:RemoveRoleFromInstanceProfile` -* `iam:SimulatePrincipalPolicy` -* `iam:TagRole` - -[NOTE] -===== -If you have not created a load balancer in your AWS account, the IAM user also requires the `iam:CreateServiceLinkedRole` permission. 
-===== -==== - -.Required Route 53 permissions for installation -[%collapsible] -==== -* `route53:ChangeResourceRecordSets` -* `route53:ChangeTagsForResource` -* `route53:CreateHostedZone` -* `route53:DeleteHostedZone` -* `route53:GetChange` -* `route53:GetHostedZone` -* `route53:ListHostedZones` -* `route53:ListHostedZonesByName` -* `route53:ListResourceRecordSets` -* `route53:ListTagsForResource` -* `route53:UpdateHostedZoneComment` -==== - -.Required S3 permissions for installation -[%collapsible] -==== -* `s3:CreateBucket` -* `s3:DeleteBucket` -* `s3:GetAccelerateConfiguration` -* `s3:GetBucketAcl` -* `s3:GetBucketCors` -* `s3:GetBucketLocation` -* `s3:GetBucketLogging` -* `s3:GetBucketPolicy` -* `s3:GetBucketObjectLockConfiguration` -* `s3:GetBucketReplication` -* `s3:GetBucketRequestPayment` -* `s3:GetBucketTagging` -* `s3:GetBucketVersioning` -* `s3:GetBucketWebsite` -* `s3:GetEncryptionConfiguration` -* `s3:GetLifecycleConfiguration` -* `s3:GetReplicationConfiguration` -* `s3:ListBucket` -* `s3:PutBucketAcl` -* `s3:PutBucketTagging` -* `s3:PutEncryptionConfiguration` -==== - -.S3 permissions that cluster Operators require -[%collapsible] -==== -* `s3:DeleteObject` -* `s3:GetObject` -* `s3:GetObjectAcl` -* `s3:GetObjectTagging` -* `s3:GetObjectVersion` -* `s3:PutObject` -* `s3:PutObjectAcl` -* `s3:PutObjectTagging` -==== - -.Required permissions to delete base cluster resources -[%collapsible] -==== -* `autoscaling:DescribeAutoScalingGroups` -* `ec2:DeletePlacementGroup` -* `ec2:DeleteNetworkInterface` -* `ec2:DeleteVolume` -* `elasticloadbalancing:DeleteTargetGroup` -* `elasticloadbalancing:DescribeTargetGroups` -* `iam:DeleteAccessKey` -* `iam:DeleteUser` -* `iam:ListAttachedRolePolicies` -* `iam:ListInstanceProfiles` -* `iam:ListRolePolicies` -* `iam:ListUserPolicies` -* `s3:DeleteObject` -* `s3:ListBucketVersions` -* `tag:GetResources` -==== - -.Required permissions to delete network resources -[%collapsible] -==== -* `ec2:DeleteDhcpOptions` -* `ec2:DeleteInternetGateway` -* `ec2:DeleteNatGateway` -* `ec2:DeleteRoute` -* `ec2:DeleteRouteTable` -* `ec2:DeleteSubnet` -* `ec2:DeleteVpc` -* `ec2:DeleteVpcEndpoints` -* `ec2:DetachInternetGateway` -* `ec2:DisassociateRouteTable` -* `ec2:ReleaseAddress` -* `ec2:ReplaceRouteTableAssociation` - -[NOTE] -===== -If you use an existing VPC, your account does not require these permissions to delete network resources. Instead, your account only requires the `tag:UntagResources` permission to delete network resources. -===== -==== - -.Required permissions to delete a cluster with shared instance roles -[%collapsible] -==== -* `iam:UntagRole` -==== - -.Additional IAM and S3 permissions that are required to create manifests -[%collapsible] -==== -* `iam:DeleteAccessKey` -* `iam:DeleteUser` -* `iam:DeleteUserPolicy` -* `iam:GetUserPolicy` -* `iam:ListAccessKeys` -* `iam:PutUserPolicy` -* `iam:TagUser` -* `s3:PutBucketPublicAccessBlock` -* `s3:GetBucketPublicAccessBlock` -* `s3:PutLifecycleConfiguration` -* `s3:HeadBucket` -* `s3:ListBucketMultipartUploads` -* `s3:AbortMultipartUpload` - -[NOTE] -===== -If you are managing your cloud provider credentials with mint mode, the IAM user also requires the `iam:CreateAccessKey` and `iam:CreateUser` permissions. 
-=====
-====
-
-.Optional permissions for instance and quota checks for installation
-[%collapsible]
-====
-* `ec2:DescribeInstanceTypeOfferings`
-* `servicequotas:ListAWSDefaultServiceQuotas`
-====
diff --git a/modules/installation-aws-regions-with-no-ami.adoc b/modules/installation-aws-regions-with-no-ami.adoc
deleted file mode 100644
index d465fc432b86..000000000000
--- a/modules/installation-aws-regions-with-no-ami.adoc
+++ /dev/null
@@ -1,70 +0,0 @@
-// Module included in the following assemblies:
-//
-// * installing/installing_aws/installing-aws-china.adoc
-// * installing/installing_aws/installing-aws-user-infra.adoc
-// * installing/installing_aws/installing-aws-secret-region.adoc
-
-ifeval::["{context}" == "installing-aws-china-region"]
-:aws-china:
-endif::[]
-ifeval::["{context}" == "installing-aws-secret-region"]
-:aws-secret:
-endif::[]
-
-[id="installation-aws-regions-with-no-ami_{context}"]
-ifndef::aws-china,aws-secret[]
-= AWS regions without a published {op-system} AMI
-endif::aws-china,aws-secret[]
-
-ifdef::aws-china,aws-secret[]
-= Installation requirements
-endif::aws-china,aws-secret[]
-
-ifndef::aws-china,aws-secret[]
-You can deploy an {product-title} cluster to Amazon Web Services (AWS) regions
-without native support for a {op-system-first} Amazon Machine Image (AMI) or the
-AWS software development kit (SDK). If a published AMI is not available for an
-AWS region, you can upload a custom AMI before you install the cluster.
-
-If you are deploying to a region not supported by the AWS SDK
-and you do not specify a custom AMI, the installation program
-copies the `us-east-1` AMI to the user account automatically. Then the
-installation program creates the control plane machines with encrypted EBS
-volumes using the default or user-specified Key Management Service (KMS) key.
-This allows the AMI to follow the same process workflow as published {op-system}
-AMIs.
-
-A region without native support for an {op-system} AMI is not available to
-select from the terminal during cluster creation because it is not published.
-However, you can install to this region by configuring the custom AMI in the
-`install-config.yaml` file.
-endif::aws-china,aws-secret[]
-
-ifdef::aws-china,aws-secret[]
-ifdef::aws-china[Red Hat does not publish a {op-system-first} Amazon Machine Image (AMI) for the AWS China regions.]
-ifdef::aws-secret[Red Hat does not publish a {op-system-first} Amazon Machine Image (AMI) for the AWS Secret and Top Secret Regions.]
-
-Before you can install the cluster, you must:
-
-* Upload a custom {op-system} AMI.
-* Manually create the installation configuration file (`install-config.yaml`).
-* Specify the AWS region, and the accompanying custom AMI, in the installation configuration file.
-
-You cannot use the {product-title} installation program to create the installation configuration file. The installer does not list an AWS region without native support for an {op-system} AMI.
-
-ifdef::aws-secret[]
-[IMPORTANT]
-====
-You must also define a custom CA certificate in the `additionalTrustBundle` field of the `install-config.yaml` file because the AWS API requires a custom CA trust bundle. To allow the installation program to access the AWS API, the CA certificates must also be defined on the machine that runs the installation program.
You must add the CA bundle to the trust store on the machine, use the `AWS_CA_BUNDLE` environment variable, or define the CA bundle in the link:https://docs.aws.amazon.com/credref/latest/refdocs/setting-global-ca_bundle.html[`ca_bundle`] field of the AWS config file. -==== -endif::aws-secret[] - -endif::aws-china,aws-secret[] - -ifeval::["{context}" == "installing-aws-china-region"] -:!aws-china: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:!aws-secret: -endif::[] diff --git a/modules/installation-aws-regions.adoc b/modules/installation-aws-regions.adoc deleted file mode 100644 index b00ea7fadb89..000000000000 --- a/modules/installation-aws-regions.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-account.adoc - -[id="installation-aws-regions_{context}"] -= Supported AWS regions - -You can deploy an {product-title} cluster to the following regions. - -[NOTE] -==== -Your IAM user must have the permission `tag:GetResources` in the region `us-east-1` to delete the base cluster resources. As part of the AWS API requirement, the {product-title} installation program performs various actions in this region. -==== - -[id="installation-aws-public_{context}"] -== AWS public regions - -The following AWS public regions are supported: - -* `af-south-1` (Cape Town) -* `ap-east-1` (Hong Kong) -* `ap-northeast-1` (Tokyo) -* `ap-northeast-2` (Seoul) -* `ap-northeast-3` (Osaka) -* `ap-south-1` (Mumbai) -* `ap-south-2` (Hyderabad) -* `ap-southeast-1` (Singapore) -* `ap-southeast-2` (Sydney) -* `ap-southeast-3` (Jakarta) -* `ap-southeast-4` (Melbourne) -* `ca-central-1` (Central) -* `eu-central-1` (Frankfurt) -* `eu-central-2` (Zurich) -* `eu-north-1` (Stockholm) -* `eu-south-1` (Milan) -* `eu-south-2` (Spain) -* `eu-west-1` (Ireland) -* `eu-west-2` (London) -* `eu-west-3` (Paris) -* `me-central-1` (UAE) -* `me-south-1` (Bahrain) -* `sa-east-1` (São Paulo) -* `us-east-1` (N. Virginia) -* `us-east-2` (Ohio) -* `us-west-1` (N. California) -* `us-west-2` (Oregon) - -[id="installation-aws-govcloud_{context}"] -== AWS GovCloud regions - -The following AWS GovCloud regions are supported: - -* `us-gov-west-1` -* `us-gov-east-1` - -[id="installation-aws-c2s_{context}"] -== AWS SC2S and C2S secret regions - -The following AWS secret regions are supported: - -* `us-isob-east-1` Secret Commercial Cloud Services (SC2S) -* `us-iso-east-1` Commercial Cloud Services (C2S) - -[id="installation-aws-china_{context}"] -== AWS China regions - -The following AWS China regions are supported: - -* `cn-north-1` (Beijing) -* `cn-northwest-1` (Ningxia) diff --git a/modules/installation-aws-route53.adoc b/modules/installation-aws-route53.adoc deleted file mode 100644 index 14c632a66268..000000000000 --- a/modules/installation-aws-route53.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-account.adoc - -:_content-type: PROCEDURE -[id="installation-aws-route53_{context}"] -= Configuring Route 53 - -To install {product-title}, the Amazon Web Services (AWS) account you use must -have a dedicated public hosted zone in your Route 53 service. This zone must be -authoritative for the domain. The Route 53 service provides -cluster DNS resolution and name lookup for external connections to the cluster. - -.Procedure - -. Identify your domain, or subdomain, and registrar. 
You can transfer an existing domain and
-registrar or obtain a new one through AWS or another source.
-+
-[NOTE]
-====
-If you purchase a new domain through AWS, it takes time for the relevant DNS
-changes to propagate. For more information about purchasing domains
-through AWS, see
-link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/registrar.html[Registering Domain Names Using Amazon Route 53]
-in the AWS documentation.
-====
-
-. If you are using an existing domain and registrar, migrate its DNS to AWS. See
-link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/MigratingDNS.html[Making Amazon Route 53 the DNS Service for an Existing Domain]
-in the AWS documentation.
-
-. Create a public hosted zone for your domain or subdomain. See
-link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingHostedZone.html[Creating a Public Hosted Zone]
-in the AWS documentation.
-+
-Use an appropriate root domain, such as `openshiftcorp.com`, or subdomain,
-such as `clusters.openshiftcorp.com`.
-
-. Extract the new authoritative name servers from the hosted zone records. See
-link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/GetInfoAboutHostedZone.html[Getting the Name Servers for a Public Hosted Zone]
-in the AWS documentation.
-
-. Update the registrar records for the AWS Route 53 name servers that your domain
-uses. For example, if you registered your domain to a Route 53 service in a
-different account, see the following topic in the AWS documentation:
-link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/domain-name-servers-glue-records.html#domain-name-servers-glue-records-procedure[Adding or Changing Name Servers or Glue Records].
-
-. If you are using a subdomain, add its delegation records to the parent domain. This gives Amazon Route 53 responsibility for the subdomain. Follow the delegation procedure outlined by the DNS provider of the parent domain. See link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingNewSubdomain.html[Creating a subdomain that uses Amazon Route 53 as the DNS service without migrating the parent domain] in the AWS documentation for an example high-level procedure.
diff --git a/modules/installation-aws-security-groups.adoc b/modules/installation-aws-security-groups.adoc
deleted file mode 100644
index aefe2e4920a0..000000000000
--- a/modules/installation-aws-security-groups.adoc
+++ /dev/null
@@ -1,34 +0,0 @@
-// Module included in the following assemblies:
-//
-// * installing/installing_aws/installing-aws-vpc.adoc
-// * installing/installing_aws/installing-aws-private.adoc
-// * installing/installing_aws/installing-aws-government-region.adoc
-// * installing/installing_aws/installing-aws-secret-region.adoc
-// * installing/installing_aws/installing-aws-china.adoc
-// * installing/installing_aws/installing-aws-localzone.adoc
-// * installing/installing_aws/installing-aws-outposts-remote-workers.adoc
-
-ifeval::["{context}" == "installing-aws-localzone"]
-:localzone:
-endif::[]
-
-:_content-type: CONCEPT
-[id="installation-aws-security-groups_{context}"]
-= AWS security groups
-
-By default, the installation program creates and attaches security groups to control plane and compute machines. The rules associated with the default security groups cannot be modified.
-
-However, you can apply additional existing AWS security groups, which are associated with your existing VPC, to control plane and compute machines.
Applying custom security groups can help you meet the security needs of your organization, in such cases where you need to control the incoming or outgoing traffic of these machines. - -As part of the installation process, you apply custom security groups by modifying the `install-config.yaml` file before deploying the cluster. - -ifndef::localzone[] -For more information, see "Applying existing AWS security groups to the cluster". -endif::localzone[] -ifdef::localzone[] -For more information, see "The edge compute pool for AWS Local Zones". -endif::localzone[] - -ifeval::["{context}" == "installing-aws-localzone"] -:!localzone: -endif::[] diff --git a/modules/installation-aws-tested-machine-types.adoc b/modules/installation-aws-tested-machine-types.adoc deleted file mode 100644 index 3720f7385cb1..000000000000 --- a/modules/installation-aws-tested-machine-types.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// installing/installing_aws/installing-aws-china.adoc -// installing/installing_aws/installing-aws-customizations.adoc -// installing/installing_aws/installing-aws-government-region.adoc -// installing/installing_aws/installing-aws-network-customizations.adoc -// installing/installing_aws/installing-aws-private.adoc -// installing/installing_aws/installing-aws-secret-region.adoc -// installing/installing_aws/installing-aws-user-infra.adoc -// installing/installing_aws/installing-aws-vpc.adoc -// installing/installing_aws/installing-restricted-networks-aws.adoc -// installing-aws-localzone - -ifeval::["{context}" == "installing-aws-localzone"] -:localzone: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:secretregion: -endif::[] - -[id="installation-aws-tested-machine-types_{context}"] -= Tested instance types for AWS - -The following Amazon Web Services (AWS) instance types have been tested with -ifndef::localzone[] -{product-title}. -endif::localzone[] -ifdef::localzone[] -{product-title} for use with AWS Local Zones. -endif::localzone[] - -[NOTE] -==== -Use the machine types included in the following charts for your AWS instances. If you use an instance type that is not listed in the chart, ensure that the instance size you use matches the minimum resource requirements that are listed in "Minimum resource requirements for cluster installation". 
-==== - -ifndef::localzone,secretregion[] -.Machine types based on 64-bit x86 architecture -[%collapsible] -==== -include::https://raw.githubusercontent.com/openshift/installer/master/docs/user/aws/tested_instance_types_x86_64.md[] -==== -endif::localzone,secretregion[] -ifdef::localzone[] -.Machine types based on 64-bit x86 architecture for AWS Local Zones -[%collapsible] -==== -* `c5.*` -* `c5d.*` -* `m6i.*` -* `m5.*` -* `r5.*` -* `t3.*` -==== -endif::localzone[] -ifdef::secretregion[] -.Machine types based on 64-bit x86 architecture for secret regions -[%collapsible] -==== -* `c4.*` -* `c5.*` -* `i3.*` -* `m4.*` -* `m5.*` -* `r4.*` -* `r5.*` -* `t3.*` -==== -endif::secretregion[] - -ifeval::["{context}" == "installing-aws-localzone"] -:!localzone: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:!secretregion: -endif::[] \ No newline at end of file diff --git a/modules/installation-aws-upload-custom-rhcos-ami.adoc b/modules/installation-aws-upload-custom-rhcos-ami.adoc deleted file mode 100644 index 1830faedf42c..000000000000 --- a/modules/installation-aws-upload-custom-rhcos-ami.adoc +++ /dev/null @@ -1,164 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-secret-region.adoc -// * installing/installing_aws/installing-aws-china.adoc - -ifeval::["{context}" == "installing-aws-china-region"] -:aws-china: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:aws-gov: -endif::[] - -:_content-type: PROCEDURE -[id="installation-aws-upload-custom-rhcos-ami_{context}"] -= Uploading a custom {op-system} AMI in AWS - -If you are deploying to a custom Amazon Web Services (AWS) region, you must -upload a custom {op-system-first} Amazon Machine Image (AMI) that belongs to -that region. - -.Prerequisites - -* You configured an AWS account. -* You created an Amazon S3 bucket with the required IAM -link:https://docs.aws.amazon.com/vm-import/latest/userguide/vmie_prereqs.html#vmimport-role[service role]. -* You uploaded your {op-system} VMDK file to Amazon S3. -ifdef::openshift-enterprise,openshift-webscale[] -The {op-system} VMDK file must be the highest version that is less than or equal to the {product-title} version you are installing. -endif::[] -* You downloaded the AWS CLI and installed it on your computer. See -link:https://docs.aws.amazon.com/cli/latest/userguide/install-bundle.html[Install the AWS CLI Using the Bundled Installer]. - -.Procedure - -. Export your AWS profile as an environment variable: -+ -[source,terminal] ----- -$ export AWS_PROFILE= <1> ----- -ifdef::aws-gov[<1> The AWS profile name that holds your AWS credentials, like `govcloud`.] -ifdef::aws-china[<1> The AWS profile name that holds your AWS credentials, like `beijingadmin`.] - -. Export the region to associate with your custom AMI as an environment -variable: -+ -[source,terminal] ----- -$ export AWS_DEFAULT_REGION= <1> ----- -ifdef::aws-gov[<1> The AWS region, like `us-gov-east-1`.] -ifdef::aws-china[<1> The AWS region, like `cn-north-1`.] - -. Export the version of {op-system} you uploaded to Amazon S3 as an environment -variable: -+ -[source,terminal] ----- -$ export RHCOS_VERSION= <1> ----- -<1> The {op-system} VMDK version, like `{product-version}.0`. - -. Export the Amazon S3 bucket name as an environment variable: -+ -[source,terminal] ----- -$ export VMIMPORT_BUCKET_NAME= ----- - -. 
Create the `containers.json` file and define your {op-system} VMDK file: -+ -[source,terminal] ----- -$ cat < containers.json -{ - "Description": "rhcos-${RHCOS_VERSION}-x86_64-aws.x86_64", - "Format": "vmdk", - "UserBucket": { - "S3Bucket": "${VMIMPORT_BUCKET_NAME}", - "S3Key": "rhcos-${RHCOS_VERSION}-x86_64-aws.x86_64.vmdk" - } -} -EOF ----- - -. Import the {op-system} disk as an Amazon EBS snapshot: -+ -[source,terminal] ----- -$ aws ec2 import-snapshot --region ${AWS_DEFAULT_REGION} \ - --description "" \ <1> - --disk-container "file:///containers.json" <2> ----- -<1> The description of your {op-system} disk being imported, like -`rhcos-${RHCOS_VERSION}-x86_64-aws.x86_64`. -<2> The file path to the JSON file describing your {op-system} disk. The JSON -file should contain your Amazon S3 bucket name and key. - -. Check the status of the image import: -+ -[source,terminal] ----- -$ watch -n 5 aws ec2 describe-import-snapshot-tasks --region ${AWS_DEFAULT_REGION} ----- -+ -.Example output -[source,terminal] ----- -{ - "ImportSnapshotTasks": [ - { - "Description": "rhcos-4.7.0-x86_64-aws.x86_64", - "ImportTaskId": "import-snap-fh6i8uil", - "SnapshotTaskDetail": { - "Description": "rhcos-4.7.0-x86_64-aws.x86_64", - "DiskImageSize": 819056640.0, - "Format": "VMDK", - "SnapshotId": "snap-06331325870076318", - "Status": "completed", - "UserBucket": { - "S3Bucket": "external-images", - "S3Key": "rhcos-4.7.0-x86_64-aws.x86_64.vmdk" - } - } - } - ] -} ----- -+ -Copy the `SnapshotId` to register the image. - -. Create a custom {op-system} AMI from the {op-system} snapshot: -+ -[source,terminal] ----- -$ aws ec2 register-image \ - --region ${AWS_DEFAULT_REGION} \ - --architecture x86_64 \ <1> - --description "rhcos-${RHCOS_VERSION}-x86_64-aws.x86_64" \ <2> - --ena-support \ - --name "rhcos-${RHCOS_VERSION}-x86_64-aws.x86_64" \ <3> - --virtualization-type hvm \ - --root-device-name '/dev/xvda' \ - --block-device-mappings 'DeviceName=/dev/xvda,Ebs={DeleteOnTermination=true,SnapshotId=}' <4> ----- -<1> The {op-system} VMDK architecture type, like `x86_64`, -ifndef::openshift-origin[] -`aarch64`, -endif::openshift-origin[] -`s390x`, or `ppc64le`. -<2> The `Description` from the imported snapshot. -<3> The name of the {op-system} AMI. -<4> The `SnapshotID` from the imported snapshot. - -To learn more about these APIs, see the AWS documentation for -link:https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-import-snapshot.html[importing snapshots] -and link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html#creating-launching-ami-from-snapshot[creating EBS-backed AMIs]. 
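-
-As an optional convenience, a small shell loop along the following lines could replace the manual `watch` step and capture the `SnapshotId` value to pass to `aws ec2 register-image`. This is only a sketch: it assumes the `AWS_DEFAULT_REGION` variable that you exported earlier, and the import task ID shown is a placeholder taken from the example output above.
-
-[source,terminal]
-----
-#!/bin/bash
-# Sketch: poll the snapshot import task until it completes, then capture the snapshot ID.
-# Replace the placeholder with the ImportTaskId returned by `aws ec2 import-snapshot`.
-IMPORT_TASK_ID="import-snap-fh6i8uil"
-
-while true; do
-  STATUS=$(aws ec2 describe-import-snapshot-tasks \
-    --region "${AWS_DEFAULT_REGION}" \
-    --import-task-ids "${IMPORT_TASK_ID}" \
-    --query 'ImportSnapshotTasks[0].SnapshotTaskDetail.Status' \
-    --output text)
-  [ "${STATUS}" = "completed" ] && break
-  echo "Snapshot import status: ${STATUS}; waiting..."
-  sleep 15
-done
-
-SNAPSHOT_ID=$(aws ec2 describe-import-snapshot-tasks \
-  --region "${AWS_DEFAULT_REGION}" \
-  --import-task-ids "${IMPORT_TASK_ID}" \
-  --query 'ImportSnapshotTasks[0].SnapshotTaskDetail.SnapshotId' \
-  --output text)
-
-echo "Use ${SNAPSHOT_ID} as the SnapshotId value for 'aws ec2 register-image'."
-----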
- -ifeval::["{context}" == "installing-aws-china-region"] -:!aws-china: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:!aws-gov: -endif::[] diff --git a/modules/installation-aws-user-infra-bootstrap.adoc b/modules/installation-aws-user-infra-bootstrap.adoc deleted file mode 100644 index a36b29d52c1a..000000000000 --- a/modules/installation-aws-user-infra-bootstrap.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -:_content-type: PROCEDURE -[id="installation-aws-user-infra-bootstrap_{context}"] -= Initializing the bootstrap sequence on AWS with user-provisioned infrastructure - -After you create all of the required infrastructure in Amazon Web Services (AWS), -you can start the bootstrap sequence that initializes the {product-title} control plane. - -.Prerequisites - -* You configured an AWS account. -* You added your AWS keys and region to your local AWS profile by running `aws configure`. -* You generated the Ignition config files for your cluster. -* You created and configured a VPC and associated subnets in AWS. -* You created and configured DNS, load balancers, and listeners in AWS. -* You created the security groups and roles required for your cluster in AWS. -* You created the bootstrap machine. -* You created the control plane machines. -* You created the worker nodes. - -.Procedure - -. Change to the directory that contains the installation program and start the bootstrap process that initializes the {product-title} control plane: -+ -[source,terminal] ----- -$ ./openshift-install wait-for bootstrap-complete --dir \ <1> - --log-level=info <2> ----- -<1> For ``, specify the path to the directory that you -stored the installation files in. -<2> To view different installation details, specify `warn`, `debug`, or -`error` instead of `info`. -+ -.Example output -[source,terminal] ----- -INFO Waiting up to 20m0s for the Kubernetes API at https://api.mycluster.example.com:6443... -INFO API v1.27.3 up -INFO Waiting up to 30m0s for bootstrapping to complete... -INFO It is now safe to remove the bootstrap resources -INFO Time elapsed: 1s ----- -+ -If the command exits without a `FATAL` warning, your {product-title} control plane -has initialized. -+ -[NOTE] -==== -After the control plane initializes, it sets up the compute nodes and installs additional services in the form of Operators. -==== diff --git a/modules/installation-aws-user-infra-delete-bootstrap.adoc b/modules/installation-aws-user-infra-delete-bootstrap.adoc deleted file mode 100644 index ba730b6a57e5..000000000000 --- a/modules/installation-aws-user-infra-delete-bootstrap.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -:_content-type: PROCEDURE -[id="installation-aws-user-infra-delete-bootstrap_{context}"] -= Deleting the bootstrap resources - -After you complete the initial Operator configuration for the cluster, remove the bootstrap resources from Amazon Web Services (AWS). - -.Prerequisites - -* You completed the initial Operator configuration for your cluster. - -.Procedure - -. Delete the bootstrap resources. 
If you used the CloudFormation template, -link:https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-console-delete-stack.html[delete its stack]: -** Delete the stack by using the AWS CLI: -+ -[source,terminal] ----- -$ aws cloudformation delete-stack --stack-name <1> ----- -<1> `` is the name of your bootstrap stack. -** Delete the stack by using the link:https://console.aws.amazon.com/cloudformation/[AWS CloudFormation console]. diff --git a/modules/installation-aws-user-infra-installation.adoc b/modules/installation-aws-user-infra-installation.adoc deleted file mode 100644 index 9a5892a23036..000000000000 --- a/modules/installation-aws-user-infra-installation.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -ifeval::["{context}" == "installing-restricted-networks-aws"] -:restricted: -endif::[] -ifdef::openshift-origin[] -:restricted: -endif::[] - -:_content-type: PROCEDURE -[id="installation-aws-user-infra-installation_{context}"] -= Completing an AWS installation on user-provisioned infrastructure - -After you start the {product-title} installation on Amazon Web Service (AWS) -user-provisioned infrastructure, monitor the deployment to completion. - -.Prerequisites - -* You removed the bootstrap node for an {product-title} cluster on user-provisioned AWS infrastructure. -* You installed the `oc` CLI. - -.Procedure - -ifdef::restricted[] -. From the directory that contains the installation program, complete -endif::restricted[] -ifndef::restricted[] -* From the directory that contains the installation program, complete -endif::restricted[] -the cluster installation: -+ -[source,terminal] ----- -$ ./openshift-install --dir wait-for install-complete <1> ----- -<1> For ``, specify the path to the directory that you -stored the installation files in. -+ -.Example output -[source,terminal] ----- -INFO Waiting up to 40m0s for the cluster at https://api.mycluster.example.com:6443 to initialize... -INFO Waiting up to 10m0s for the openshift-console route to be created... -INFO Install complete! -INFO To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/home/myuser/install_dir/auth/kubeconfig' -INFO Access the OpenShift web-console here: https://console-openshift-console.apps.mycluster.example.com -INFO Login to the console with user: "kubeadmin", and password: "password" -INFO Time elapsed: 1s ----- -+ -[IMPORTANT] -==== -* The Ignition config files that the installation program generates contain certificates that expire after 24 hours, which are then renewed at that time. If the cluster is shut down before renewing the certificates and the cluster is later restarted after the 24 hours have elapsed, the cluster automatically recovers the expired certificates. The exception is that you must manually approve the pending `node-bootstrapper` certificate signing requests (CSRs) to recover kubelet certificates. See the documentation for _Recovering from expired control plane certificates_ for more information. - -* It is recommended that you use Ignition config files within 12 hours after they are generated because the 24-hour certificate rotates from 16 to 22 hours after the cluster is installed. By using the Ignition config files within 12 hours, you can avoid installation failure if the certificate update runs during installation. -==== - -ifdef::restricted[] -. 
Register your cluster on the link:https://console.redhat.com/openshift/register[Cluster registration] page. -endif::restricted[] - - -ifeval::["{context}" == "installing-restricted-networks-aws"] -:!restricted: -endif::[] -ifdef::openshift-origin[] -:!restricted: -endif::[] diff --git a/modules/installation-aws-user-infra-requirements.adoc b/modules/installation-aws-user-infra-requirements.adoc deleted file mode 100644 index 4f98ac7b8f4a..000000000000 --- a/modules/installation-aws-user-infra-requirements.adoc +++ /dev/null @@ -1,564 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -[id="installation-aws-user-infra-requirements_{context}"] -= Required AWS infrastructure components - -To install {product-title} on user-provisioned infrastructure in Amazon Web Services (AWS), you must manually create both the machines and their supporting infrastructure. - -For more information about the integration testing for different platforms, see the link:https://access.redhat.com/articles/4128421[OpenShift Container Platform 4.x Tested Integrations] page. - -By using the provided CloudFormation templates, you can create stacks of AWS resources that represent the following components: - -* An AWS Virtual Private Cloud (VPC) -* Networking and load balancing components -* Security groups and roles -* An {product-title} bootstrap node -* {product-title} control plane nodes -* An {product-title} compute node - -Alternatively, you can manually create the components or you can reuse existing infrastructure that meets the cluster requirements. Review the CloudFormation templates for more details about how the components interrelate. - -[id="installation-aws-user-infra-other-infrastructure_{context}"] -== Other infrastructure components - -* A VPC -* DNS entries -* Load balancers (classic or network) and listeners -* A public and a private Route 53 zone -* Security groups -* IAM roles -* S3 buckets - -If you are working in a disconnected environment, you are unable to reach the public IP addresses for EC2, ELB, and S3 endpoints. Depending on the level to which you want to restrict internet traffic during the installation, the following configuration options are available: - -[discrete] -[id="create-vpc-endpoints_{context}"] -=== Option 1: Create VPC endpoints - -Create a VPC endpoint and attach it to the subnets that the clusters are using. Name the endpoints as follows: - -* `ec2..amazonaws.com` -* `elasticloadbalancing..amazonaws.com` -* `s3..amazonaws.com` - -With this option, network traffic remains private between your VPC and the required AWS services. - -[discrete] -[id="create-proxy-without-vpc-endpoints_{context}"] -=== Option 2: Create a proxy without VPC endpoints -As part of the installation process, you can configure an HTTP or HTTPS proxy. With this option, internet traffic goes through the proxy to reach the required AWS services. - -[discrete] -[id="create-proxy-with-vpc-endpoints_{context}"] -=== Option 3: Create a proxy with VPC endpoints -As part of the installation process, you can configure an HTTP or HTTPS proxy with VPC endpoints. Create a VPC endpoint and attach it to the subnets that the clusters are using. Name the endpoints as follows: - -* `ec2..amazonaws.com` -* `elasticloadbalancing..amazonaws.com` -* `s3..amazonaws.com` - -When configuring the proxy in the `install-config.yaml` file, add these endpoints to the `noProxy` field. 
With this option, the proxy prevents the cluster from accessing the internet directly. However, network traffic remains private between your VPC and the required AWS services. - -.Required VPC components - -You must provide a suitable VPC and subnets that allow communication to your -machines. - -[cols="2a,7a,3a,3a",options="header"] -|=== - -|Component -|AWS type -2+|Description - -|VPC -|* `AWS::EC2::VPC` -* `AWS::EC2::VPCEndpoint` -2+|You must provide a public VPC for the cluster to use. The VPC uses an -endpoint that references the route tables for each subnet to improve communication with the registry that is hosted in S3. - -|Public subnets -|* `AWS::EC2::Subnet` -* `AWS::EC2::SubnetNetworkAclAssociation` -2+|Your VPC must have public subnets for between 1 and 3 availability zones -and associate them with appropriate Ingress rules. - -|Internet gateway -| -* `AWS::EC2::InternetGateway` -* `AWS::EC2::VPCGatewayAttachment` -* `AWS::EC2::RouteTable` -* `AWS::EC2::Route` -* `AWS::EC2::SubnetRouteTableAssociation` -* `AWS::EC2::NatGateway` -* `AWS::EC2::EIP` -2+|You must have a public internet gateway, with public routes, attached to the -VPC. In the provided templates, each public subnet has a NAT gateway with an EIP address. These NAT gateways allow cluster resources, like private subnet instances, to reach the internet and are not required for some restricted network or proxy scenarios. - -.7+|Network access control -.7+| * `AWS::EC2::NetworkAcl` -* `AWS::EC2::NetworkAclEntry` -2+|You must allow the VPC to access the following ports: -h|Port -h|Reason - -|`80` -|Inbound HTTP traffic - -|`443` -|Inbound HTTPS traffic - -|`22` -|Inbound SSH traffic - -|`1024` - `65535` -|Inbound ephemeral traffic - -|`0` - `65535` -|Outbound ephemeral traffic - - -|Private subnets -|* `AWS::EC2::Subnet` -* `AWS::EC2::RouteTable` -* `AWS::EC2::SubnetRouteTableAssociation` -2+|Your VPC can have private subnets. The provided CloudFormation templates -can create private subnets for between 1 and 3 availability zones. -If you use private subnets, you must provide appropriate routes and tables -for them. - -|=== - - -.Required DNS and load balancing components - -Your DNS and load balancer configuration needs to use a public hosted zone and -can use a private hosted zone similar to the one that the installation program -uses if it provisions the cluster's infrastructure. You must -create a DNS entry that resolves to your load balancer. An entry for -`api..` must point to the external load balancer, and an -entry for `api-int..` must point to the internal load -balancer. - -The cluster also requires load balancers and listeners for port 6443, which are -required for the Kubernetes API and its extensions, and port 22623, which are -required for the Ignition config files for new machines. The targets will be the -control plane nodes. Port 6443 must be accessible to both clients external to the -cluster and nodes within the cluster. Port 22623 must be accessible to nodes -within the cluster. - - -[cols="2a,2a,8a",options="header"] -|=== - -|Component -|AWS type -|Description - -|DNS -|`AWS::Route53::HostedZone` -|The hosted zone for your internal DNS. - -|Public load balancer -|`AWS::ElasticLoadBalancingV2::LoadBalancer` -|The load balancer for your public subnets. - -|External API server record -|`AWS::Route53::RecordSetGroup` -|Alias records for the external API server. - -|External listener -|`AWS::ElasticLoadBalancingV2::Listener` -|A listener on port 6443 for the external load balancer. 
- -|External target group -|`AWS::ElasticLoadBalancingV2::TargetGroup` -|The target group for the external load balancer. - -|Private load balancer -|`AWS::ElasticLoadBalancingV2::LoadBalancer` -|The load balancer for your private subnets. - -|Internal API server record -|`AWS::Route53::RecordSetGroup` -|Alias records for the internal API server. - -|Internal listener -|`AWS::ElasticLoadBalancingV2::Listener` -|A listener on port 22623 for the internal load balancer. - -|Internal target group -|`AWS::ElasticLoadBalancingV2::TargetGroup` -|The target group for the internal load balancer. - -|Internal listener -|`AWS::ElasticLoadBalancingV2::Listener` -|A listener on port 6443 for the internal load balancer. - -|Internal target group -|`AWS::ElasticLoadBalancingV2::TargetGroup` -|The target group for the internal load balancer. - -|=== - -.Security groups - -The control plane and worker machines require access to the following ports: - -[cols="2a,2a,2a,2a",options="header"] -|=== - -|Group -|Type -|IP Protocol -|Port range - - -.4+|`MasterSecurityGroup` -.4+|`AWS::EC2::SecurityGroup` -|`icmp` -|`0` - -|`tcp` -|`22` - -|`tcp` -|`6443` - -|`tcp` -|`22623` - -.2+|`WorkerSecurityGroup` -.2+|`AWS::EC2::SecurityGroup` -|`icmp` -|`0` - -|`tcp` -|`22` - - -.2+|`BootstrapSecurityGroup` -.2+|`AWS::EC2::SecurityGroup` - -|`tcp` -|`22` - -|`tcp` -|`19531` - -|=== - -.Control plane Ingress - -The control plane machines require the following Ingress groups. Each Ingress group is -a `AWS::EC2::SecurityGroupIngress` resource. - -[cols="2a,5a,2a,2a",options="header"] -|=== - -|Ingress group -|Description -|IP protocol -|Port range - - -|`MasterIngressEtcd` -|etcd -|`tcp` -|`2379`- `2380` - -|`MasterIngressVxlan` -|Vxlan packets -|`udp` -|`4789` - -|`MasterIngressWorkerVxlan` -|Vxlan packets -|`udp` -|`4789` - -|`MasterIngressInternal` -|Internal cluster communication and Kubernetes proxy metrics -|`tcp` -|`9000` - `9999` - -|`MasterIngressWorkerInternal` -|Internal cluster communication -|`tcp` -|`9000` - `9999` - -|`MasterIngressKube` -|Kubernetes kubelet, scheduler and controller manager -|`tcp` -|`10250` - `10259` - -|`MasterIngressWorkerKube` -|Kubernetes kubelet, scheduler and controller manager -|`tcp` -|`10250` - `10259` - -|`MasterIngressIngressServices` -|Kubernetes Ingress services -|`tcp` -|`30000` - `32767` - -|`MasterIngressWorkerIngressServices` -|Kubernetes Ingress services -|`tcp` -|`30000` - `32767` - -|`MasterIngressGeneve` -|Geneve packets -|`udp` -|`6081` - -|`MasterIngressWorkerGeneve` -|Geneve packets -|`udp` -|`6081` - -|`MasterIngressIpsecIke` -|IPsec IKE packets -|`udp` -|`500` - -|`MasterIngressWorkerIpsecIke` -|IPsec IKE packets -|`udp` -|`500` - -|`MasterIngressIpsecNat` -|IPsec NAT-T packets -|`udp` -|`4500` - -|`MasterIngressWorkerIpsecNat` -|IPsec NAT-T packets -|`udp` -|`4500` - -|`MasterIngressIpsecEsp` -|IPsec ESP packets -|`50` -|`All` - -|`MasterIngressWorkerIpsecEsp` -|IPsec ESP packets -|`50` -|`All` - -|`MasterIngressInternalUDP` -|Internal cluster communication -|`udp` -|`9000` - `9999` - -|`MasterIngressWorkerInternalUDP` -|Internal cluster communication -|`udp` -|`9000` - `9999` - -|`MasterIngressIngressServicesUDP` -|Kubernetes Ingress services -|`udp` -|`30000` - `32767` - -|`MasterIngressWorkerIngressServicesUDP` -|Kubernetes Ingress services -|`udp` -|`30000` - `32767` - -|=== - - -.Worker Ingress - -The worker machines require the following Ingress groups. Each Ingress group is -a `AWS::EC2::SecurityGroupIngress` resource. 
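-
-If you use the provided CloudFormation templates, these ingress rules are created for you. If you create the components manually, an equivalent rule, such as allowing Geneve traffic on UDP port 6081 from the control plane security group into the worker security group, could be added with the AWS CLI as in the following sketch; both security group IDs are placeholders.
-
-[source,terminal]
-----
-# Example only: sg-0123456789abcdef0 and sg-0fedcba9876543210 are placeholder IDs
-# for the worker and control plane security groups.
-$ aws ec2 authorize-security-group-ingress \
-    --group-id sg-0123456789abcdef0 \
-    --protocol udp \
-    --port 6081 \
-    --source-group sg-0fedcba9876543210
-----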
- -[cols="2a,5a,2a,2a",options="header"] -|=== - -|Ingress group -|Description -|IP protocol -|Port range - - -|`WorkerIngressVxlan` -|Vxlan packets -|`udp` -|`4789` - -|`WorkerIngressWorkerVxlan` -|Vxlan packets -|`udp` -|`4789` - -|`WorkerIngressInternal` -|Internal cluster communication -|`tcp` -|`9000` - `9999` - -|`WorkerIngressWorkerInternal` -|Internal cluster communication -|`tcp` -|`9000` - `9999` - -|`WorkerIngressKube` -|Kubernetes kubelet, scheduler, and controller manager -|`tcp` -|`10250` - -|`WorkerIngressWorkerKube` -|Kubernetes kubelet, scheduler, and controller manager -|`tcp` -|`10250` - -|`WorkerIngressIngressServices` -|Kubernetes Ingress services -|`tcp` -|`30000` - `32767` - -|`WorkerIngressWorkerIngressServices` -|Kubernetes Ingress services -|`tcp` -|`30000` - `32767` - -|`WorkerIngressGeneve` -|Geneve packets -|`udp` -|`6081` - -|`WorkerIngressMasterGeneve` -|Geneve packets -|`udp` -|`6081` - -|`WorkerIngressIpsecIke` -|IPsec IKE packets -|`udp` -|`500` - -|`WorkerIngressMasterIpsecIke` -|IPsec IKE packets -|`udp` -|`500` - -|`WorkerIngressIpsecNat` -|IPsec NAT-T packets -|`udp` -|`4500` - -|`WorkerIngressMasterIpsecNat` -|IPsec NAT-T packets -|`udp` -|`4500` - -|`WorkerIngressIpsecEsp` -|IPsec ESP packets -|`50` -|`All` - -|`WorkerIngressMasterIpsecEsp` -|IPsec ESP packets -|`50` -|`All` - -|`WorkerIngressInternalUDP` -|Internal cluster communication -|`udp` -|`9000` - `9999` - -|`WorkerIngressMasterInternalUDP` -|Internal cluster communication -|`udp` -|`9000` - `9999` - -|`WorkerIngressIngressServicesUDP` -|Kubernetes Ingress services -|`udp` -|`30000` - `32767` - -|`WorkerIngressMasterIngressServicesUDP` -|Kubernetes Ingress services -|`udp` -|`30000` - `32767` - -|=== - - -.Roles and instance profiles - -You must grant the machines permissions in AWS. The provided CloudFormation -templates grant the machines `Allow` permissions for the following `AWS::IAM::Role` objects -and provide a `AWS::IAM::InstanceProfile` for each set of roles. If you do -not use the templates, you can grant the machines the following broad permissions -or the following individual permissions. - -[cols="2a,2a,2a,2a",options="header"] -|=== - -|Role -|Effect -|Action -|Resource - -.4+|Master -|`Allow` -|`ec2:*` -|`*` - -|`Allow` -|`elasticloadbalancing:*` -|`*` - -|`Allow` -|`iam:PassRole` -|`*` - -|`Allow` -|`s3:GetObject` -|`*` - -|Worker -|`Allow` -|`ec2:Describe*` -|`*` - - -.3+|Bootstrap -|`Allow` -|`ec2:Describe*` -|`*` - -|`Allow` -|`ec2:AttachVolume` -|`*` - -|`Allow` -|`ec2:DetachVolume` -|`*` - -|`Allow` -|`s3:GetObject` -|`*` - -|=== - -[id="installation-aws-user-infra-cluster-machines_{context}"] -== Cluster machines - -You need `AWS::EC2::Instance` objects for the following machines: - -* A bootstrap machine. This machine is required during installation, but you can remove it after your cluster deploys. -* Three control plane machines. The control plane machines are not governed by a control plane machine set. -* Compute machines. You must create at least two compute machines, which are also known as worker machines, during installation. These machines are not governed by a compute machine set. - -//// -You can also create and control them by using a MachineSet after your -control plane initializes and you can access the cluster API by using the `oc` -command line interface. 
-//// diff --git a/modules/installation-aws-user-infra-rhcos-ami.adoc b/modules/installation-aws-user-infra-rhcos-ami.adoc deleted file mode 100644 index bdaa1f3dd69c..000000000000 --- a/modules/installation-aws-user-infra-rhcos-ami.adoc +++ /dev/null @@ -1,210 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -[id="installation-aws-user-infra-rhcos-ami_{context}"] -= {op-system} AMIs for the AWS infrastructure - -Red Hat provides {op-system-first} AMIs that are valid for the various AWS regions and instance architectures that you can manually specify for your {product-title} nodes. - -[NOTE] -==== -By importing your own AMI, you can also install to regions that do not have a published {op-system} AMI. -==== - -ifndef::openshift-origin[] -.x86_64 {op-system} AMIs - -[cols="2a,2a",options="header"] -|=== - -|AWS zone -|AWS AMI - -|`af-south-1` -|`ami-052b3e6b060b5595d` - -|`ap-east-1` -|`ami-09c502968481ee218` - -|`ap-northeast-1` -|`ami-06b1dbe049e3c1d23` - -|`ap-northeast-2` -|`ami-08add6eb5aa1c8639` - -|`ap-northeast-3` -|`ami-0af4dfc64506fe20e` - -|`ap-south-1` -|`ami-09b1532dd3d63fdc0` - -|`ap-south-2` -|`ami-0a915cedf8558e600` - -|`ap-southeast-1` -|`ami-0c914fd7a50130c9e` - -|`ap-southeast-2` -|`ami-04b54199f4be0ec9d` - -|`ap-southeast-3` -|`ami-0be3ee78b9a3fdf07` - -|`ap-southeast-4` -|`ami-00a44d7d5054bb5f8` - -|`ca-central-1` -|`ami-0bb1fd49820ea09ae` - -|`eu-central-1` -|`ami-03d9cb166a11c9b8a` - -|`eu-central-2` -|`ami-089865c640f876630` - -|`eu-north-1` -|`ami-0e94d896e72eeae0d` - -|`eu-south-1` -|`ami-04df4e2850dce0721` - -|`eu-south-2` -|`ami-0d80de3a5ba722545` - -|`eu-west-1` -|`ami-066f2d86026ef97a8` - -|`eu-west-2` -|`ami-0f1c0b26b1c99499d` - -|`eu-west-3` -|`ami-0f639505a9c74d9a2` - -|`me-central-1` -|`ami-0fbb2ece8478f1402` - -|`me-south-1` -|`ami-01507551558853852` - -|`sa-east-1` -|`ami-097132aa0da53c981` - -|`us-east-1` -|`ami-0624891c612b5eaa0` - -|`us-east-2` -|`ami-0dc6c4d1bd5161f13` - -|`us-gov-east-1` -|`ami-0bab20368b3b9b861` - -|`us-gov-west-1` -|`ami-0fe8299f8e808e720` - -|`us-west-1` -|`ami-0c03b7e5954f10f9b` - -|`us-west-2` -|`ami-0f4cdfd74e4a3fc29` - -|=== - -.aarch64 {op-system} AMIs - -[cols="2a,2a",options="header"] -|=== - -|AWS zone -|AWS AMI - -|`af-south-1` -|`ami-0d684ca7c09e6f5fc` - -|`ap-east-1` -|`ami-01b0e1c24d180fe5d` - -|`ap-northeast-1` -|`ami-06439c626e2663888` - -|`ap-northeast-2` -|`ami-0a19d3bed3a2854e3` - -|`ap-northeast-3` -|`ami-08b8fa76fd46b5c58` - -|`ap-south-1` -|`ami-0ec6463b788929a6a` - -|`ap-south-2` -|`ami-0f5077b6d7e1b10a5` - -|`ap-southeast-1` -|`ami-081a6c6a24e2ee453` - -|`ap-southeast-2` -|`ami-0a70049ac02157a02` - -|`ap-southeast-3` -|`ami-065fd6311a9d7e6a6` - -|`ap-southeast-4` -|`ami-0105993dc2508c4f4` - -|`ca-central-1` -|`ami-04582d73d5aad9a85` - -|`eu-central-1` -|`ami-0f72c8b59213f628e` - -|`eu-central-2` -|`ami-0647f43516c31119c` - -|`eu-north-1` -|`ami-0d155ca6a531f5f72` - -|`eu-south-1` -|`ami-02f8d2794a663dbd0` - -|`eu-south-2` -|`ami-0427659985f520cae` - -|`eu-west-1` -|`ami-04e9944a8f9761c3e` - -|`eu-west-2` -|`ami-09c701f11d9a7b167` - -|`eu-west-3` -|`ami-02cd8181243610e0d` - -|`me-central-1` -|`ami-03008d03f133e6ec0` - -|`me-south-1` -|`ami-096bc3b4ec0faad76` - -|`sa-east-1` -|`ami-01f9b5a4f7b8c50a1` - -|`us-east-1` -|`ami-09ea6f8f7845792e1` - -|`us-east-2` -|`ami-039cdb2bf3b5178da` - -|`us-gov-east-1` -|`ami-0fed54a5ab75baed0` - -|`us-gov-west-1` 
-|`ami-0fc5be5af4bb1d79f` - -|`us-west-1` -|`ami-018e5407337da1062` - -|`us-west-2` -|`ami-0c0c67ef81b80e8eb` - -|=== -endif::openshift-origin[] diff --git a/modules/installation-aws_con_connecting-the-vpc-to-the-on-premise-network.adoc b/modules/installation-aws_con_connecting-the-vpc-to-the-on-premise-network.adoc deleted file mode 100644 index 9408672fb476..000000000000 --- a/modules/installation-aws_con_connecting-the-vpc-to-the-on-premise-network.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// This module is included in the following assemblies: -// -// installing/installing_aws/installing-aws-expanding-a-cluster-with-on-premise-bare-metal-nodes.adoc - -:_content-type: CONCEPT -[id="connecting-the-vpc-to-the-on-premise-network_{context}"] -= Connecting the VPC to the on-premise network - -To expand the {product-title} cluster deployed on AWS with on-premise bare metal nodes, you must establish network connectivity between them. You will need to configure the networking using a virtual private network or AWS Direct Connect between the AWS VPC and your on-premise network. This allows traffic to flow between the on-premise nodes and the AWS nodes. - -Additionally, you need to ensure secure access to the Baseboard Management Controllers (BMCs) of the bare metal nodes. When expanding the cluster with the Baremetal Operator, access to the BMCs is required for remotely managing and monitoring the hardware of your on-premise nodes. - -To securely access the BMCs, you can create a separate, secure network segment or use a dedicated VPN connection specifically for BMC access. This way, you can isolate the BMC traffic from other network traffic, reducing the risk of unauthorized access or potential vulnerabilities. - -[WARNING] -==== -Misconfiguration of the network connection between the AWS and on-premise environments can expose the on-premise network and bare-metal nodes to the internet. That is a significant security risk, which might result in an attacker having full access to the exposed machines, and through them to the private network in these environments. -==== diff --git a/modules/installation-aws_con_installing-sno-on-aws.adoc b/modules/installation-aws_con_installing-sno-on-aws.adoc deleted file mode 100644 index a09e50c3bd5c..000000000000 --- a/modules/installation-aws_con_installing-sno-on-aws.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// This module is included in the following assemblies: -// -// installing/installing_sno/install-sno-installing-sno.adoc - -:_content-type: CONCEPT -[id="installing-sno-on-aws_{context}"] -= Installing {sno} on AWS - -Installing a single node cluster on AWS requires installer-provisioned installation using the "Installing a cluster on AWS with customizations" procedure. diff --git a/modules/installation-aws_proc_creating-firewall-rules-for-port-6183.adoc b/modules/installation-aws_proc_creating-firewall-rules-for-port-6183.adoc deleted file mode 100644 index bcf43736d62c..000000000000 --- a/modules/installation-aws_proc_creating-firewall-rules-for-port-6183.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// This module is included in the following assemblies: -// -// installing/installing_aws/installing-aws-expanding-a-cluster-with-on-premise-bare-metal-nodes.adoc - -:_content-type: PROCEDURE -[id="creating-firewall-rules-for-port-6183_{context}"] -= Creating firewall rules for port 6183 - -Port `6183` is open by default on the control plane. 
However, you must create a firewall rule for the VPC connection and for the on-premise network for the bare metal nodes to allow inbound and outbound traffic on that port. - -.Procedure - -. Modify the AWS VPC security group to open port `6183`: - -.. Navigate to the Amazon VPC console in the AWS Management Console. -.. In the left navigation pane, click on **Security Groups**. -.. Find and select the security group associated with the {product-title} cluster. -.. In the **Inbound rules** tab, click **Edit inbound rules**. -.. Click **Add rule** and select **Custom TCP Rule** as the rule type. -.. In the **Port range** field, enter `6183`. -.. In the **Source** field, specify the CIDR block for the on-premise network or the security group ID of the peered VPC (if you have VPC peering) to allow traffic only from the desired sources. -.. Click **Save rules**. - -. Modify the AWS VPC network access control lists to open port `6183`: - -.. In the Amazon VPC console, click on **Network ACLs** in the left navigation pane. -.. Find and select the network ACL associated with your {product-title} cluster's VPC. -.. In the **Inbound rules** tab, click **Edit inbound rules**. -.. Click **Add rule** and enter a rule number in the **Rule #** field. Choose a number that doesn't conflict with existing rules. -.. Select `TCP` as the protocol. -.. In the **Port range** field, enter `6183`. -.. In the **Source** field, specify the CIDR block for the on-premise network to allow traffic only from the desired sources. -.. Click **Save** to save the new rule. -.. Repeat the same process for the **Outbound rules** tab to allow outbound traffic on port `6183`. - -. Modify the on-premise network to allow traffic on port `6183`: - -.. Execute the following command to identify the zone you want to modify: -+ -[source,terminal] ----- -$ sudo firewall-cmd --list-all-zones ----- - -.. To open port `6183` for TCP traffic in the desired zone execute the following command: -+ -[source,terminal] ----- -$ sudo firewall-cmd --zone= --add-port=6183/tcp --permanent ----- -+ -Replace `` with the appropriate zone name. - -.. Reload `firewalld` to apply the new rule: -+ -[source,terminal] ----- -$ sudo firewall-cmd --reload ----- \ No newline at end of file diff --git a/modules/installation-azure-about-government-region.adoc b/modules/installation-azure-about-government-region.adoc deleted file mode 100644 index 0ef5078be7f0..000000000000 --- a/modules/installation-azure-about-government-region.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-government-region.adoc - -[id="installation-azure-about-government-region_{context}"] -= Azure government regions - -{product-title} supports deploying a cluster to -link:https://docs.microsoft.com/en-us/azure/azure-government/documentation-government-welcome[Microsoft Azure Government (MAG)] -regions. MAG is specifically designed for US government agencies at the federal, -state, and local level, as well as contractors, educational institutions, and -other US customers that must run sensitive workloads on Azure. MAG is composed -of government-only data center regions, all granted an -link:https://docs.microsoft.com/en-us/microsoft-365/compliance/offering-dod-disa-l2-l4-l5?view=o365-worldwide#dod-impact-level-5-provisional-authorization[Impact Level 5 Provisional Authorization]. 
- -Installing to a MAG region requires manually configuring the Azure Government -dedicated cloud instance and region in the `install-config.yaml` file. You must -also update your service principal to reference the appropriate government -environment. - -[NOTE] -==== -The Azure government region cannot be selected using the guided terminal prompts -from the installation program. You must define the region manually in the -`install-config.yaml` file. Remember to also set the dedicated cloud instance, -like `AzureUSGovernmentCloud`, based on the region specified. -==== diff --git a/modules/installation-azure-arm-tested-machine-types.adoc b/modules/installation-azure-arm-tested-machine-types.adoc deleted file mode 100644 index d11dace44f89..000000000000 --- a/modules/installation-azure-arm-tested-machine-types.adoc +++ /dev/null @@ -1,20 +0,0 @@ - -// Module included in the following assemblies: -// -// installing/installing_azure/installing-azure-customizations.adoc -// installing/installing_azure/installing-azure-government-region.adoc -// installing/installing_azure/installing-azure-network-customizations.adoc -// installing/installing_azure/installing-azure-private.adoc -// installing/installing_azure/installing-azure-user-infra.adoc -// installing/installing_azure/installing-azure-vnet.adoc - -[id="installation-azure-arm-tested-machine-types_{context}"] -= Tested instance types for Azure on 64-bit ARM infrastructures - -The following Microsoft Azure ARM64 instance types have been tested with {product-title}. - -.Machine types based on 64-bit ARM architecture -[%collapsible] -==== -include::https://raw.githubusercontent.com/openshift/installer/master/docs/user/azure/tested_instance_types_aarch64.md[] -==== \ No newline at end of file diff --git a/modules/installation-azure-config-yaml.adoc b/modules/installation-azure-config-yaml.adoc deleted file mode 100644 index 66f7aeae24d2..000000000000 --- a/modules/installation-azure-config-yaml.adoc +++ /dev/null @@ -1,302 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-customizations.adoc -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-network-customizations.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_azure/installing-azure-vnet.adoc - -ifeval::["{context}" == "installing-azure-network-customizations"] -:with-networking: -endif::[] -ifeval::["{context}" != "installing-azure-network-customizations"] -:without-networking: -endif::[] -ifeval::["{context}" == "installing-azure-vnet"] -:vnet: -endif::[] -ifeval::["{context}" == "installing-azure-private"] -:private: -endif::[] -ifeval::["{context}" == "installing-azure-government-region"] -:gov: -endif::[] - -[id="installation-azure-config-yaml_{context}"] -= Sample customized install-config.yaml file for Azure - -You can customize the `install-config.yaml` file to specify more details about your {product-title} cluster's platform or modify the values of the required parameters. - -[IMPORTANT] -==== -This sample YAML file is provided for reference only. You must obtain your `install-config.yaml` file by using the installation program and modify it. 
-==== - -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com <1> -controlPlane: <2> - hyperthreading: Enabled <3> <4> - name: master - platform: - azure: - encryptionAtHost: true - ultraSSDCapability: Enabled - osDisk: - diskSizeGB: 1024 <5> - diskType: Premium_LRS - diskEncryptionSet: - resourceGroup: disk_encryption_set_resource_group - name: disk_encryption_set_name - subscriptionId: secondary_subscription_id - type: Standard_D8s_v3 - replicas: 3 -compute: <2> -- hyperthreading: Enabled <3> - name: worker - platform: - azure: - ultraSSDCapability: Enabled - type: Standard_D2s_v3 - encryptionAtHost: true - osDisk: - diskSizeGB: 512 <5> - diskType: Standard_LRS - diskEncryptionSet: - resourceGroup: disk_encryption_set_resource_group - name: disk_encryption_set_name - subscriptionId: secondary_subscription_id - zones: <6> - - "1" - - "2" - - "3" - replicas: 5 -metadata: - name: test-cluster <1> -ifdef::without-networking[] -networking: -endif::[] -ifdef::with-networking[] -networking: <2> -endif::[] - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 - networkType: OVNKubernetes <7> - serviceNetwork: - - 172.30.0.0/16 -platform: - azure: - defaultMachinePlatform: - ultraSSDCapability: Enabled - baseDomainResourceGroupName: resource_group <8> -ifndef::gov[] - region: centralus <1> -endif::gov[] -ifdef::gov[] - region: usgovvirginia -endif::gov[] - resourceGroupName: existing_resource_group <9> -ifdef::vnet,private,gov[] - networkResourceGroupName: vnet_resource_group <10> - virtualNetwork: vnet <11> - controlPlaneSubnet: control_plane_subnet <12> - computeSubnet: compute_subnet <13> -endif::vnet,private,gov[] -ifndef::private,gov[] - outboundType: Loadbalancer -endif::private,gov[] -ifdef::private,gov[] - outboundType: UserDefinedRouting <14> -endif::private,gov[] -ifndef::gov[] - cloudName: AzurePublicCloud -endif::gov[] -ifdef::gov[] - cloudName: AzureUSGovernmentCloud <15> -endif::gov[] -pullSecret: '{"auths": ...}' <1> -ifdef::vnet[] -ifndef::openshift-origin[] -fips: false <14> -sshKey: ssh-ed25519 AAAA... <15> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <14> -endif::openshift-origin[] -endif::vnet[] -ifdef::private[] -ifndef::openshift-origin[] -fips: false <15> -sshKey: ssh-ed25519 AAAA... <16> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <15> -endif::openshift-origin[] -endif::private[] -ifdef::gov[] -ifndef::openshift-origin[] -fips: false <16> -endif::openshift-origin[] -ifndef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <17> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <16> -endif::openshift-origin[] -endif::gov[] -ifndef::vnet,private,gov[] -ifndef::openshift-origin[] -fips: false <10> -sshKey: ssh-ed25519 AAAA... <11> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <10> -endif::openshift-origin[] -endif::vnet,private,gov[] -ifdef::private[] -ifndef::openshift-origin[] -publish: Internal <17> -endif::openshift-origin[] -ifdef::openshift-origin[] -publish: Internal <16> -endif::openshift-origin[] -endif::private[] -ifdef::gov[] -ifndef::openshift-origin[] -publish: Internal <18> -endif::openshift-origin[] -ifdef::openshift-origin[] -publish: Internal <17> -endif::openshift-origin[] -endif::gov[] ----- -ifndef::gov[] -<1> Required. The installation program prompts you for this value. -endif::gov[] -ifdef::gov[] -<1> Required. 
-endif::gov[] -<2> If you do not provide these parameters and values, the installation program provides the default value. -<3> The `controlPlane` section is a single mapping, but the `compute` section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Only one control plane pool is used. -<4> Whether to enable or disable simultaneous multithreading, or `hyperthreading`. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. You can disable it by setting the parameter value to `Disabled`. If you disable simultaneous multithreading in some cluster machines, you must disable it in all cluster machines. -+ -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning accounts for the dramatically decreased machine performance. Use larger virtual machine types, such as `Standard_D8s_v3`, for your machines if you disable simultaneous multithreading. -==== -<5> You can specify the size of the disk to use in GB. Minimum recommendation for control plane nodes is 1024 GB. -//To configure faster storage for etcd, especially for larger clusters, set the -//storage type as `io1` and set `iops` to `2000`. -<6> Specify a list of zones to deploy your machines to. For high availability, specify at least two zones. -<7> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<8> Specify the name of the resource group that contains the DNS zone for your base domain. -<9> Specify the name of an already existing resource group to install your cluster to. If undefined, a new resource group is created for the cluster. -ifdef::vnet,private,gov[] -<10> If you use an existing VNet, specify the name of the resource group that contains it. -<11> If you use an existing VNet, specify its name. -<12> If you use an existing VNet, specify the name of the subnet to host the control plane machines. -<13> If you use an existing VNet, specify the name of the subnet to host the compute machines. -endif::vnet,private,gov[] -ifdef::private,gov[] -<14> You can customize your own outbound routing. Configuring user-defined routing prevents exposing external endpoints in your cluster. User-defined routing for egress requires deploying your cluster to an existing VNet. -endif::private,gov[] -ifdef::gov[] -<15> Specify the name of the Azure cloud environment to deploy your cluster to. Set `AzureUSGovernmentCloud` to deploy to a Microsoft Azure Government (MAG) region. The default value is `AzurePublicCloud`. -endif::gov[] -ifdef::vnet[] -ifndef::openshift-origin[] -<14> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<15> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<14> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. 
-endif::openshift-origin[] -endif::vnet[] -ifdef::private[] -ifndef::openshift-origin[] -<15> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<16> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<15> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -endif::private[] -ifdef::gov[] -ifndef::openshift-origin[] -<16> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<17> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<16> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -endif::gov[] -ifndef::vnet,private,gov[] -ifndef::openshift-origin[] -<10> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<11> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<10> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -endif::vnet,private,gov[] -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -ifdef::private[] -ifndef::openshift-origin[] -<17> How to publish the user-facing endpoints of your cluster. Set `publish` to `Internal` to deploy a private cluster, which cannot be accessed from the internet. The default value is `External`. -endif::openshift-origin[] -ifdef::openshift-origin[] -<16> How to publish the user-facing endpoints of your cluster. Set `publish` to `Internal` to deploy a private cluster, which cannot be accessed from the internet. The default value is `External`. -endif::openshift-origin[] -endif::private[] -ifdef::gov[] -ifndef::openshift-origin[] -<18> How to publish the user-facing endpoints of your cluster. Set `publish` to `Internal` to deploy a private cluster, which cannot be accessed from the internet. The default value is `External`. 
-endif::openshift-origin[] -ifdef::openshift-origin[] -<17> How to publish the user-facing endpoints of your cluster. Set `publish` to `Internal` to deploy a private cluster, which cannot be accessed from the internet. The default value is `External`. -endif::openshift-origin[] -endif::gov[] - -ifeval::["{context}" == "installing-azure-network-customizations"] -:!with-networking: -endif::[] -ifeval::["{context}" != "installing-azure-network-customizations"] -:!without-networking: -endif::[] -ifeval::["{context}" == "installing-azure-vnet"] -:!vnet: -endif::[] -ifeval::["{context}" == "installing-azure-private"] -:!private: -endif::[] -ifeval::["{context}" == "installing-azure-government-region"] -:!gov: -endif::[] diff --git a/modules/installation-azure-create-dns-zones.adoc b/modules/installation-azure-create-dns-zones.adoc deleted file mode 100644 index e8a84aab60d2..000000000000 --- a/modules/installation-azure-create-dns-zones.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] - -:_content-type: PROCEDURE -[id="installation-azure-create-dns-zones_{context}"] -= Example for creating DNS zones - -DNS records are required for clusters that use user-provisioned infrastructure. -You should choose the DNS strategy that fits your scenario. - -ifndef::ash[] -For this example, link:https://docs.microsoft.com/en-us/azure/dns/dns-overview[Azure's DNS solution] -is used, so you will create a new public DNS zone for external (internet) -visibility and a private DNS zone for internal cluster resolution. -endif::ash[] -ifdef::ash[] -For this example, link:https://docs.microsoft.com/en-us/azure-stack/operator/azure-stack-integrate-dns?view=azs-2102[Azure Stack Hub's datacenter DNS integration] is used, so you will create a DNS zone. -endif::ash[] - -ifndef::ash[] -[NOTE] -==== -The public DNS zone is not required to exist in the same resource group as the -cluster deployment and might already exist in your organization for the desired base domain. If that is the case, you can skip creating the public DNS zone; be sure the installation config you generated earlier reflects that scenario. -==== -endif::ash[] - -ifdef::ash[] -[NOTE] -==== -The DNS zone is not required to exist in the same resource group as the -cluster deployment and might already exist in your organization for the desired base domain. If that is the case, you can skip creating the DNS zone; be sure the installation config you generated earlier reflects that scenario. -==== -endif::ash[] - -.Prerequisites - -* Configure an Azure account. - -* Generate the Ignition config files for your cluster. - -.Procedure - -ifndef::ash[] -. Create the new public DNS zone in the resource group exported in the -`BASE_DOMAIN_RESOURCE_GROUP` environment variable: -endif::ash[] -ifdef::ash[] -* Create the new DNS zone in the resource group exported in the -`BASE_DOMAIN_RESOURCE_GROUP` environment variable: -endif::ash[] -+ -[source,terminal] ----- -$ az network dns zone create -g ${BASE_DOMAIN_RESOURCE_GROUP} -n ${CLUSTER_NAME}.${BASE_DOMAIN} ----- -+ -ifndef::ash[You can skip this step if you are using a public DNS zone that already exists.] -ifdef::ash[You can skip this step if you are using a DNS zone that already exists.] - -ifndef::ash[] -. 
Create the private DNS zone in the same resource group as the rest of this -deployment: -+ -[source,terminal] ----- -$ az network private-dns zone create -g ${RESOURCE_GROUP} -n ${CLUSTER_NAME}.${BASE_DOMAIN} ----- -endif::ash[] - -ifeval::["{context}" == "installing-azure-user-infra"] -:!ash: -endif::[] diff --git a/modules/installation-azure-create-ingress-dns-records.adoc b/modules/installation-azure-create-ingress-dns-records.adoc deleted file mode 100644 index 7c126498561a..000000000000 --- a/modules/installation-azure-create-ingress-dns-records.adoc +++ /dev/null @@ -1,127 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-user-infra"] -:cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -:cp: Azure Stack Hub -endif::[] - -:_content-type: PROCEDURE -[id="installation-azure-create-ingress-dns-records_{context}"] -= Adding the Ingress DNS records - -If you removed the DNS Zone configuration when creating Kubernetes manifests and -generating Ignition configs, you must manually create DNS records that point at -the Ingress load balancer. You can create either a wildcard -`*.apps.{baseDomain}.` or specific records. You can use A, CNAME, and other -records per your requirements. - -.Prerequisites - -* You deployed an {product-title} cluster on Microsoft {cp} by using infrastructure that you provisioned. -* Install the OpenShift CLI (`oc`). -* Install or update the link:https://docs.microsoft.com/en-us/cli/azure/install-azure-cli-yum?view=azure-cli-latest[Azure CLI]. - -.Procedure - -. Confirm the Ingress router has created a load balancer and populated the -`EXTERNAL-IP` field: -+ -[source,terminal] ----- -$ oc -n openshift-ingress get service router-default ----- -+ -.Example output -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -router-default LoadBalancer 172.30.20.10 35.130.120.110 80:32288/TCP,443:31215/TCP 20 ----- - -. Export the Ingress router IP as a variable: -+ -[source,terminal] ----- -$ export PUBLIC_IP_ROUTER=`oc -n openshift-ingress get service router-default --no-headers | awk '{print $4}'` ----- -ifndef::ash[] -. Add a `*.apps` record to the public DNS zone. - -.. If you are adding this cluster to a new public zone, run: -+ -[source,terminal] ----- -$ az network dns record-set a add-record -g ${BASE_DOMAIN_RESOURCE_GROUP} -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n *.apps -a ${PUBLIC_IP_ROUTER} --ttl 300 ----- - -.. If you are adding this cluster to an already existing public zone, run: -+ -[source,terminal] ----- -$ az network dns record-set a add-record -g ${BASE_DOMAIN_RESOURCE_GROUP} -z ${BASE_DOMAIN} -n *.apps.${CLUSTER_NAME} -a ${PUBLIC_IP_ROUTER} --ttl 300 ----- -endif::ash[] -ifdef::ash[] -. Add a `*.apps` record to the DNS zone. - -.. If you are adding this cluster to a new DNS zone, run: -+ -[source,terminal] ----- -$ az network dns record-set a add-record -g ${BASE_DOMAIN_RESOURCE_GROUP} -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n *.apps -a ${PUBLIC_IP_ROUTER} --ttl 300 ----- -.. If you are adding this cluster to an already existing DNS zone, run: -+ -[source,terminal] ----- -$ az network dns record-set a add-record -g ${BASE_DOMAIN_RESOURCE_GROUP} -z ${BASE_DOMAIN} -n *.apps.${CLUSTER_NAME} -a ${PUBLIC_IP_ROUTER} --ttl 300 ----- -endif::ash[] - -ifndef::ash[] -. 
Add a `*.apps` record to the private DNS zone: -.. Create a `*.apps` record by using the following command: -+ -[source,terminal] ----- -$ az network private-dns record-set a create -g ${RESOURCE_GROUP} -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n *.apps --ttl 300 ----- -.. Add the `*.apps` record to the private DNS zone by using the following command: -+ -[source,terminal] ----- -$ az network private-dns record-set a add-record -g ${RESOURCE_GROUP} -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n *.apps -a ${PUBLIC_IP_ROUTER} ----- -endif::ash[] - -If you prefer to add explicit domains instead of using a wildcard, you can -create entries for each of the cluster's current routes: - -[source,terminal] ----- -$ oc get --all-namespaces -o jsonpath='{range .items[*]}{range .status.ingress[*]}{.host}{"\n"}{end}{end}' routes ----- - -.Example output -[source,terminal] ----- -oauth-openshift.apps.cluster.basedomain.com -console-openshift-console.apps.cluster.basedomain.com -downloads-openshift-console.apps.cluster.basedomain.com -alertmanager-main-openshift-monitoring.apps.cluster.basedomain.com -prometheus-k8s-openshift-monitoring.apps.cluster.basedomain.com ----- - -ifeval::["{context}" == "installing-azure-user-infra"] -:!cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -:!cp: Azure Stack Hub -endif::[] diff --git a/modules/installation-azure-create-resource-group-and-identity.adoc b/modules/installation-azure-create-resource-group-and-identity.adoc deleted file mode 100644 index 00b7ce116ce1..000000000000 --- a/modules/installation-azure-create-resource-group-and-identity.adoc +++ /dev/null @@ -1,94 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-user-infra"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] - -:_content-type: PROCEDURE -[id="installation-azure-create-resource-group-and-identity_{context}"] -= Creating the Azure resource group - -ifdef::azure[] -You must create a Microsoft Azure link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/overview#resource-groups[resource group] and an identity for that resource group. These are both used during the installation of your {product-title} cluster on Azure. -endif::azure[] -ifdef::ash[] -You must create a Microsoft Azure link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/overview#resource-groups[resource group]. This is used during the installation of your {product-title} cluster on Azure Stack Hub. -endif::ash[] - -.Prerequisites - -* Configure an Azure account. - -* Generate the Ignition config files for your cluster. - -.Procedure - -ifdef::azure[] -. Create the resource group in a supported Azure region: -endif::azure[] -ifdef::ash[] -* Create the resource group in a supported Azure region: -endif::ash[] -+ -[source,terminal] ----- -$ az group create --name ${RESOURCE_GROUP} --location ${AZURE_REGION} ----- - -ifdef::azure[] -. Create an Azure identity for the resource group: -+ -[source,terminal] ----- -$ az identity create -g ${RESOURCE_GROUP} -n ${INFRA_ID}-identity ----- -+ -This is used to grant the required access to Operators in your cluster. For -example, this allows the Ingress Operator to create a public IP and its load -balancer. You must assign the Azure identity to a role. - -. 
Grant the Contributor role to the Azure identity: - -.. Export the following variables required by the Azure role assignment: -+ -[source,terminal] ----- -$ export PRINCIPAL_ID=`az identity show -g ${RESOURCE_GROUP} -n ${INFRA_ID}-identity --query principalId --out tsv` ----- -+ -[source,terminal] ----- -$ export RESOURCE_GROUP_ID=`az group show -g ${RESOURCE_GROUP} --query id --out tsv` ----- - -.. Assign the Contributor role to the identity: -+ -[source,terminal] ----- -$ az role assignment create --assignee "${PRINCIPAL_ID}" --role 'Contributor' --scope "${RESOURCE_GROUP_ID}" ----- -+ -[NOTE] -==== -If you want to assign a custom role with all the required permissions to the identity, run the following command: -[source,terminal] ----- -$ az role assignment create --assignee "${PRINCIPAL_ID}" --role \ <1> ---scope "${RESOURCE_GROUP_ID}" ----- -<1> Specifies the custom role name. -==== -endif::azure[] - -ifeval::["{context}" == "installing-azure-user-infra"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] diff --git a/modules/installation-azure-finalizing-encryption.adoc b/modules/installation-azure-finalizing-encryption.adoc deleted file mode 100644 index 84bfe847e7eb..000000000000 --- a/modules/installation-azure-finalizing-encryption.adoc +++ /dev/null @@ -1,155 +0,0 @@ -//Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-customizations.adoc -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-network-customizations.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_azure/installing-azure-vnet.adoc - - -ifeval::["{context}" == "installing-azure-customizations"] -:azure-public: -endif::[] -ifeval::["{context}" == "installing-azure-government-region"] -:azure-gov: -endif::[] -ifeval::["{context}" == "installing-azure-network-customizations"] -:azure-public: -endif::[] -ifeval::["{context}" == "installing-azure-private"] -:azure-public: -endif::[] -ifeval::["{context}" == "installing-azure-vnet"] -:azure-public: -endif::[] - -:_content-type: PROCEDURE -[id="finalizing-encryption_{context}"] -= Finalizing user-managed encryption after installation -If you installed {product-title} using a user-managed encryption key, you can complete the installation by creating a new storage class and granting write permissions to the Azure cluster resource group. - -.Procedure - -. Obtain the identity of the cluster resource group used by the installer: -.. If you specified an existing resource group in `install-config.yaml`, obtain its Azure identity by running the following command: -+ -[source,terminal] ----- -$ az identity list --resource-group "" ----- -.. If you did not specify a existing resource group in `install-config.yaml`, locate the resource group that the installer created, and then obtain its Azure identity by running the following commands: -+ -[source,terminal] ----- -$ az group list ----- -+ -[source,terminal] ----- -$ az identity list --resource-group "" ----- -+ -. Grant a role assignment to the cluster resource group so that it can write to the Disk Encryption Set by running the following command: -+ -[source,terminal] ----- -$ az role assignment create --role "" \// <1> - --assignee "" <2> ----- -<1> Specifies an Azure role that has read/write permissions to the disk encryption set. You can use the `Owner` role or a custom role with the necessary permissions. 
-<2> Specifies the identity of the cluster resource group. -+ -. Obtain the `id` of the disk encryption set you created prior to installation by running the following command: -+ -[source,terminal] ----- -$ az disk-encryption-set show -n \// <1> - --resource-group <2> ----- -<1> Specifies the name of the disk encryption set. -<2> Specifies the resource group that contains the disk encryption set. -The `id` is in the format of `"/subscriptions/.../resourceGroups/.../providers/Microsoft.Compute/diskEncryptionSets/..."`. -+ -. Obtain the identity of the cluster service principal by running the following command: -+ -[source,terminal] ----- -$ az identity show -g \// <1> - -n \// <2> - --query principalId --out tsv ----- -<1> Specifies the name of the cluster resource group created by the installation program. -<2> Specifies the name of the cluster service principal created by the installation program. -The identity is in the format of `12345678-1234-1234-1234-1234567890`. -ifdef::azure-gov[] -. Create a role assignment that grants the cluster service principal `Contributor` privileges to the disk encryption set by running the following command: -+ -[source,terminal] ----- -$ az role assignment create --assignee \// <1> - --role 'Contributor' \// - --scope \// <2> ----- -<1> Specifies the ID of the cluster service principal obtained in the previous step. -<2> Specifies the ID of the disk encryption set. -endif::azure-gov[] -ifdef::azure-public[] -. Create a role assignment that grants the cluster service principal necessary privileges to the disk encryption set by running the following command: -+ -[source,terminal] ----- -$ az role assignment create --assignee \// <1> - --role \// <2> - --scope \// <3> ----- -<1> Specifies the ID of the cluster service principal obtained in the previous step. -<2> Specifies the Azure role name. You can use the `Contributor` role or a custom role with the necessary permissions. -<3> Specifies the ID of the disk encryption set. -endif::azure-public[] -+ -. Create a storage class that uses the user-managed disk encryption set: -.. Save the following storage class definition to a file, for example `storage-class-definition.yaml`: -+ -[source,yaml] ----- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: managed-premium -provisioner: kubernetes.io/azure-disk -parameters: - skuname: Premium_LRS - kind: Managed - diskEncryptionSetID: "" <1> - resourceGroup: "" <2> -reclaimPolicy: Delete -allowVolumeExpansion: true -volumeBindingMode: WaitForFirstConsumer ----- -<1> Specifies the ID of the disk encryption set that you created in the prerequisite steps, for example `"/subscriptions/xxxxxx-xxxxx-xxxxx/resourceGroups/test-encryption/providers/Microsoft.Compute/diskEncryptionSets/disk-encryption-set-xxxxxx"`. -<2> Specifies the name of the resource group used by the installer. This is the same resource group from the first step. -.. Create the storage class `managed-premium` from the file you created by running the following command: -+ -[source,terminal] ----- -$ oc create -f storage-class-definition.yaml ----- -. Select the `managed-premium` storage class when you create persistent volumes to use encrypted storage. 
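-+
-For example, a persistent volume claim that consumes encrypted storage through the new storage class might look like the following sketch. The claim name `encrypted-pvc` and the requested size are illustrative values only, not values produced by the installation program:
-+
-[source,terminal]
-----
-$ cat <<EOF | oc create -f -
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: encrypted-pvc               # illustrative name
-spec:
-  accessModes:
-  - ReadWriteOnce
-  storageClassName: managed-premium # the storage class created in the previous step
-  resources:
-    requests:
-      storage: 10Gi                 # illustrative size
-EOF
-----
-+
-Volumes provisioned for this claim are encrypted with the disk encryption set that the `managed-premium` storage class references.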
- - - -ifeval::["{context}" == "installing-azure-customizations"] -:!azure-public: -endif::[] -ifeval::["{context}" == "installing-azure-government-region"] -:!azure-gov: -endif::[] -ifeval::["{context}" == "installing-azure-network-customizations"] -:!azure-public: -endif::[] -ifeval::["{context}" == "installing-azure-private"] -:!azure-public: -endif::[] -ifeval::["{context}" == "installing-azure-vnet"] -:!azure-public: -endif::[] \ No newline at end of file diff --git a/modules/installation-azure-increasing-limits.adoc b/modules/installation-azure-increasing-limits.adoc deleted file mode 100644 index 2e60107fdd1a..000000000000 --- a/modules/installation-azure-increasing-limits.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-account.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc - -:_content-type: PROCEDURE -[id="installation-azure-increasing-limits_{context}"] -= Increasing Azure account limits - -To increase an account limit, file a support request on the Azure portal. -[NOTE] -==== -You can increase only one type of quota per support request. -==== - -.Procedure - -. From the Azure portal, click *Help + support* in the lower left corner. - -. Click *New support request* and then select the required values: -.. From the *Issue type* list, select *Service and subscription limits (quotas)*. -.. From the *Subscription* list, select the subscription to modify. -.. From the *Quota type* list, select the quota to increase. For example, select -*Compute-VM (cores-vCPUs) subscription limit increases* to increase the number -of vCPUs, which is required to install a cluster. -.. Click *Next: Solutions*. - -. On the *Problem Details* page, provide the required information for your quota -increase: -.. Click *Provide details* and provide the required details in the *Quota details* window. -.. In the SUPPORT METHOD and CONTACT INFO sections, provide the issue severity -and your contact details. - -. Click *Next: Review + create* and then click *Create*. diff --git a/modules/installation-azure-limits.adoc b/modules/installation-azure-limits.adoc deleted file mode 100644 index 4a85205a3bc4..000000000000 --- a/modules/installation-azure-limits.adoc +++ /dev/null @@ -1,232 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-account.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -:cp: Azure Stack Hub -:upi: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-account"] -:ash: -:upi: -:cp: Azure Stack Hub -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:cp: Azure -:upi: -endif::[] -ifeval::["{context}" == "installing-azure-account"] -:cp: Azure -endif::[] - -:_content-type: REFERENCE -[id="installation-azure-limits_{context}"] -= {cp} account limits - -ifndef::ash[] -The {product-title} cluster uses a number of Microsoft {cp} components, and the default link:https://docs.microsoft.com/en-us/azure/azure-subscription-service-limits[Azure subscription and service limits, quotas, and constraints] affect your ability to install {product-title} clusters. 
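-
-As a quick check that is separate from the installation program itself, you can review the current usage and limits for the compute quotas in a region by using the Azure CLI. The region name in this sketch is illustrative:
-
-[source,terminal]
-----
-$ az vm list-usage --location centralus -o table
-----
-
-The output shows the current value and the limit for each compute quota in that region, including the regional vCPU totals.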
- -[IMPORTANT] -==== -Default limits vary by offer category types, such as Free Trial and Pay-As-You-Go, and by series, such as Dv2, F, and G. For example, the default for Enterprise Agreement subscriptions is 350 cores. - -Check the limits for your subscription type and if necessary, increase quota limits for your account before you install a default -cluster on Azure. -==== -endif::ash[] -ifdef::ash[] -The {product-title} cluster uses a number of Microsoft Azure Stack Hub components, and the default link:https://docs.microsoft.com/en-us/azure-stack/operator/azure-stack-quota-types?view=azs-2102[Quota types in Azure Stack Hub] affect your ability to install {product-title} clusters. -endif::ash[] - -The following table summarizes the {cp} components whose limits can impact your -ability to install and run {product-title} clusters. - -ifndef::ash[] -[cols="2a,3a,3a,8a",options="header"] -|=== -|Component |Number of components required by default| Default {cp} limit |Description -endif::ash[] -ifdef::ash[] -[cols="2a,3a,8a",options="header"] -|=== -|Component |Number of components required by default |Description -endif::ash[] - -|vCPU -ifndef::ash[] -ifndef::upi[] -|44 -endif::upi[] -ifdef::upi[] -|40 -endif::upi[] -|20 per region -ifndef::upi[] -|A default cluster requires 44 vCPUs, so you must increase the account limit. -endif::upi[] -ifdef::upi[] -|A default cluster requires 40 vCPUs, so you must increase the account limit. -endif::upi[] - -By default, each cluster creates the following instances: - -* One bootstrap machine, which is removed after installation -* Three control plane machines -* Three compute machines - -ifndef::upi[] -Because the bootstrap and control plane machines use `Standard_D8s_v3` virtual -machines, which use 8 vCPUs, and the compute machines use `Standard_D4s_v3` -virtual machines, which use 4 vCPUs, a default cluster requires 44 vCPUs. -The bootstrap node VM, which uses 8 vCPUs, is used only during installation. -endif::upi[] -ifdef::upi[] -Because the bootstrap machine uses `Standard_D4s_v3` machines, which use 4 vCPUs, -the control plane machines use `Standard_D8s_v3` virtual -machines, which use 8 vCPUs, and the worker machines use `Standard_D4s_v3` -virtual machines, which use 4 vCPUs, a default cluster requires 40 vCPUs. -The bootstrap node VM, which uses 4 vCPUs, is used only during installation. -endif::upi[] -endif::ash[] -ifdef::ash[] -|56 -|A default cluster requires 56 vCPUs, so you must increase the account limit. - -By default, each cluster creates the following instances: - -* One bootstrap machine, which is removed after installation -* Three control plane machines -* Three compute machines - -Because the bootstrap, control plane, and worker machines use `Standard_DS4_v2` virtual machines, which use 8 vCPUs, a default cluster requires 56 vCPUs. The bootstrap node VM is used only during installation. -endif::ash[] - -To deploy more worker nodes, enable autoscaling, deploy large workloads, or use -a different instance type, you must further increase the vCPU limit for your -account to ensure that your cluster can deploy the machines that you require. - -ifndef::ash[] -By default, the installation program distributes control plane and compute machines across -link:https://azure.microsoft.com/en-us/global-infrastructure/availability-zones/[all availability zones] -within -link:https://azure.microsoft.com/en-us/global-infrastructure/regions[a region]. 
-To ensure high availability for your cluster, select a region with at least -three availability zones. If your region contains fewer than three availability -zones, the installation program places more than one control plane machine in the -available zones. -endif::ash[] - -ifndef::ash[] -|OS Disk -|7 -| -|Each cluster machine must have a minimum of 100 GB of storage and 300 IOPS. While these are the minimum supported values, faster storage is recommended for production clusters and clusters with intensive workloads. For more information about optimizing storage for performance, see the page titled "Optimizing storage" in the "Scalability and performance" section. -endif::ash[] - -|VNet -| 1 -ifndef::ash[] -| 1000 per region -endif::ash[] -| Each default cluster requires one Virtual Network (VNet), which contains two -subnets. - -|Network interfaces -|7 -ifndef::ash[] -|65,536 per region -endif::ash[] -|Each default cluster requires seven network interfaces. If you create more -machines or your deployed workloads create load balancers, your cluster uses -more network interfaces. - -|Network security groups -|2 -ifndef::ash[] -|5000 -endif::ash[] -| Each cluster creates network security groups for each subnet in the VNet. -The default cluster creates network -security groups for the control plane and for the compute node subnets: - -[horizontal] - `controlplane`:: Allows the control plane machines to be reached on port 6443 - from anywhere -`node`:: Allows worker nodes to be reached from the internet on ports 80 and 443 - -|Network load balancers -| 3 -ifndef::ash[] -| 1000 per region -endif::ash[] -|Each cluster creates the following -link:https://docs.microsoft.com/en-us/azure/load-balancer/load-balancer-overview[load balancers]: - -[horizontal] -`default`:: Public IP address that load balances requests to ports 80 and 443 across worker machines -`internal`:: Private IP address that load balances requests to ports 6443 and 22623 across control plane machines -`external`:: Public IP address that load balances requests to port 6443 across control plane machines - -If your applications create more Kubernetes `LoadBalancer` service objects, -your cluster uses more load balancers. - -|Public IP addresses -ifndef::ash[] -|3 -| -|Each of the two public load balancers uses a public IP address. The bootstrap -machine also uses a public IP address so that you can SSH into the -machine to troubleshoot issues during installation. The IP address for the -bootstrap node is used only during installation. -endif::ash[] -ifdef::ash[] -|2 -|The public load balancer uses a public IP address. The bootstrap -machine also uses a public IP address so that you can SSH into the -machine to troubleshoot issues during installation. The IP address for the -bootstrap node is used only during installation. -endif::ash[] - -|Private IP addresses -|7 -ifndef::ash[] -| -endif::ash[] -|The internal load balancer, each of the three control plane machines, and each -of the three worker machines each use a private IP address. - -ifndef::ash[] -|Spot VM vCPUs (optional) -|0 - -If you configure spot VMs, your cluster must have two spot VM vCPUs for every compute node. -|20 per region -|This is an optional component. To use spot VMs, you must increase the Azure default limit to at least twice the number of compute nodes in your cluster. -[NOTE] -==== -Using spot VMs for control plane nodes is not recommended. 
-==== -endif::ash[] -|=== - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -:!cp: Azure Stack Hub -:!upi: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-account"] -:!ash: -:!cp: Azure Stack Hub -:!upi: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:!cp: Azure -:!upi: -endif::[] -ifeval::["{context}" == "installing-azure-account"] -:!cp: Azure -endif::[] diff --git a/modules/installation-azure-marketplace-subscribe.adoc b/modules/installation-azure-marketplace-subscribe.adoc deleted file mode 100644 index bacdb4b01f84..000000000000 --- a/modules/installation-azure-marketplace-subscribe.adoc +++ /dev/null @@ -1,221 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-azure-customizations.adoc -// * installing/installing_aws/installing-azure-user-infra.adoc -// * machine_management/creating-machineset-azure.adoc -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -ifeval::["{context}" == "installing-azure-customizations"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:upi: -endif::[] -ifeval::["{context}" == "creating-machineset-azure"] -:mapi: -endif::[] -ifeval::["{context}" == "cpmso-using"] -:mapi: -endif::[] - -//mpytlak: The procedure differs depending on whether this module is used in an IPI or UPI assembly. -//jrouth: Also some variations for when it appears in the machine management content (`mapi`). - -:_content-type: PROCEDURE -[id="installation-azure-marketplace-subscribe_{context}"] -= Selecting an Azure Marketplace image -ifndef::mapi[] -If you are deploying an {product-title} cluster using the Azure Marketplace offering, you must first obtain the Azure Marketplace image. The installation program uses this image to deploy worker nodes. When obtaining your image, consider the following: -endif::mapi[] -ifdef::mapi[] -You can create a machine set running on Azure that deploys machines that use the Azure Marketplace offering. To use this offering, you must first obtain the Azure Marketplace image. When obtaining your image, consider the following: -endif::mapi[] - -* While the images are the same, the Azure Marketplace publisher is different depending on your region. If you are located in North America, specify `redhat` as the publisher. If you are located in EMEA, specify `redhat-limited` as the publisher. -* The offer includes a `rh-ocp-worker` SKU and a `rh-ocp-worker-gen1` SKU. The `rh-ocp-worker` SKU represents a Hyper-V generation version 2 VM image. The default instance types used in {product-title} are version 2 compatible. If you plan to use an instance type that is only version 1 compatible, use the image associated with the `rh-ocp-worker-gen1` SKU. The `rh-ocp-worker-gen1` SKU represents a Hyper-V version 1 VM image. -//What happens with control plane machines? "worker" SKU seems incorrect - -[IMPORTANT] -==== -Installing images with the Azure marketplace is not supported on clusters with 64-bit ARM instances. -==== - -.Prerequisites - -* You have installed the Azure CLI client `(az)`. -* Your Azure account is entitled for the offer and you have logged into this account with the Azure CLI client. - -.Procedure - -. 
Display all of the available {product-title} images by running one of the following commands: -+ --- -** North America: -+ -[source,terminal] ----- -$ az vm image list --all --offer rh-ocp-worker --publisher redhat -o table ----- -+ -.Example output -[source,terminal] ----- -Offer Publisher Sku Urn Version -------------- -------------- ------------------ -------------------------------------------------------------- -------------- -rh-ocp-worker RedHat rh-ocp-worker RedHat:rh-ocp-worker:rh-ocpworker:4.8.2021122100 4.8.2021122100 -rh-ocp-worker RedHat rh-ocp-worker-gen1 RedHat:rh-ocp-worker:rh-ocp-worker-gen1:4.8.2021122100 4.8.2021122100 ----- -** EMEA: -+ -[source,terminal] ----- -$ az vm image list --all --offer rh-ocp-worker --publisher redhat-limited -o table ----- -+ -.Example output -[source,terminal] ----- -Offer Publisher Sku Urn Version -------------- -------------- ------------------ -------------------------------------------------------------- -------------- -rh-ocp-worker redhat-limited rh-ocp-worker redhat-limited:rh-ocp-worker:rh-ocp-worker:4.8.2021122100 4.8.2021122100 -rh-ocp-worker redhat-limited rh-ocp-worker-gen1 redhat-limited:rh-ocp-worker:rh-ocp-worker-gen1:4.8.2021122100 4.8.2021122100 ----- --- -+ -[NOTE] -==== -Regardless of the version of {product-title} that you install, the correct version of the Azure Marketplace image to use is 4.8. If required, your VMs are automatically upgraded as part of the installation process. -==== -. Inspect the image for your offer by running one of the following commands: -** North America: -+ -[source,terminal] ----- -$ az vm image show --urn redhat:rh-ocp-worker:rh-ocp-worker: ----- -** EMEA: -+ -[source,terminal] ----- -$ az vm image show --urn redhat-limited:rh-ocp-worker:rh-ocp-worker: ----- -. Review the terms of the offer by running one of the following commands: -** North America: -+ -[source,terminal] ----- -$ az vm image terms show --urn redhat:rh-ocp-worker:rh-ocp-worker: ----- -** EMEA: -+ -[source,terminal] ----- -$ az vm image terms show --urn redhat-limited:rh-ocp-worker:rh-ocp-worker: ----- -. Accept the terms of the offering by running one of the following commands: -** North America: -+ -[source,terminal] ----- -$ az vm image terms accept --urn redhat:rh-ocp-worker:rh-ocp-worker: ----- -** EMEA: -+ -[source,terminal] ----- -$ az vm image terms accept --urn redhat-limited:rh-ocp-worker:rh-ocp-worker: ----- -ifdef::ipi[] -. Record the image details of your offer. You must update the `compute` section in the `install-config.yaml` file with values for `publisher`, `offer`, `sku`, and `version` before deploying the cluster. -endif::ipi[] -ifdef::upi[] -. Record the image details of your offer. If you use the Azure Resource Manager (ARM) template to deploy your worker nodes: -+ -.. Update `storageProfile.imageReference` by deleting the `id` parameter and adding the `offer`, `publisher`, `sku`, and `version` parameters by using the values from your offer. -.. Specify a `plan` for the virtual machines (VMs). -+ -.Example `06_workers.json` ARM template with an updated `storageProfile.imageReference` object and a specified `plan` -+ -[source,json,subs="none"] ----- -... - "plan" : { - "name": "rh-ocp-worker", - "product": "rh-ocp-worker", - "publisher": "redhat" - }, - "dependsOn" : [ - "[concat('Microsoft.Network/networkInterfaces/', concat(variables('vmNames')[copyIndex()], '-nic'))]" - ], - "properties" : { -... 
- "storageProfile": { - "imageReference": { - "offer": "rh-ocp-worker", - "publisher": "redhat", - "sku": "rh-ocp-worker", - "version": "4.8.2021122100" - } - ... - } -... - } ----- - -endif::upi[] -ifdef::mapi[] -. Record the image details of your offer, specifically the values for `publisher`, `offer`, `sku`, and `version`. -endif::mapi[] - -ifdef::ipi[] -.Sample `install-config.yaml` file with the Azure Marketplace worker nodes - -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com -compute: -- hyperthreading: Enabled - name: worker - platform: - azure: - type: Standard_D4s_v5 - osImage: - publisher: redhat - offer: rh-ocp-worker - sku: rh-ocp-worker - version: 4.8.2021122100 - replicas: 3 ----- -endif::ipi[] -ifdef::mapi[] -. Add the following parameters to the `providerSpec` section of your machine set YAML file using the image details for your offer: -+ -.Sample `providerSpec` image values for Azure Marketplace machines -[source,yaml] ----- -providerSpec: - value: - image: - offer: rh-ocp-worker - publisher: redhat - resourceID: "" - sku: rh-ocp-worker - type: MarketplaceWithPlan - version: 4.8.2021122100 ----- -//offer also has "worker" -endif::mapi[] - -ifeval::["{context}" == "installing-azure-customizations"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:!upi: -endif::[] -ifeval::["{context}" == "creating-machineset-azure"] -:!mapi: -endif::[] -ifeval::["{context}" == "cpmso-using"] -:!mapi: -endif::[] \ No newline at end of file diff --git a/modules/installation-azure-marketplace.adoc b/modules/installation-azure-marketplace.adoc deleted file mode 100644 index b7608a754331..000000000000 --- a/modules/installation-azure-marketplace.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-azure-account.adoc - -:_content-type: CONCEPT -[id="installation-azure-marketplace_{context}"] -= Supported Azure Marketplace regions - -Installing a cluster using the Azure Marketplace image is available to customers who purchase the offer in North America and EMEA. - -While the offer must be purchased in North America or EMEA, you can deploy the cluster to any of the Azure public partitions that {product-title} supports. - -[NOTE] -==== -Deploying a cluster using the Azure Marketplace image is not supported for the Azure Government regions. -==== diff --git a/modules/installation-azure-network-config.adoc b/modules/installation-azure-network-config.adoc deleted file mode 100644 index e23deb945e48..000000000000 --- a/modules/installation-azure-network-config.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-account.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc - -:_content-type: PROCEDURE -[id="installation-azure-network-config_{context}"] -= Configuring a public DNS zone in Azure - -To install {product-title}, the Microsoft Azure account you use must -have a dedicated public hosted DNS zone in your account. This zone must be -authoritative for the domain. This service provides -cluster DNS resolution and name lookup for external connections to the cluster. - -.Procedure - -. Identify your domain, or subdomain, and registrar. You can transfer an -existing domain and registrar or obtain a new one through Azure or another source. 
-+ -[NOTE] -==== -For more information about purchasing domains through Azure, see -link:https://docs.microsoft.com/en-us/azure/app-service/manage-custom-dns-buy-domain[Buy a custom domain name for Azure App Service] -in the Azure documentation. -==== - -. If you are using an existing domain and registrar, migrate its DNS to Azure. See -link:https://docs.microsoft.com/en-us/azure/app-service/manage-custom-dns-migrate-domain[Migrate an active DNS name to Azure App Service] -in the Azure documentation. - -. Configure DNS for your domain. Follow the steps in the -link:https://docs.microsoft.com/en-us/azure/dns/dns-delegate-domain-azure-dns[Tutorial: Host your domain in Azure DNS] -in the Azure documentation to create a public hosted zone for your domain or -subdomain, extract the new authoritative name servers, and update the registrar -records for the name servers that your domain uses. -+ -Use an appropriate root domain, such as `openshiftcorp.com`, or subdomain, -such as `clusters.openshiftcorp.com`. - -. If you use a subdomain, follow your company's procedures to add its delegation -records to the parent domain. diff --git a/modules/installation-azure-permissions.adoc b/modules/installation-azure-permissions.adoc deleted file mode 100644 index 7d23ada530a6..000000000000 --- a/modules/installation-azure-permissions.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-account.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc - -[id="installation-azure-permissions_{context}"] -= Required Azure roles - -{product-title} needs a service principal so it can manage Microsoft Azure resources. Before you can create a service principal, your Azure account subscription must have the following roles: - -* `User Access Administrator` -* `Contributor` - -To set roles on the Azure portal, see the link:https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal[Manage access to Azure resources using RBAC and the Azure portal] in the Azure documentation. \ No newline at end of file diff --git a/modules/installation-azure-preparing-diskencryptionsets.adoc b/modules/installation-azure-preparing-diskencryptionsets.adoc deleted file mode 100644 index 858b35021320..000000000000 --- a/modules/installation-azure-preparing-diskencryptionsets.adoc +++ /dev/null @@ -1,132 +0,0 @@ -//Module included in the following assemblies: -// -// * installing/installing_azure/enabling-disk-encryption-sets-azure.adoc - -:_content-type: PROCEDURE -[id="preparing-disk-encryption-sets"] -= Preparing an Azure Disk Encryption Set -The {product-title} installer can use an existing Disk Encryption Set with a user-managed key. To enable this feature, you can create a Disk Encryption Set in Azure and provide the key to the installer. - -.Procedure - -. Set the following environment variables for the Azure resource group by running the following command: -+ -[source,terminal] ----- -$ export RESOURCEGROUP="" \// <1> - LOCATION="" <2> ----- -<1> Specifies the name of the Azure resource group where you will create the Disk Encryption Set and encryption key. To avoid losing access to your keys after destroying the cluster, you should create the Disk Encryption Set in a different resource group than the resource group where you install the cluster. -<2> Specifies the Azure location where you will create the resource group. -+ -. 
Set the following environment variables for the Azure Key Vault and Disk Encryption Set by running the following command: -+ -[source,terminal] ----- -$ export KEYVAULT_NAME="" \// <1> - KEYVAULT_KEY_NAME="" \// <2> - DISK_ENCRYPTION_SET_NAME="" <3> ----- -<1> Specifies the name of the Azure Key Vault you will create. -<2> Specifies the name of the encryption key you will create. -<3> Specifies the name of the disk encryption set you will create. -+ -. Set the environment variable for the ID of your Azure Service Principal by running the following command: -+ -[source,terminal] ----- -$ export CLUSTER_SP_ID="" <1> ----- -<1> Specifies the ID of the service principal you will use for this installation. -+ -. Enable host-level encryption in Azure by running the following commands: -+ -[source,terminal] ----- -$ az feature register --namespace "Microsoft.Compute" --name "EncryptionAtHost" ----- -+ -[source,terminal] ----- -$ az feature show --namespace Microsoft.Compute --name EncryptionAtHost ----- -+ -[source,terminal] ----- -$ az provider register -n Microsoft.Compute ----- -+ -. Create an Azure Resource Group to hold the disk encryption set and associated resources by running the following command: -+ -[source,terminal] ----- -$ az group create --name $RESOURCEGROUP --location $LOCATION ----- -+ -. Create an Azure key vault by running the following command: -+ -[source,terminal] ----- -$ az keyvault create -n $KEYVAULT_NAME -g $RESOURCEGROUP -l $LOCATION \ - --enable-purge-protection true --enable-soft-delete true ----- -+ -. Create an encryption key in the key vault by running the following command: -+ -[source,terminal] ----- -$ az keyvault key create --vault-name $KEYVAULT_NAME -n $KEYVAULT_KEY_NAME \ - --protection software ----- -+ -. Capture the ID of the key vault by running the following command: -+ -[source,terminal] ----- -$ KEYVAULT_ID=$(az keyvault show --name $KEYVAULT_NAME --query "[id]" -o tsv) ----- -+ -. Capture the key URL in the key vault by running the following command: -+ -[source,terminal] ----- -$ KEYVAULT_KEY_URL=$(az keyvault key show --vault-name $KEYVAULT_NAME --name \ - $KEYVAULT_KEY_NAME --query "[key.kid]" -o tsv) ----- -+ -. Create a disk encryption set by running the following command: -+ -[source,terminal] ----- -$ az disk-encryption-set create -n $DISK_ENCRYPTION_SET_NAME -l $LOCATION -g \ - $RESOURCEGROUP --source-vault $KEYVAULT_ID --key-url $KEYVAULT_KEY_URL ----- -+ -. Grant the DiskEncryptionSet resource access to the key vault by running the following commands: -+ -[source,terminal] ----- -$ DES_IDENTITY=$(az disk-encryption-set show -n $DISK_ENCRYPTION_SET_NAME -g \ - $RESOURCEGROUP --query "[identity.principalId]" -o tsv) ----- -+ -[source,terminal] ----- -$ az keyvault set-policy -n $KEYVAULT_NAME -g $RESOURCEGROUP --object-id \ - $DES_IDENTITY --key-permissions wrapkey unwrapkey get ----- -+ -. Grant the Azure Service Principal permission to read the DiskEncryptionSet by running the following commands: -+ -[source,terminal] ----- -$ DES_RESOURCE_ID=$(az disk-encryption-set show -n $DISK_ENCRYPTION_SET_NAME -g \ - $RESOURCEGROUP --query "[id]" -o tsv) ----- -+ -[source,terminal] ----- -$ az role assignment create --assignee $CLUSTER_SP_ID --role "" \// <1> - --scope $DES_RESOURCE_ID -o jsonc ----- -<1> Specifies an Azure role with read permissions to the disk encryption set. You can use the `Owner` role or a custom role with the necessary permissions. 
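-
-The sample `install-config.yaml` files in this documentation reference a disk encryption set by its resource group, name, and subscription ID. As a minimal sketch, assuming that the environment variables from this procedure are still set and that the active CLI subscription owns the disk encryption set, you can collect those values by running the following commands:
-
-[source,terminal]
-----
-$ az account show --query id -o tsv                     # subscription ID
-$ echo "${RESOURCEGROUP} ${DISK_ENCRYPTION_SET_NAME}"   # resource group and disk encryption set name
-----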
diff --git a/modules/installation-azure-regions.adoc b/modules/installation-azure-regions.adoc deleted file mode 100644 index 64d1540d3a7a..000000000000 --- a/modules/installation-azure-regions.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-account.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -[id="installation-azure-regions_{context}"] -= Supported Azure regions - -The installation program dynamically generates the list of available Microsoft Azure regions based on your subscription. - -[discrete] -== Supported Azure public regions - -* `australiacentral` (Australia Central) -* `australiaeast` (Australia East) -* `australiasoutheast` (Australia South East) -* `brazilsouth` (Brazil South) -* `canadacentral` (Canada Central) -* `canadaeast` (Canada East) -* `centralindia` (Central India) -* `centralus` (Central US) -* `eastasia` (East Asia) -* `eastus` (East US) -* `eastus2` (East US 2) -* `francecentral` (France Central) -//* francesouth (France South) -* `germanywestcentral` (Germany West Central) -* `japaneast` (Japan East) -* `japanwest` (Japan West) -* `koreacentral` (Korea Central) -* `koreasouth` (Korea South) -* `northcentralus` (North Central US) -* `northeurope` (North Europe) -* `norwayeast` (Norway East) -* `qatarcentral` (Qatar Central) -* `southafricanorth` (South Africa North) -//* southafricawest (South Africa West) -* `southcentralus` (South Central US) -* `southeastasia` (Southeast Asia) -* `southindia` (South India) -* `swedencentral` (Sweden Central) -* `switzerlandnorth` (Switzerland North) -//* uaecentral (UAE Central) -* `uaenorth` (UAE North) -* `uksouth` (UK South) -* `ukwest` (UK West) -* `westcentralus` (West Central US) -* `westeurope` (West Europe) -* `westindia` (West India) -* `westus` (West US) -* `westus2` (West US 2) -* `westus3` (West US 3) - -[discrete] -== Supported Azure Government regions - -Support for the following Microsoft Azure Government (MAG) regions was added in {product-title} version 4.6: - -* `usgovtexas` (US Gov Texas) -* `usgovvirginia` (US Gov Virginia) -//* usdodcentral (US DoD Central) -//* usdodeast (US DoD East) -//* usgovarizona (US Gov Arizona) -//* usgoviowa (US Gov Iowa) - -You can reference all available MAG regions in the link:https://azure.microsoft.com/en-us/global-infrastructure/geographies/#geographies[Azure documentation]. Other provided MAG regions are expected to work with {product-title}, but have not been tested. 
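-
-If you want to confirm which regions are visible to your subscription before you run the installation program, one option, shown here only as a sketch, is to list them with the Azure CLI:
-
-[source,terminal]
-----
-$ az account list-locations -o table
-----
-
-The returned list can include regions that {product-title} does not support; compare the output against the preceding lists.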
diff --git a/modules/installation-azure-service-principal.adoc b/modules/installation-azure-service-principal.adoc deleted file mode 100644 index 195a9b309e2f..000000000000 --- a/modules/installation-azure-service-principal.adoc +++ /dev/null @@ -1,265 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-account.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-account"] -:ash: -endif::[] -ifeval::["{context}" == "installing-azure-account"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:upi: -endif::[] - -:_content-type: PROCEDURE -[id="installation-azure-service-principal_{context}"] -= Creating a service principal - -Because {product-title} and its installation program create Microsoft Azure resources by using the Azure Resource Manager, you must create a service principal to represent it. - -.Prerequisites - -* Install or update the link:https://docs.microsoft.com/en-us/cli/azure/install-azure-cli-yum?view=azure-cli-latest[Azure CLI]. -* Your Azure account has the required roles for the subscription that you use. -ifdef::ipi[] -* If you want to use a custom role, you have created a link:https://learn.microsoft.com/en-us/azure/role-based-access-control/custom-roles[custom role] with the required permissions listed in the _Required Azure permissions for installer-provisioned infrastructure_ section. -endif::ipi[] -ifdef::upi[] -* If you want to use a custom role, you have created a link:https://learn.microsoft.com/en-us/azure/role-based-access-control/custom-roles[custom role] with the required permissions listed in the _Required Azure permissions for user-provisioned infrastructure_ section. -endif::upi[] - -.Procedure - -ifdef::ash[] -. Register your environment: -+ -[source,terminal] ----- -$ az cloud register -n AzureStackCloud --endpoint-resource-manager <1> ----- -<1> Specify the Azure Resource Manager endpoint, \`https://management../`. -+ -See the link:https://docs.microsoft.com/en-us/azure-stack/mdc/azure-stack-version-profiles-azurecli-2-tzl#connect-to-azure-stack-hub[Microsoft documentation] for details. - -. Set the active environment: -+ -[source,terminal] ----- -$ az cloud set -n AzureStackCloud ----- - -. Update your environment configuration to use the specific API version for Azure Stack Hub: -+ -[source,terminal] ----- -$ az cloud update --profile 2019-03-01-hybrid ----- -endif::ash[] - -. Log in to the Azure CLI: -+ -[source,terminal] ----- -$ az login ----- -ifdef::ash[] -+ -If you are in a multitenant environment, you must also supply the tenant ID. -endif::ash[] - -. If your Azure account uses subscriptions, ensure that you are using the right -subscription: - -.. 
View the list of available accounts and record the `tenantId` value for the -subscription you want to use for your cluster: -+ -[source,terminal] ----- -$ az account list --refresh ----- -+ -.Example output -[source,terminal] ----- -[ - { -ifndef::ash[] - "cloudName": "AzureCloud", -endif::[] -ifdef::ash[] - "cloudName": AzureStackCloud", -endif::[] - "id": "9bab1460-96d5-40b3-a78e-17b15e978a80", - "isDefault": true, - "name": "Subscription Name", - "state": "Enabled", - "tenantId": "6057c7e9-b3ae-489d-a54e-de3f6bf6a8ee", - "user": { - "name": "you@example.com", - "type": "user" - } - } -] ----- - -.. View your active account details and confirm that the `tenantId` value matches -the subscription you want to use: -+ -[source,terminal] ----- -$ az account show ----- -+ -.Example output -[source,terminal] ----- -{ -ifndef::ash[] - "environmentName": "AzureCloud", -endif::[] -ifdef::ash[] - "environmentName": AzureStackCloud", -endif::[] - "id": "9bab1460-96d5-40b3-a78e-17b15e978a80", - "isDefault": true, - "name": "Subscription Name", - "state": "Enabled", - "tenantId": "6057c7e9-b3ae-489d-a54e-de3f6bf6a8ee", <1> - "user": { - "name": "you@example.com", - "type": "user" - } -} ----- -<1> Ensure that the value of the `tenantId` parameter is the correct subscription ID. - -.. If you are not using the right subscription, change the active subscription: -+ -[source,terminal] ----- -$ az account set -s <1> ----- -<1> Specify the subscription ID. - -.. Verify the subscription ID update: -+ -[source,terminal] ----- -$ az account show ----- -+ -.Example output -[source,terminal] ----- -{ -ifndef::ash[] - "environmentName": "AzureCloud", -endif::[] -ifdef::ash[] - "environmentName": AzureStackCloud", -endif::[] - "id": "33212d16-bdf6-45cb-b038-f6565b61edda", - "isDefault": true, - "name": "Subscription Name", - "state": "Enabled", - "tenantId": "8049c7e9-c3de-762d-a54e-dc3f6be6a7ee", - "user": { - "name": "you@example.com", - "type": "user" - } -} ----- - -. Record the `tenantId` and `id` parameter values from the output. You need these values during the {product-title} installation. - -ifdef::ash[] -. Create the service principal for your account: -+ -[source,terminal] ----- -$ az ad sp create-for-rbac --role Contributor --name \ <1> - --scopes /subscriptions/ <2> - --years <3> ----- -<1> Specify the service principal name. -<2> Specify the subscription ID. -<3> Specify the number of years. By default, a service principal expires in one year. By using the `--years` option you can extend the validity of your service principal. -+ -.Example output -[source,terminal] ----- -Creating 'Contributor' role assignment under scope '/subscriptions/' -The output includes credentials that you must protect. Be sure that you do not -include these credentials in your code or check the credentials into your source -control. For more information, see https://aka.ms/azadsp-cli -{ - "appId": "ac461d78-bf4b-4387-ad16-7e32e328aec6", - "displayName": ", - "password": "00000000-0000-0000-0000-000000000000", - "tenantId": "8049c7e9-c3de-762d-a54e-dc3f6be6a7ee" -} ----- -endif::ash[] - -ifndef::ash[] -. Create the service principal for your account: -+ -[source,terminal] ----- -$ az ad sp create-for-rbac --role \// <1> - --name \// <2> - --scopes /subscriptions/ <3> ----- -<1> Defines the role name. You can use the `Contributor` role, or you can specify a custom role which contains the necessary permissions. -<2> Defines the service principal name. -<3> Specifies the subscription ID. 
-+
-.Example output
-[source,terminal]
-----
-Creating 'Contributor' role assignment under scope '/subscriptions/<subscription_id>'
-The output includes credentials that you must protect. Be sure that you do not
-include these credentials in your code or check the credentials into your source
-control. For more information, see https://aka.ms/azadsp-cli
-{
-  "appId": "ac461d78-bf4b-4387-ad16-7e32e328aec6",
-  "displayName": "<service_principal_name>",
-  "password": "00000000-0000-0000-0000-000000000000",
-  "tenantId": "8049c7e9-c3de-762d-a54e-dc3f6be6a7ee"
-}
-----
-endif::ash[]
-
-. Record the values of the `appId` and `password` parameters from the previous
-output. You need these values during {product-title} installation.
-
-ifndef::ash[]
-. If you applied the `Contributor` role to your service principal, assign the `User Access Administrator` role by running the following command:
-+
-[source,terminal]
-----
-$ az role assignment create --role "User Access Administrator" \
-  --assignee-object-id $(az ad sp show --id <appId> --query id -o tsv) <1>
-----
-<1> Specify the `appId` parameter value for your service principal.
-endif::ash[]
-
-ifeval::["{context}" == "installing-azure-stack-hub-user-infra"]
-:!ash:
-endif::[]
-ifeval::["{context}" == "installing-azure-stack-hub-account"]
-:!ash:
-endif::[]
-ifeval::["{context}" == "installing-azure-account"]
-:!ipi:
-endif::[]
-ifeval::["{context}" == "installing-azure-user-infra"]
-:!upi:
-endif::[]
\ No newline at end of file
diff --git a/modules/installation-azure-stack-hub-config-yaml.adoc b/modules/installation-azure-stack-hub-config-yaml.adoc
deleted file mode 100644
index a4717e2b1223..000000000000
--- a/modules/installation-azure-stack-hub-config-yaml.adoc
+++ /dev/null
@@ -1,220 +0,0 @@
-// Module included in the following assemblies:
-//
-// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc
-// * installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc
-
-ifeval::["{context}" == "installing-azure-stack-hub-user-infra"]
-:ash:
-endif::[]
-ifeval::["{context}" == "installing-azure-stack-hub-default"]
-:ash-default:
-endif::[]
-ifeval::["{context}" == "installing-azure-stack-hub-network-customizations"]
-:ash-network:
-endif::[]
-
-[id="installation-azure-stack-hub-config-yaml_{context}"]
-= Sample customized install-config.yaml file for Azure Stack Hub
-
-You can customize the `install-config.yaml` file to specify more details about your {product-title} cluster's platform or modify the values of the required parameters.
-
-[IMPORTANT]
-====
-This sample YAML file is provided for reference only. Use it as a resource to enter parameter values into the installation configuration file that you created manually.
-==== - -ifdef::ash[] -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com -controlPlane: <1> - name: master - platform: - azure: - osDisk: - diskSizeGB: 1024 <2> - diskType: premium_LRS - replicas: 3 -compute: <1> -- name: worker - platform: - azure: - osDisk: - diskSizeGB: 512 <2> - diskType: premium_LRS - replicas: 0 -metadata: - name: test-cluster <3> -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 - networkType: OVNKubernetes <4> - serviceNetwork: - - 172.30.0.0/16 -platform: - azure: - armEndpoint: azurestack_arm_endpoint <5> - baseDomainResourceGroupName: resource_group <6> - region: azure_stack_local_region <7> - resourceGroupName: existing_resource_group <8> - outboundType: Loadbalancer - cloudName: AzureStackCloud <9> -pullSecret: '{"auths": ...}' <10> -ifndef::openshift-origin[] -fips: false <11> -additionalTrustBundle: | <12> - -----BEGIN CERTIFICATE----- - - -----END CERTIFICATE----- -sshKey: ssh-ed25519 AAAA... <13> -endif::openshift-origin[] -ifdef::openshift-origin[] -additionalTrustBundle: | <11> - -----BEGIN CERTIFICATE----- - - -----END CERTIFICATE----- -sshKey: ssh-ed25519 AAAA... <12> -endif::openshift-origin[] ----- -<1> The `controlPlane` section is a single mapping, but the `compute` section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Only one control plane pool is used. -<2> You can specify the size of the disk to use in GB. Minimum recommendation for control plane nodes is 1024 GB. -<3> Specify the name of the cluster. -<4> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<5> Specify the Azure Resource Manager endpoint that your Azure Stack Hub operator provides. -<6> Specify the name of the resource group that contains the DNS zone for your base domain. -<7> Specify the name of your Azure Stack Hub local region. -<8> Specify the name of an already existing resource group to install your cluster to. If undefined, a new resource group is created for the cluster. -<9> Specify the Azure Stack Hub environment as your target platform. -<10> Specify the pull secret required to authenticate your cluster. -ifndef::openshift-origin[] -<11> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<12> If your Azure Stack Hub environment uses an internal certificate authority (CA), add the necessary certificate bundle in `.pem` format. -<13> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<11> If your Azure Stack Hub environment uses an internal certificate authority (CA), add the necessary certificate bundle in `.pem` format. -<12> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. 
-endif::openshift-origin[] -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -endif::ash[] - -ifdef::ash-default,ash-network[] -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com <1> -credentialsMode: Manual -controlPlane: <2> <3> - name: master - platform: - azure: - osDisk: - diskSizeGB: 1024 <4> - diskType: premium_LRS - replicas: 3 -compute: <2> -- name: worker - platform: - azure: - osDisk: - diskSizeGB: 512 <4> - diskType: premium_LRS - replicas: 3 -metadata: - name: test-cluster <1> <5> -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 - networkType: OVNKubernetes <6> - serviceNetwork: - - 172.30.0.0/16 -platform: - azure: - armEndpoint: azurestack_arm_endpoint <1> <7> - baseDomainResourceGroupName: resource_group <1> <8> - region: azure_stack_local_region <1> <9> - resourceGroupName: existing_resource_group <10> - outboundType: Loadbalancer - cloudName: AzureStackCloud <1> - clusterOSimage: https://vhdsa.blob.example.example.com/vhd/rhcos-410.84.202112040202-0-azurestack.x86_64.vhd <1> <11> -pullSecret: '{"auths": ...}' <1> <12> -ifndef::openshift-origin[] -fips: false <13> -sshKey: ssh-ed25519 AAAA... <14> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA...<13> -endif::openshift-origin[] -ifndef::openshift-origin[] -additionalTrustBundle: | <15> -endif::openshift-origin[] -ifdef::openshift-origin[] -additionalTrustBundle: | <14> -endif::openshift-origin[] - -----BEGIN CERTIFICATE----- - - -----END CERTIFICATE----- ----- -<1> Required. -<2> If you do not provide these parameters and values, the installation program provides the default value. -<3> The `controlPlane` section is a single mapping, but the `compute` section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Although both sections currently define a single machine pool, it is possible that future versions of {product-title} will support defining multiple compute pools during installation. Only one control plane pool is used. -<4> You can specify the size of the disk to use in GB. Minimum recommendation for control plane nodes is 1024 GB. -<5> The name of the cluster. -<6> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<7> The Azure Resource Manager endpoint that your Azure Stack Hub operator provides. -<8> The name of the resource group that contains the DNS zone for your base domain. -<9> The name of your Azure Stack Hub local region. -<10> The name of an existing resource group to install your cluster to. If undefined, a new resource group is created for the cluster. -<11> The URL of a storage blob in the Azure Stack environment that contains an {op-system} VHD. -<12> The pull secret required to authenticate your cluster. -ifndef::openshift-origin[] -<13> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. 
-+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<14> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<13> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -ifndef::openshift-origin[] -<15> If the Azure Stack Hub environment is using an internal Certificate Authority (CA), adding the CA certificate is required. -endif::openshift-origin[] -ifdef::openshift-origin[] -<14> If the Azure Stack Hub environment is using an internal Certificate Authority (CA), adding the CA certificate is required. -endif::openshift-origin[] - -endif::ash-default,ash-network[] - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-default"] -:!ash-default: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-network-customizations"] -:!ash-network: -endif::[] diff --git a/modules/installation-azure-stack-hub-network-config.adoc b/modules/installation-azure-stack-hub-network-config.adoc deleted file mode 100644 index fe2129ed4101..000000000000 --- a/modules/installation-azure-stack-hub-network-config.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc - -[id="installation-azure-stack-hub-network-config_{context}"] -= Configuring a DNS zone in Azure Stack Hub - -To successfully install {product-title} on Azure Stack Hub, you must create DNS records in an Azure Stack Hub DNS zone. The DNS zone must be authoritative for the domain. To delegate a registrar's DNS zone to Azure Stack Hub, see Microsoft's documentation for link:https://docs.microsoft.com/en-us/azure-stack/operator/azure-stack-integrate-dns?view=azs-2102[Azure Stack Hub datacenter DNS integration]. diff --git a/modules/installation-azure-stack-hub-permissions.adoc b/modules/installation-azure-stack-hub-permissions.adoc deleted file mode 100644 index fd442632162d..000000000000 --- a/modules/installation-azure-stack-hub-permissions.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -[id="installation-azure-stack-hub-permissions_{context}"] -= Required Azure Stack Hub roles - -Your Microsoft Azure Stack Hub account must have the following roles for the subscription that you use: - -* `Owner` - -To set roles on the Azure portal, see the link:https://docs.microsoft.com/en-us/azure-stack/user/azure-stack-manage-permissions?view=azs-2102[Manage access to resources in Azure Stack Hub with role-based access control] in the Microsoft documentation. 
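If you prefer to verify the role assignment from the Azure CLI instead of the portal, a check along the following lines can confirm it. This is a minimal sketch; `<user_principal_name>` and `<subscription_id>` are illustrative placeholders:

[source,terminal]
----
$ az role assignment list --assignee <user_principal_name> \
    --scope /subscriptions/<subscription_id> \
    --query "[].roleDefinitionName" -o tsv
----

The output should include `Owner` for the subscription that you plan to use.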
diff --git a/modules/installation-azure-tested-machine-types.adoc b/modules/installation-azure-tested-machine-types.adoc deleted file mode 100644 index d0ead0fbd221..000000000000 --- a/modules/installation-azure-tested-machine-types.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// installing/installing_azure/installing-azure-customizations.adoc -// installing/installing_azure/installing-azure-government-region.adoc -// installing/installing_azure/installing-azure-network-customizations.adoc -// installing/installing_azure/installing-azure-private.adoc -// installing/installing_azure/installing-azure-user-infra.adoc -// installing/installing_azure/installing-azure-vnet.adoc - -[id="installation-azure-tested-machine-types_{context}"] -= Tested instance types for Azure - -The following Microsoft Azure instance types have been tested with {product-title}. - -.Machine types based on 64-bit x86 architecture -[%collapsible] -==== -include::https://raw.githubusercontent.com/openshift/installer/master/docs/user/azure/tested_instance_types_x86_64.md[] -==== diff --git a/modules/installation-azure-user-defined-routing.adoc b/modules/installation-azure-user-defined-routing.adoc deleted file mode 100644 index f06596a66d59..000000000000 --- a/modules/installation-azure-user-defined-routing.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-private.adoc - -[id="installation-azure-user-defined-routing_{context}"] -= User-defined outbound routing - -In {product-title}, you can choose your own outbound routing for a cluster to -connect to the internet. This allows you to skip the creation of public IP -addresses and the public load balancer. - -You can configure user-defined routing by modifying parameters in the -`install-config.yaml` file before installing your cluster. A pre-existing VNet -is required to use outbound routing when installing a cluster; the installation -program is not responsible for configuring this. - -When configuring a cluster to use user-defined routing, the installation program -does not create the following resources: - -* Outbound rules for access to the internet. -* Public IPs for the public load balancer. -* Kubernetes Service object to add the cluster machines to the public load -balancer for outbound requests. - -You must ensure the following items are available before setting user-defined -routing: - -* Egress to the internet is possible to pull container images, unless using an -{product-registry} mirror. -* The cluster can access Azure APIs. -* Various allowlist endpoints are configured. You can reference these endpoints -in the _Configuring your firewall_ section. - -There are several pre-existing networking setups that are supported for internet -access using user-defined routing. - -[discrete] -== Private cluster with network address translation - -You can use link:https://docs.microsoft.com/en-us/azure/virtual-network/nat-overview[Azure VNET network address translation (NAT)] -to provide outbound internet access for the subnets in your cluster. You can -reference -link:https://docs.microsoft.com/en-us/azure/virtual-network/quickstart-create-nat-gateway-cli[Create a NAT gateway using Azure CLI] -in the Azure documentation for configuration instructions. - -When using a VNet setup with Azure NAT and user-defined routing configured, you -can create a private cluster with no public endpoints. 
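For reference, a minimal Azure CLI sketch of this setup might look like the following. The resource group, VNet, and subnet names are illustrative placeholders, and the linked Azure documentation remains the authoritative procedure:

[source,terminal]
----
$ az network public-ip create -g <resource_group> -n cluster-nat-pip --sku Standard

$ az network nat gateway create -g <resource_group> -n cluster-natgw \
    --public-ip-addresses cluster-nat-pip --idle-timeout 10

$ az network vnet subnet update -g <resource_group> --vnet-name <vnet_name> \
    -n <subnet_name> --nat-gateway cluster-natgw
----

Repeat the subnet update for each cluster subnet that requires outbound access, such as the control plane and compute subnets.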
- -[discrete] -== Private cluster with Azure Firewall - -You can use Azure Firewall to provide outbound routing for the VNet used to -install the cluster. You can learn more about -link:https://docs.microsoft.com/en-us/azure/aks/egress-outboundtype#deploy-a-cluster-with-outbound-type-of-udr-and-azure-firewall[providing user-defined routing with Azure Firewall] -in the Azure documentation. - -When using a VNet setup with Azure Firewall and user-defined routing configured, -you can create a private cluster with no public endpoints. - -[discrete] -== Private cluster with a proxy configuration - -You can use a proxy with user-defined routing to allow egress to the internet. -You must ensure that cluster Operators do not access Azure APIs using a -proxy; Operators must have access to Azure APIs outside of the proxy. - -When using the default route table for subnets, with `0.0.0.0/0` populated -automatically by Azure, all Azure API requests are routed over Azure's internal -network even though the IP addresses are public. As long as the Network Security -Group rules allow egress to Azure API endpoints, proxies with user-defined -routing configured allow you to create private clusters with no public -endpoints. - -[discrete] -== Private cluster with no internet access - -You can install a private network that restricts all access to the internet, except the Azure API. This is accomplished by mirroring the release image registry locally. Your cluster must have access to the following: - -* An {product-registry} mirror that allows for pulling container images -* Access to Azure APIs - -With these requirements available, you can use user-defined routing to create -private clusters with no public endpoints. diff --git a/modules/installation-azure-user-infra-completing.adoc b/modules/installation-azure-user-infra-completing.adoc deleted file mode 100644 index 2cc4fb5a7edd..000000000000 --- a/modules/installation-azure-user-infra-completing.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-user-infra"] -:cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:cp: Azure Stack Hub -endif::[] - -:_content-type: PROCEDURE -[id="installation-azure-user-infra-completing_{context}"] -= Completing an {cp} installation on user-provisioned infrastructure - -After you start the {product-title} installation on Microsoft {cp} -user-provisioned infrastructure, you can monitor the cluster events until the -cluster is ready. - -.Prerequisites - -* Deploy the bootstrap machine for an {product-title} cluster on user-provisioned {cp} infrastructure. -* Install the `oc` CLI and log in. - -.Procedure - -* Complete the cluster installation: -+ -[source,terminal] ----- -$ ./openshift-install --dir wait-for install-complete <1> ----- -+ -.Example output -[source,terminal] ----- -INFO Waiting up to 30m0s for the cluster to initialize... ----- -<1> For ``, specify the path to the directory that you -stored the installation files in. -+ -[IMPORTANT] -==== -* The Ignition config files that the installation program generates contain certificates that expire after 24 hours, which are then renewed at that time. 
If the cluster is shut down before renewing the certificates and the cluster is later restarted after the 24 hours have elapsed, the cluster automatically recovers the expired certificates. The exception is that you must manually approve the pending `node-bootstrapper` certificate signing requests (CSRs) to recover kubelet certificates. See the documentation for _Recovering from expired control plane certificates_ for more information. - -* It is recommended that you use Ignition config files within 12 hours after they are generated because the 24-hour certificate rotates from 16 to 22 hours after the cluster is installed. By using the Ignition config files within 12 hours, you can avoid installation failure if the certificate update runs during installation. -==== - -ifeval::["{context}" == "installing-azure-user-infra"] -:!cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!cp: Azure Stack Hub -endif::[] diff --git a/modules/installation-azure-user-infra-deploying-rhcos.adoc b/modules/installation-azure-user-infra-deploying-rhcos.adoc deleted file mode 100644 index 7dcb6ecc7f7a..000000000000 --- a/modules/installation-azure-user-infra-deploying-rhcos.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-user-infra"] -:azure: -:cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -:cp: Azure Stack Hub -endif::[] - -:_content-type: PROCEDURE -[id="installation-azure-user-infra-deploying-rhcos_{context}"] -= Deploying the {op-system} cluster image for the {cp} infrastructure - -You must use a valid {op-system-first} image for Microsoft {cp} for your -{product-title} nodes. - -.Prerequisites - -* Configure an Azure account. - -* Generate the Ignition config files for your cluster. - -* Store the {op-system} virtual hard disk (VHD) cluster image in an Azure storage container. - -* Store the bootstrap Ignition config file in an Azure storage container. - -.Procedure - -. Copy the template from the *ARM template for image storage* section of -this topic and save it as `02_storage.json` in your cluster's installation directory. This template -describes the image storage that your cluster requires. - -. Export the {op-system} VHD blob URL as a variable: -+ -[source,terminal] ----- -$ export VHD_BLOB_URL=`az storage blob url --account-name ${CLUSTER_NAME}sa --account-key ${ACCOUNT_KEY} -c vhd -n "rhcos.vhd" -o tsv` ----- - -. Deploy the cluster image: -+ -[source,terminal] ----- -$ az deployment group create -g ${RESOURCE_GROUP} \ - --template-file "/02_storage.json" \ - --parameters vhdBlobURL="${VHD_BLOB_URL}" \ <1> - --parameters baseName="${INFRA_ID}" \ <2> - --parameters storageAccount="${CLUSTER_NAME}sa" \ <3> - --parameters architecture="" <4> ----- -<1> The blob URL of the {op-system} VHD to be used to create master and worker machines. -<2> The base name to be used in resource names; this is usually the cluster's infrastructure ID. -<3> The name of your Azure storage account. -<4> Specify the system architecture. Valid values are `x64` (default) or `Arm64`. 
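Optionally, you can confirm that the image deployment completed before you continue. A quick check using the same `${RESOURCE_GROUP}` variable might look like this:

[source,terminal]
----
$ az deployment group list -g ${RESOURCE_GROUP} \
    --query "[].{name:name, state:properties.provisioningState}" -o table
----

A state of `Succeeded` for the storage deployment indicates that the image resources were created.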
- -ifeval::["{context}" == "installing-azure-user-infra"] -:!azure: -:!cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -:!cp: Azure Stack Hub -endif::[] diff --git a/modules/installation-azure-user-infra-uploading-rhcos.adoc b/modules/installation-azure-user-infra-uploading-rhcos.adoc deleted file mode 100644 index b244c84d2f99..000000000000 --- a/modules/installation-azure-user-infra-uploading-rhcos.adoc +++ /dev/null @@ -1,179 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc - -ifeval::["{context}" == "installing-azure-user-infra"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-default"] -:ash-ipi: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-network-customizations"] -:ash-ipi: -endif::[] - -:_content-type: PROCEDURE -[id="installation-azure-user-infra-uploading-rhcos_{context}"] -ifndef::ash-ipi[] -= Uploading the {op-system} cluster image and bootstrap Ignition config file -endif::ash-ipi[] -ifdef::ash-ipi[] -= Uploading the {op-system} cluster image -endif::ash-ipi[] - -ifndef::ash-ipi[] -The Azure client does not support deployments based on files existing locally. You -must copy and store the {op-system} virtual hard disk (VHD) cluster image and bootstrap Ignition config file in a storage container so they are accessible during deployment. -endif::ash-ipi[] - -ifdef::ash-ipi[] -You must download the {op-system} virtual hard disk (VHD) cluster image and upload it to your Azure Stack Hub environment so that it is accessible during deployment. -endif::ash-ipi[] - -.Prerequisites - -* Configure an Azure account. -ifndef::ash-ipi[] -* Generate the Ignition config files for your cluster. -endif::ash-ipi[] - -.Procedure - -ifndef::ash-ipi[] -. Create an Azure storage account to store the VHD cluster image: -+ -[source,terminal] ----- -$ az storage account create -g ${RESOURCE_GROUP} --location ${AZURE_REGION} --name ${CLUSTER_NAME}sa --kind Storage --sku Standard_LRS ----- -+ -[WARNING] -==== -The Azure storage account name must be between 3 and 24 characters in length and -use numbers and lower-case letters only. If your `CLUSTER_NAME` variable does -not follow these restrictions, you must manually define the Azure storage -account name. For more information on Azure storage account name restrictions, -see link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/error-storage-account-name[Resolve errors for storage account names] -in the Azure documentation. -==== - -. Export the storage account key as an environment variable: -+ -[source,terminal] ----- -$ export ACCOUNT_KEY=`az storage account keys list -g ${RESOURCE_GROUP} --account-name ${CLUSTER_NAME}sa --query "[0].value" -o tsv` ----- - -. 
Export the URL of the {op-system} VHD to an environment variable: -+ -ifdef::azure[] -[source,terminal] ----- -$ export VHD_URL=`openshift-install coreos print-stream-json | jq -r '.architectures.."rhel-coreos-extensions"."azure-disk".url'` ----- -endif::azure[] -ifdef::ash[] -[source,terminal] ----- -$ export COMPRESSED_VHD_URL=$(openshift-install coreos print-stream-json | jq -r '.architectures.x86_64.artifacts.azurestack.formats."vhd.gz".disk.location') ----- -endif::ash[] -+ -[IMPORTANT] -==== -The {op-system} images might not change with every release of {product-title}. -You must specify an image with the highest version that is -less than or equal to the {product-title} version that you install. Use the image version -that matches your {product-title} version if it is available. -==== - -. Create the storage container for the VHD: -+ -[source,terminal] ----- -$ az storage container create --name vhd --account-name ${CLUSTER_NAME}sa --account-key ${ACCOUNT_KEY} ----- -ifdef::ash[] -. Download the compressed {op-system} VHD file locally: -+ -[source,terminal] ----- -$ curl -O -L ${COMPRESSED_VHD_URL} ----- - -. Decompress the VHD file. -+ -[NOTE] -==== -The decompressed VHD file is approximately 16 GB, so be sure that your host system has 16 GB of free space available. You can delete the VHD file after you upload it. -==== -endif::ash[] - -. Copy the local VHD to a blob: -+ -ifdef::azure[] -[source,terminal] ----- -$ az storage blob copy start --account-name ${CLUSTER_NAME}sa --account-key ${ACCOUNT_KEY} --destination-blob "rhcos.vhd" --destination-container vhd --source-uri "${VHD_URL}" ----- -endif::azure[] -ifdef::ash[] -[source,terminal] ----- -$ az storage blob upload --account-name ${CLUSTER_NAME}sa --account-key ${ACCOUNT_KEY} -c vhd -n "rhcos.vhd" -f rhcos--azurestack.x86_64.vhd ----- -endif::ash[] - -. Create a blob storage container and upload the generated `bootstrap.ign` file: -+ -[source,terminal] ----- -$ az storage container create --name files --account-name ${CLUSTER_NAME}sa --account-key ${ACCOUNT_KEY} ----- -+ -[source,terminal] ----- -$ az storage blob upload --account-name ${CLUSTER_NAME}sa --account-key ${ACCOUNT_KEY} -c "files" -f "/bootstrap.ign" -n "bootstrap.ign" ----- -endif::ash-ipi[] - -ifdef::ash-ipi[] -. Obtain the {op-system} VHD cluster image: -.. Export the URL of the {op-system} VHD to an environment variable. -+ -[source,terminal] ----- -$ export COMPRESSED_VHD_URL=$(openshift-install coreos print-stream-json | jq -r '.architectures.x86_64.artifacts.azurestack.formats."vhd.gz".disk.location') ----- -.. Download the compressed {op-system} VHD file locally. -+ -[source,terminal] ----- -$ curl -O -L ${COMPRESSED_VHD_URL} ----- -. Decompress the VHD file. -+ -[NOTE] -==== -The decompressed VHD file is approximately 16 GB, so be sure that your host system has 16 GB of free space available. The VHD file can be deleted once you have uploaded it. -==== -. Upload the local VHD to the Azure Stack Hub environment, making sure that the blob is publicly available. For example, you can upload the VHD to a blob using the `az` cli or the web portal. 
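For example, with the `az` CLI the upload might look like the following sketch, where `<storage_account>`, `<account_key>`, and the local VHD file name are illustrative placeholders:

[source,terminal]
----
$ az storage container create --name vhd --public-access blob \
    --account-name <storage_account> --account-key <account_key>

$ az storage blob upload --account-name <storage_account> --account-key <account_key> \
    -c vhd -n "rhcos.vhd" -f <decompressed_vhd_file>
----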
-endif::ash-ipi[] - -ifeval::["{context}" == "installing-azure-user-infra"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-default"] -:!ash-ipi: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-network-customizations"] -:!ash-ipi: -endif::[] diff --git a/modules/installation-azure-user-infra-wait-for-bootstrap.adoc b/modules/installation-azure-user-infra-wait-for-bootstrap.adoc deleted file mode 100644 index c30d1e24036d..000000000000 --- a/modules/installation-azure-user-infra-wait-for-bootstrap.adoc +++ /dev/null @@ -1,77 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-user-infra"] -:azure: -:cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -:cp: Azure Stack Hub -endif::[] - -:_content-type: PROCEDURE -[id="installation-azure-user-infra-wait-for-bootstrap_{context}"] -= Wait for bootstrap completion and remove bootstrap resources in {cp} - -After you create all of the required infrastructure in Microsoft {cp}, wait for -the bootstrap process to complete on the machines that you provisioned by using -the Ignition config files that you generated with the installation program. - -.Prerequisites - -* Configure an Azure account. -* Generate the Ignition config files for your cluster. -* Create and configure a VNet and associated subnets in {cp}. -* Create and configure networking and load balancers in {cp}. -* Create control plane and compute roles. -* Create the bootstrap machine. -* Create the control plane machines. - -.Procedure - -. Change to the directory that contains the installation program and run the -following command: -+ -[source,terminal] ----- -$ ./openshift-install wait-for bootstrap-complete --dir \ <1> - --log-level info <2> ----- -<1> For ``, specify the path to the directory that you -stored the installation files in. -<2> To view different installation details, specify `warn`, `debug`, or -`error` instead of `info`. -+ -If the command exits without a `FATAL` warning, your production control plane -has initialized. - -. Delete the bootstrap resources: -+ -[source,terminal] ----- -$ az network nsg rule delete -g ${RESOURCE_GROUP} --nsg-name ${INFRA_ID}-nsg --name bootstrap_ssh_in -$ az vm stop -g ${RESOURCE_GROUP} --name ${INFRA_ID}-bootstrap -$ az vm deallocate -g ${RESOURCE_GROUP} --name ${INFRA_ID}-bootstrap -$ az vm delete -g ${RESOURCE_GROUP} --name ${INFRA_ID}-bootstrap --yes -$ az disk delete -g ${RESOURCE_GROUP} --name ${INFRA_ID}-bootstrap_OSDisk --no-wait --yes -$ az network nic delete -g ${RESOURCE_GROUP} --name ${INFRA_ID}-bootstrap-nic --no-wait -$ az storage blob delete --account-key ${ACCOUNT_KEY} --account-name ${CLUSTER_NAME}sa --container-name files --name bootstrap.ign -$ az network public-ip delete -g ${RESOURCE_GROUP} --name ${INFRA_ID}-bootstrap-ssh-pip ----- - -[NOTE] -==== -If you do not delete the bootstrap server, installation may not succeed due to API traffic being routed to the bootstrap server. 
-==== - -ifeval::["{context}" == "installing-azure-user-infra"] -:!azure: -:!cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -:!cp: Azure Stack Hub -endif::[] diff --git a/modules/installation-bare-metal-agent-installer-config-yaml.adoc b/modules/installation-bare-metal-agent-installer-config-yaml.adoc deleted file mode 100644 index 20cd5025e86b..000000000000 --- a/modules/installation-bare-metal-agent-installer-config-yaml.adoc +++ /dev/null @@ -1,108 +0,0 @@ -// Module included in the following assemblies: - -//* installing-with-agent/installing-with-agent.adoc -// Re-used content from Sample install-config.yaml file for bare metal without conditionals - -:_content-type: CONCEPT -[id="installation-bare-metal-agent-installer-config-yaml_{context}"] -= Sample install-config.yaml file for bare metal - -You can customize the `install-config.yaml` file to specify more details about your {product-title} cluster's platform or modify the values of the required parameters. - -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com <1> -compute: <2> -- name: worker - replicas: 0 <3> -controlPlane: <2> - name: master - replicas: 1 <4> -metadata: - name: sno-cluster <5> -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 <6> - hostPrefix: 23 <7> - networkType: OVNKubernetes <8> - serviceNetwork: <9> - - 172.30.0.0/16 -platform: - none: {} <10> -fips: false <11> -pullSecret: '{"auths": ...}' <12> -sshKey: 'ssh-ed25519 AAAA...' <13> ----- -<1> The base domain of the cluster. All DNS records must be sub-domains of this base and include the cluster name. -<2> The `controlPlane` section is a single mapping, but the `compute` section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Only one control plane pool is used. -<3> This parameter controls the number of compute machines that the Agent-based installation waits to discover before triggering the installation process. It is the number of compute machines that must be booted with the generated ISO. - -+ -[NOTE] -==== -If you are installing a three-node cluster, do not deploy any compute machines when you install the {op-system-first} machines. -==== -+ -<4> The number of control plane machines that you add to the cluster. Because the cluster uses these values as the number of etcd endpoints in the cluster, the value must match the number of control plane machines that you deploy. -<5> The cluster name that you specified in your DNS records. -<6> A block of IP addresses from which pod IP addresses are allocated. This block must not overlap with existing physical networks. These IP addresses are used for the pod network. If you need to access the pods from an external network, you must configure load balancers and routers to manage the traffic. -+ -[NOTE] -==== -Class E CIDR range is reserved for a future use. To use the Class E CIDR range, you must ensure your networking environment accepts the IP addresses within the Class E CIDR range. -==== -+ -<7> The subnet prefix length to assign to each individual node. For example, if `hostPrefix` is set to `23`, then each node is assigned a `/23` subnet out of the given `cidr`, which allows for 510 (2^(32 - 23) - 2) pod IP addresses. If you are required to provide access to nodes from an external network, configure load balancers and routers to manage the traffic. 
-<8> The cluster network plugin to install. The supported values are `OVNKubernetes` (default value) and `OpenShiftSDN`. -<9> The IP address pool to use for service IP addresses. You can enter only one IP address pool. This block must not overlap with existing physical networks. If you need to access the services from an external network, configure load balancers and routers to manage the traffic. -<10> You must set the platform to `none` for a single-node cluster. You can set the platform to either `vsphere` or `baremetal` for multi-node clusters. -+ -[NOTE] -==== -If you set the platform to `vsphere` or `baremetal`, you can configure IP address endpoints for cluster nodes in three ways: - -* IPv4 -* IPv6 -* IPv4 and IPv6 in parallel (dual-stack) - -.Example of dual-stack networking -[source,yaml] ----- -networking: - clusterNetwork: - - cidr: 172.21.0.0/16 - hostPrefix: 23 - - cidr: fd02::/48 - hostPrefix: 64 - machineNetwork: - - cidr: 192.168.11.0/16 - - cidr: 2001:DB8::/32 - serviceNetwork: - - 172.22.0.0/16 - - fd03::/112 - networkType: OVNKubernetes -platform: - baremetal: - apiVIPs: - - 192.168.11.3 - - 2001:DB8::4 - ingressVIPs: - - 192.168.11.4 - - 2001:DB8::5 ----- -==== -<11> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== - -<12> This pull secret allows you to authenticate with the services that are provided by the included authorities, including Quay.io, which serves the container images for {product-title} components. -<13> The SSH public key for the `core` user in {op-system-first}. -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. 
-==== diff --git a/modules/installation-bare-metal-config-yaml.adoc b/modules/installation-bare-metal-config-yaml.adoc deleted file mode 100644 index dfa6976cf57f..000000000000 --- a/modules/installation-bare-metal-config-yaml.adoc +++ /dev/null @@ -1,337 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc - - -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:ibm-z: -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:ibm-z-kvm: -:restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-power"] -:ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:ibm-power: -:restricted: -endif::[] -ifeval::["{context}" == "installing-platform-agnostic"] -:agnostic: -endif::[] - -:_content-type: CONCEPT -// Assumption is that attribute once outside ifdef works for several level one headings. -[id="installation-bare-metal-config-yaml_{context}"] -ifndef::ibm-z,ibm-z-kvm,ibm-power,agnostic[] -= Sample install-config.yaml file for bare metal -endif::ibm-z,ibm-z-kvm,ibm-power,agnostic[] -ifdef::ibm-z,ibm-z-kvm[] -= Sample install-config.yaml file for {ibmzProductName} -endif::ibm-z,ibm-z-kvm[] -ifdef::ibm-power[] -= Sample install-config.yaml file for {ibmpowerProductName} -endif::ibm-power[] -ifdef::agnostic[] -= Sample install-config.yaml file for other platforms -endif::agnostic[] - -You can customize the `install-config.yaml` file to specify more details about your {product-title} cluster's platform or modify the values of the required parameters. 
- -[source,yaml,subs="attributes+"] ----- -apiVersion: v1 -baseDomain: example.com <1> -compute: <2> -- hyperthreading: Enabled <3> - name: worker - replicas: 0 <4> -ifeval::["{context}" == "installing-ibm-z"] - architecture: s390x -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] - architecture: s390x -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] - architecture: s390x -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] - architecture: s390x -endif::[] -ifeval::["{context}" == "installing-ibm-power"] - architecture: ppc64le -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] - architecture : ppc64le -endif::[] -controlPlane: <2> - hyperthreading: Enabled <3> - name: master - replicas: 3 <5> -ifeval::["{context}" == "installing-ibm-z"] - architecture: s390x -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] - architecture: s390x -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] - architecture: s390x -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] - architecture: s390x -endif::[] -ifeval::["{context}" == "installing-ibm-power"] - architecture: ppc64le -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] - architecture: ppc64le -endif::[] -metadata: - name: test <6> -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 <7> - hostPrefix: 23 <8> - networkType: OVNKubernetes <9> - serviceNetwork: <10> - - 172.30.0.0/16 -platform: - none: {} <11> -ifndef::openshift-origin[] -fips: false <12> -endif::openshift-origin[] -ifndef::restricted[] -ifndef::openshift-origin[] -pullSecret: '{"auths": ...}' <13> -endif::openshift-origin[] -ifdef::openshift-origin[] -pullSecret: '{"auths": ...}' <12> -endif::openshift-origin[] -ifndef::openshift-origin[] -sshKey: 'ssh-ed25519 AAAA...' <14> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: 'ssh-ed25519 AAAA...' <13> -endif::openshift-origin[] -endif::restricted[] -ifdef::restricted[] -ifndef::openshift-origin[] -pullSecret: '{"auths":{"": {"auth": "","email": "you@example.com"}}}' <13> -endif::openshift-origin[] -ifdef::openshift-origin[] -pullSecret: '{"auths":{"": {"auth": "","email": "you@example.com"}}}' <12> -endif::openshift-origin[] -ifndef::openshift-origin[] -sshKey: 'ssh-ed25519 AAAA...' <14> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: 'ssh-ed25519 AAAA...' 
<13> -endif::openshift-origin[] -endif::restricted[] -ifdef::restricted[] -ifndef::openshift-origin[] -additionalTrustBundle: | <15> - -----BEGIN CERTIFICATE----- - ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ - -----END CERTIFICATE----- -imageContentSources: <16> -- mirrors: -ifdef::ibm-z,ibm-z-kvm[] - - /ocp4/openshift4 - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - /ocp4/openshift4 - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -endif::ibm-z,ibm-z-kvm[] -ifndef::ibm-z,ibm-z-kvm[] - - //release - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - //release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -endif::ibm-z,ibm-z-kvm[] -endif::openshift-origin[] -ifdef::openshift-origin[] -additionalTrustBundle: | <14> - -----BEGIN CERTIFICATE----- - ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ - -----END CERTIFICATE----- -imageContentSources: <15> -- mirrors: -ifdef::ibm-z,ibm-z-kvm[] - - /ocp4/openshift4 - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - /ocp4/openshift4 - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -endif::ibm-z,ibm-z-kvm[] -ifndef::ibm-z,ibm-z-kvm[] - - //release - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - //release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -endif::ibm-z,ibm-z-kvm[] -endif::openshift-origin[] -endif::restricted[] ----- -<1> The base domain of the cluster. All DNS records must be sub-domains of this base and include the cluster name. -<2> The `controlPlane` section is a single mapping, but the `compute` section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Only one control plane pool is used. -<3> Specifies whether to enable or disable simultaneous multithreading (SMT), or hyperthreading. By default, SMT is enabled to increase the performance of the cores in your machines. You can disable it by setting the parameter value to `Disabled`. If you disable SMT, you must disable it in all cluster machines; this includes both control plane and compute machines. -ifndef::ibm-z,ibm-z-kvm[] -+ -[NOTE] -==== -Simultaneous multithreading (SMT) is enabled by default. If SMT is not enabled in your BIOS settings, the `hyperthreading` parameter has no effect. -==== -+ -[IMPORTANT] -==== -If you disable `hyperthreading`, whether in the BIOS or in the `install-config.yaml` file, ensure that your capacity planning accounts for the dramatically decreased machine performance. -==== -endif::ibm-z,ibm-z-kvm[] -ifdef::ibm-z,ibm-z-kvm[] -+ -[NOTE] -==== -Simultaneous multithreading (SMT) is enabled by default. If SMT is not available on your {product-title} nodes, the `hyperthreading` parameter has no effect. -==== -+ -[IMPORTANT] -==== -If you disable `hyperthreading`, whether on your {product-title} nodes or in the `install-config.yaml` file, ensure that your capacity planning accounts for the dramatically decreased machine performance. -==== -endif::ibm-z,ibm-z-kvm[] -<4> You must set this value to `0` when you install {product-title} on user-provisioned infrastructure. In installer-provisioned installations, the parameter controls the number of compute machines that the cluster creates and manages for you. In user-provisioned installations, you must manually deploy the compute machines before you finish installing the cluster. 
-+ -[NOTE] -==== -If you are installing a three-node cluster, do not deploy any compute machines when you install the {op-system-first} machines. -==== -+ -<5> The number of control plane machines that you add to the cluster. Because the cluster uses these values as the number of etcd endpoints in the cluster, the value must match the number of control plane machines that you deploy. -<6> The cluster name that you specified in your DNS records. -<7> A block of IP addresses from which pod IP addresses are allocated. This block must not overlap with existing physical networks. These IP addresses are used for the pod network. If you need to access the pods from an external network, you must configure load balancers and routers to manage the traffic. -+ -[NOTE] -==== -Class E CIDR range is reserved for a future use. To use the Class E CIDR range, you must ensure your networking environment accepts the IP addresses within the Class E CIDR range. -==== -+ -<8> The subnet prefix length to assign to each individual node. For example, if `hostPrefix` is set to `23`, then each node is assigned a `/23` subnet out of the given `cidr`, which allows for 510 (2^(32 - 23) - 2) pod IP addresses. If you are required to provide access to nodes from an external network, configure load balancers and routers to manage the traffic. -<9> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<10> The IP address pool to use for service IP addresses. You can enter only one IP address pool. This block must not overlap with existing physical networks. If you need to access the services from an external network, configure load balancers and routers to manage the traffic. -<11> You must set the platform to `none`. You cannot provide additional platform configuration variables for -ifndef::ibm-z,ibm-z-kvm,ibm-power[your platform.] -ifdef::ibm-z,ibm-z-kvm[{ibmzProductName} infrastructure.] -ifdef::ibm-power[{ibmpowerProductName} infrastructure.] -+ -[IMPORTANT] -==== -Clusters that are installed with the platform type `none` are unable to use some features, such as managing compute machines with the Machine API. This limitation applies even if the compute machines that are attached to the cluster are installed on a platform that would normally support the feature. This parameter cannot be changed after installation. -==== -ifndef::openshift-origin[] -<12> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on `x86_64`, `ppc64le`, and `s390x` architectures. -==== -endif::openshift-origin[] -ifndef::restricted[] -ifndef::openshift-origin[] -<13> The {cluster-manager-url-pull}. This pull secret allows you to authenticate with the services that are provided by the included authorities, including Quay.io, which serves the container images for {product-title} components. -endif::openshift-origin[] -ifdef::openshift-origin[] -<12> The {cluster-manager-url-pull}. This pull secret allows you to authenticate with the services that are provided by the included authorities, including Quay.io, which serves the container images for {product-title} components. 
-endif::openshift-origin[] -endif::restricted[] -ifdef::restricted[] -ifndef::openshift-origin[] -<13> For ``, specify the registry domain name, and optionally the port, that your mirror registry uses to serve content. For example, `registry.example.com` or `registry.example.com:5000`. For ``, specify the base64-encoded user name and password for your mirror registry. -endif::openshift-origin[] -ifdef::openshift-origin[] -<12> For ``, specify the registry domain name, and optionally the port, that your mirror registry uses to serve content. For example, `registry.example.com` or `registry.example.com:5000`. For ``, specify the base64-encoded user name and password for your mirror registry. -endif::openshift-origin[] -endif::restricted[] -ifndef::openshift-origin[] -<14> The SSH public key for the `core` user in {op-system-first}. -endif::openshift-origin[] -ifdef::openshift-origin[] -<13> The SSH public key for the `core` user in {op-system-first}. -endif::openshift-origin[] -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -ifdef::restricted[] -ifndef::ibm-z,ibm-z-kvm[] -ifndef::openshift-origin[] -<15> Provide the contents of the certificate file that you used for your mirror registry. -endif::openshift-origin[] -ifdef::openshift-origin[] -<14> Provide the contents of the certificate file that you used for your mirror registry. -endif::openshift-origin[] -endif::ibm-z,ibm-z-kvm[] -ifdef::ibm-z,ibm-z-kvm[] -<15> Add the `additionalTrustBundle` parameter and value. The value must be the contents of the certificate file that you used for your mirror registry. The certificate file can be an existing, trusted certificate authority or the self-signed certificate that you generated for the mirror registry. -endif::ibm-z,ibm-z-kvm[] -ifndef::openshift-origin[] -<16> Provide the `imageContentSources` section from the output of the command to mirror the repository. -endif::openshift-origin[] -ifdef::openshift-origin[] -<15> Provide the `imageContentSources` section from the output of the command to mirror the repository. 
-endif::openshift-origin[] -endif::restricted[] - - -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:!restricted: -endif::[] -ifdef::openshift-origin[] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:!ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:!ibm-z: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:!ibm-z-kvm: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-power"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-platform-agnostic"] -:!agnostic: -endif::[] diff --git a/modules/installation-bootstrap-gather.adoc b/modules/installation-bootstrap-gather.adoc deleted file mode 100644 index 68c03e91fe09..000000000000 --- a/modules/installation-bootstrap-gather.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing-troubleshooting.adoc -// * support/troubleshooting/troubleshooting-installations.adoc - -:_content-type: PROCEDURE -[id="installation-bootstrap-gather_{context}"] -= Gathering logs from a failed installation - -If you gave an SSH key to your installation program, you can gather data about -your failed installation. - -[NOTE] -==== -You use a different command to gather logs about an unsuccessful installation -than to gather logs from a running cluster. If you must gather logs from a -running cluster, use the `oc adm must-gather` command. -==== - -.Prerequisites - -* Your {product-title} installation failed before the bootstrap process finished. The bootstrap node is running and accessible through SSH. -* The `ssh-agent` process is active on your computer, and you provided the same SSH key to both the `ssh-agent` process and the installation program. -* If you tried to install a cluster on infrastructure that you provisioned, you must have the fully qualified domain names of the bootstrap and control plane nodes. - -.Procedure - -. Generate the commands that are required to obtain the installation logs from -the bootstrap and control plane machines: -+ --- -** If you used installer-provisioned infrastructure, change to the directory that contains the installation program and run the following command: -+ -[source,terminal] ----- -$ ./openshift-install gather bootstrap --dir <1> ----- -<1> `installation_directory` is the directory you specified when you ran `./openshift-install create cluster`. This directory contains the {product-title} -definition files that the installation program creates. -+ -For installer-provisioned infrastructure, the installation program stores -information about the cluster, so you do not specify the hostnames or IP -addresses. - -** If you used infrastructure that you provisioned yourself, change to the directory that contains the installation program and run the following -command: -+ -[source,terminal] ----- -$ ./openshift-install gather bootstrap --dir \ <1> - --bootstrap \ <2> - --master \ <3> - --master \ <3> - --master " <3> ----- -<1> For `installation_directory`, specify the same directory you specified when you ran `./openshift-install create cluster`. This directory contains the {product-title} -definition files that the installation program creates. -<2> `` is the fully qualified domain name or IP address of -the cluster's bootstrap machine. 
-<3> For each control plane, or master, machine in your cluster, replace `` with its fully qualified domain name or IP address. -+ -[NOTE] -==== -A default cluster contains three control plane machines. List all of your control plane machines as shown, no matter how many your cluster uses. -==== --- -+ -.Example output -[source,terminal] ----- -INFO Pulling debug logs from the bootstrap machine -INFO Bootstrap gather logs captured here "/log-bundle-.tar.gz" ----- -+ -If you open a Red Hat support case about your installation failure, include -the compressed logs in the case. diff --git a/modules/installation-cis-ibm-cloud.adoc b/modules/installation-cis-ibm-cloud.adoc deleted file mode 100644 index b4ee1501fe71..000000000000 --- a/modules/installation-cis-ibm-cloud.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc -// installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc - -:_content-type: PROCEDURE -[id="installation-cis-ibm-cloud_{context}"] -= Using IBM Cloud Internet Services for DNS resolution - -The installation program uses IBM Cloud Internet Services (CIS) to configure cluster DNS resolution and provide name lookup for a public cluster. - -[NOTE] -==== -This offering does not support IPv6, so dual stack or IPv6 environments are not possible. -==== - -You must create a domain zone in CIS in the same account as your cluster. You must also ensure the zone is authoritative for the domain. You can do this using a root domain or subdomain. - -.Prerequisites - -* You have installed the link:https://www.ibm.com/cloud/cli[IBM Cloud CLI]. -* You have an existing domain and registrar. For more information, see the IBM link:https://cloud.ibm.com/docs/dns?topic=dns-getting-started[documentation]. - -.Procedure - -. Create a CIS instance to use with your cluster: - -.. Install the CIS plugin: -+ -[source,terminal] ----- -$ ibmcloud plugin install cis ----- - -.. Create the CIS instance: -+ -[source,terminal] ----- -$ ibmcloud cis instance-create standard <1> ----- -<1> At a minimum, a `Standard` plan is required for CIS to manage the cluster subdomain and its DNS records. - -. Connect an existing domain to your CIS instance: - -.. Set the context instance for CIS: -+ -[source,terminal] ----- -$ ibmcloud cis instance-set <1> ----- -<1> The instance cloud resource name. - -.. Add the domain for CIS: -+ -[source,terminal] ----- -$ ibmcloud cis domain-add <1> ----- -<1> The fully qualified domain name. You can use either the root domain or subdomain value as the domain name, depending on which you plan to configure. -+ -[NOTE] -==== -A root domain uses the form `openshiftcorp.com`. A subdomain uses the form `clusters.openshiftcorp.com`. -==== - -. Open the link:https://cloud.ibm.com/catalog/services/internet-services[CIS web console], navigate to the *Overview* page, and note your CIS name servers. These name servers will be used in the next step. - -. Configure the name servers for your domains or subdomains at the domain's registrar or DNS provider. For more information, see the IBM Cloud link:https://cloud.ibm.com/docs/cis?topic=cis-getting-started#configure-your-name-servers-with-the-registrar-or-existing-dns-provider[documentation]. 
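After the registrar change propagates, you can optionally verify the delegation with a standard DNS lookup. For example, using the subdomain form from the earlier note:

[source,terminal]
----
$ dig +short NS clusters.openshiftcorp.com
----

The returned name servers should match the CIS name servers that you noted from the *Overview* page.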
\ No newline at end of file diff --git a/modules/installation-cloudformation-bootstrap.adoc b/modules/installation-cloudformation-bootstrap.adoc deleted file mode 100644 index 2ee56936ca8b..000000000000 --- a/modules/installation-cloudformation-bootstrap.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -[id="installation-cloudformation-bootstrap_{context}"] -= CloudFormation template for the bootstrap machine - -You can use the following CloudFormation template to deploy the bootstrap machine that you need for your {product-title} cluster. - -.CloudFormation template for the bootstrap machine -[%collapsible] -==== -[source,yaml] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/aws/cloudformation/04_cluster_bootstrap.yaml[] ----- -==== diff --git a/modules/installation-cloudformation-control-plane.adoc b/modules/installation-cloudformation-control-plane.adoc deleted file mode 100644 index c5cd60b6ca57..000000000000 --- a/modules/installation-cloudformation-control-plane.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -[id="installation-cloudformation-control-plane_{context}"] -= CloudFormation template for control plane machines - -You can use the following CloudFormation template to deploy the control plane -machines that you need for your {product-title} cluster. - -.CloudFormation template for control plane machines -[%collapsible] -==== -[source,yaml] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/aws/cloudformation/05_cluster_master_nodes.yaml[] ----- -==== diff --git a/modules/installation-cloudformation-dns.adoc b/modules/installation-cloudformation-dns.adoc deleted file mode 100644 index fc1b7e3558bb..000000000000 --- a/modules/installation-cloudformation-dns.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -[id="installation-cloudformation-dns_{context}"] -= CloudFormation template for the network and load balancers - -You can use the following CloudFormation template to deploy the networking -objects and load balancers that you need for your {product-title} cluster. - -.CloudFormation template for the network and load balancers -[%collapsible] -==== -[source,yaml] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/aws/cloudformation/02_cluster_infra.yaml[] ----- -==== - -[IMPORTANT] -==== -If you are deploying your cluster to an AWS government or secret region, you must update the `InternalApiServerRecord` to use `CNAME` records. Records of type `ALIAS` are not supported for AWS government regions. 
For example: - -[source,yaml] ----- -Type: CNAME -TTL: 10 -ResourceRecords: -- !GetAtt IntApiElb.DNSName ----- -==== diff --git a/modules/installation-cloudformation-security.adoc b/modules/installation-cloudformation-security.adoc deleted file mode 100644 index 3dc650bfb6e0..000000000000 --- a/modules/installation-cloudformation-security.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -[id="installation-cloudformation-security_{context}"] -= CloudFormation template for security objects - -You can use the following CloudFormation template to deploy the security objects -that you need for your {product-title} cluster. - -.CloudFormation template for security objects -[%collapsible] -==== -[source,yaml] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/aws/cloudformation/03_cluster_security.yaml[] ----- -==== diff --git a/modules/installation-cloudformation-subnet-localzone.adoc b/modules/installation-cloudformation-subnet-localzone.adoc deleted file mode 100644 index 47503db35a56..000000000000 --- a/modules/installation-cloudformation-subnet-localzone.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-localzone.adoc - -:_content-type: REFERENCE -[id="installation-cloudformation-subnet-localzone_{context}"] -= CloudFormation template for the subnet that uses AWS Local Zones - -You can use the following CloudFormation template to deploy the subnet that -you need for your {product-title} cluster that uses AWS Local Zones. - -.CloudFormation template for the subnet -[%collapsible] -==== -[source,yaml] ----- -# CloudFormation template used to create Local Zone subnets and dependencies -AWSTemplateFormatVersion: 2010-09-09 -Description: Template for create Public Local Zone subnets - -Parameters: - VpcId: - Description: VPC Id - Type: String - ZoneName: - Description: Local Zone Name (Example us-east-1-nyc-1a) - Type: String - SubnetName: - Description: Local Zone Name (Example cluster-public-us-east-1-nyc-1a) - Type: String - PublicRouteTableId: - Description: Public Route Table ID to associate the Local Zone subnet - Type: String - PublicSubnetCidr: - AllowedPattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(1[6-9]|2[0-4]))$ - ConstraintDescription: CIDR block parameter must be in the form x.x.x.x/16-24. - Default: 10.0.128.0/20 - Description: CIDR block for Public Subnet - Type: String - -Resources: - PublicSubnet: - Type: "AWS::EC2::Subnet" - Properties: - VpcId: !Ref VpcId - CidrBlock: !Ref PublicSubnetCidr - AvailabilityZone: !Ref ZoneName - Tags: - - Key: Name - Value: !Ref SubnetName - - Key: kubernetes.io/cluster/unmanaged - Value: "true" - - PublicSubnetRouteTableAssociation: - Type: "AWS::EC2::SubnetRouteTableAssociation" - Properties: - SubnetId: !Ref PublicSubnet - RouteTableId: !Ref PublicRouteTableId - -Outputs: - PublicSubnetIds: - Description: Subnet IDs of the public subnets. 
- Value: - !Join ["", [!Ref PublicSubnet]] ----- -==== diff --git a/modules/installation-cloudformation-vpc-localzone.adoc b/modules/installation-cloudformation-vpc-localzone.adoc deleted file mode 100644 index e242a8f0f46d..000000000000 --- a/modules/installation-cloudformation-vpc-localzone.adoc +++ /dev/null @@ -1,309 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-localzone.adoc - -:_content-type: REFERENCE -[id="installation-cloudformation-vpc-localzone_{context}"] -= CloudFormation template for the VPC - -You can use the following CloudFormation template to deploy the VPC that -you need for your {product-title} cluster. - -.CloudFormation template for the VPC -[%collapsible] -==== -[source,yaml] ----- -AWSTemplateFormatVersion: 2010-09-09 -Description: Template for Best Practice VPC with 1-3 AZs - -Parameters: - VpcCidr: - AllowedPattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(1[6-9]|2[0-4]))$ - ConstraintDescription: CIDR block parameter must be in the form x.x.x.x/16-24. - Default: 10.0.0.0/16 - Description: CIDR block for VPC. - Type: String - AvailabilityZoneCount: - ConstraintDescription: "The number of availability zones. (Min: 1, Max: 3)" - MinValue: 1 - MaxValue: 3 - Default: 1 - Description: "How many AZs to create VPC subnets for. (Min: 1, Max: 3)" - Type: Number - SubnetBits: - ConstraintDescription: CIDR block parameter must be in the form x.x.x.x/19-27. - MinValue: 5 - MaxValue: 13 - Default: 12 - Description: "Size of each subnet to create within the availability zones. (Min: 5 = /27, Max: 13 = /19)" - Type: Number - -Metadata: - AWS::CloudFormation::Interface: - ParameterGroups: - - Label: - default: "Network Configuration" - Parameters: - - VpcCidr - - SubnetBits - - Label: - default: "Availability Zones" - Parameters: - - AvailabilityZoneCount - ParameterLabels: - AvailabilityZoneCount: - default: "Availability Zone Count" - VpcCidr: - default: "VPC CIDR" - SubnetBits: - default: "Bits Per Subnet" - -Conditions: - DoAz3: !Equals [3, !Ref AvailabilityZoneCount] - DoAz2: !Or [!Equals [2, !Ref AvailabilityZoneCount], Condition: DoAz3] - -Resources: - VPC: - Type: "AWS::EC2::VPC" - Properties: - EnableDnsSupport: "true" - EnableDnsHostnames: "true" - CidrBlock: !Ref VpcCidr - PublicSubnet: - Type: "AWS::EC2::Subnet" - Properties: - VpcId: !Ref VPC - CidrBlock: !Select [0, !Cidr [!Ref VpcCidr, 6, !Ref SubnetBits]] - AvailabilityZone: !Select - - 0 - - Fn::GetAZs: !Ref "AWS::Region" - PublicSubnet2: - Type: "AWS::EC2::Subnet" - Condition: DoAz2 - Properties: - VpcId: !Ref VPC - CidrBlock: !Select [1, !Cidr [!Ref VpcCidr, 6, !Ref SubnetBits]] - AvailabilityZone: !Select - - 1 - - Fn::GetAZs: !Ref "AWS::Region" - PublicSubnet3: - Type: "AWS::EC2::Subnet" - Condition: DoAz3 - Properties: - VpcId: !Ref VPC - CidrBlock: !Select [2, !Cidr [!Ref VpcCidr, 6, !Ref SubnetBits]] - AvailabilityZone: !Select - - 2 - - Fn::GetAZs: !Ref "AWS::Region" - InternetGateway: - Type: "AWS::EC2::InternetGateway" - GatewayToInternet: - Type: "AWS::EC2::VPCGatewayAttachment" - Properties: - VpcId: !Ref VPC - InternetGatewayId: !Ref InternetGateway - PublicRouteTable: - Type: "AWS::EC2::RouteTable" - Properties: - VpcId: !Ref VPC - PublicRoute: - Type: "AWS::EC2::Route" - DependsOn: GatewayToInternet - Properties: - RouteTableId: !Ref PublicRouteTable - DestinationCidrBlock: 0.0.0.0/0 - GatewayId: !Ref InternetGateway - PublicSubnetRouteTableAssociation: - Type: 
"AWS::EC2::SubnetRouteTableAssociation" - Properties: - SubnetId: !Ref PublicSubnet - RouteTableId: !Ref PublicRouteTable - PublicSubnetRouteTableAssociation2: - Type: "AWS::EC2::SubnetRouteTableAssociation" - Condition: DoAz2 - Properties: - SubnetId: !Ref PublicSubnet2 - RouteTableId: !Ref PublicRouteTable - PublicSubnetRouteTableAssociation3: - Condition: DoAz3 - Type: "AWS::EC2::SubnetRouteTableAssociation" - Properties: - SubnetId: !Ref PublicSubnet3 - RouteTableId: !Ref PublicRouteTable - PrivateSubnet: - Type: "AWS::EC2::Subnet" - Properties: - VpcId: !Ref VPC - CidrBlock: !Select [3, !Cidr [!Ref VpcCidr, 6, !Ref SubnetBits]] - AvailabilityZone: !Select - - 0 - - Fn::GetAZs: !Ref "AWS::Region" - PrivateRouteTable: - Type: "AWS::EC2::RouteTable" - Properties: - VpcId: !Ref VPC - PrivateSubnetRouteTableAssociation: - Type: "AWS::EC2::SubnetRouteTableAssociation" - Properties: - SubnetId: !Ref PrivateSubnet - RouteTableId: !Ref PrivateRouteTable - NAT: - DependsOn: - - GatewayToInternet - Type: "AWS::EC2::NatGateway" - Properties: - AllocationId: - "Fn::GetAtt": - - EIP - - AllocationId - SubnetId: !Ref PublicSubnet - EIP: - Type: "AWS::EC2::EIP" - Properties: - Domain: vpc - Route: - Type: "AWS::EC2::Route" - Properties: - RouteTableId: - Ref: PrivateRouteTable - DestinationCidrBlock: 0.0.0.0/0 - NatGatewayId: - Ref: NAT - PrivateSubnet2: - Type: "AWS::EC2::Subnet" - Condition: DoAz2 - Properties: - VpcId: !Ref VPC - CidrBlock: !Select [4, !Cidr [!Ref VpcCidr, 6, !Ref SubnetBits]] - AvailabilityZone: !Select - - 1 - - Fn::GetAZs: !Ref "AWS::Region" - PrivateRouteTable2: - Type: "AWS::EC2::RouteTable" - Condition: DoAz2 - Properties: - VpcId: !Ref VPC - PrivateSubnetRouteTableAssociation2: - Type: "AWS::EC2::SubnetRouteTableAssociation" - Condition: DoAz2 - Properties: - SubnetId: !Ref PrivateSubnet2 - RouteTableId: !Ref PrivateRouteTable2 - NAT2: - DependsOn: - - GatewayToInternet - Type: "AWS::EC2::NatGateway" - Condition: DoAz2 - Properties: - AllocationId: - "Fn::GetAtt": - - EIP2 - - AllocationId - SubnetId: !Ref PublicSubnet2 - EIP2: - Type: "AWS::EC2::EIP" - Condition: DoAz2 - Properties: - Domain: vpc - Route2: - Type: "AWS::EC2::Route" - Condition: DoAz2 - Properties: - RouteTableId: - Ref: PrivateRouteTable2 - DestinationCidrBlock: 0.0.0.0/0 - NatGatewayId: - Ref: NAT2 - PrivateSubnet3: - Type: "AWS::EC2::Subnet" - Condition: DoAz3 - Properties: - VpcId: !Ref VPC - CidrBlock: !Select [5, !Cidr [!Ref VpcCidr, 6, !Ref SubnetBits]] - AvailabilityZone: !Select - - 2 - - Fn::GetAZs: !Ref "AWS::Region" - PrivateRouteTable3: - Type: "AWS::EC2::RouteTable" - Condition: DoAz3 - Properties: - VpcId: !Ref VPC - PrivateSubnetRouteTableAssociation3: - Type: "AWS::EC2::SubnetRouteTableAssociation" - Condition: DoAz3 - Properties: - SubnetId: !Ref PrivateSubnet3 - RouteTableId: !Ref PrivateRouteTable3 - NAT3: - DependsOn: - - GatewayToInternet - Type: "AWS::EC2::NatGateway" - Condition: DoAz3 - Properties: - AllocationId: - "Fn::GetAtt": - - EIP3 - - AllocationId - SubnetId: !Ref PublicSubnet3 - EIP3: - Type: "AWS::EC2::EIP" - Condition: DoAz3 - Properties: - Domain: vpc - Route3: - Type: "AWS::EC2::Route" - Condition: DoAz3 - Properties: - RouteTableId: - Ref: PrivateRouteTable3 - DestinationCidrBlock: 0.0.0.0/0 - NatGatewayId: - Ref: NAT3 - S3Endpoint: - Type: AWS::EC2::VPCEndpoint - Properties: - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: '*' - Action: - - '*' - Resource: - - '*' - RouteTableIds: - - !Ref PublicRouteTable - - !Ref PrivateRouteTable 
- - !If [DoAz2, !Ref PrivateRouteTable2, !Ref "AWS::NoValue"] - - !If [DoAz3, !Ref PrivateRouteTable3, !Ref "AWS::NoValue"] - ServiceName: !Join - - '' - - - com.amazonaws. - - !Ref 'AWS::Region' - - .s3 - VpcId: !Ref VPC - -Outputs: - VpcId: - Description: ID of the new VPC. - Value: !Ref VPC - PublicSubnetIds: - Description: Subnet IDs of the public subnets. - Value: - !Join [ - ",", - [!Ref PublicSubnet, !If [DoAz2, !Ref PublicSubnet2, !Ref "AWS::NoValue"], !If [DoAz3, !Ref PublicSubnet3, !Ref "AWS::NoValue"]] - ] - PrivateSubnetIds: - Description: Subnet IDs of the private subnets. - Value: - !Join [ - ",", - [!Ref PrivateSubnet, !If [DoAz2, !Ref PrivateSubnet2, !Ref "AWS::NoValue"], !If [DoAz3, !Ref PrivateSubnet3, !Ref "AWS::NoValue"]] - ] - PublicRouteTableId: - Description: Public Route table ID - Value: !Ref PublicRouteTable ----- -==== diff --git a/modules/installation-cloudformation-vpc.adoc b/modules/installation-cloudformation-vpc.adoc deleted file mode 100644 index 9cc2bb3920c3..000000000000 --- a/modules/installation-cloudformation-vpc.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -[id="installation-cloudformation-vpc_{context}"] -= CloudFormation template for the VPC - -You can use the following CloudFormation template to deploy the VPC that -you need for your {product-title} cluster. - -.CloudFormation template for the VPC -[%collapsible] -==== -[source,yaml] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/aws/cloudformation/01_vpc.yaml[] ----- -==== diff --git a/modules/installation-cloudformation-worker.adoc b/modules/installation-cloudformation-worker.adoc deleted file mode 100644 index 91d8186ed923..000000000000 --- a/modules/installation-cloudformation-worker.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -[id="installation-cloudformation-worker_{context}"] -= CloudFormation template for worker machines - -You can use the following CloudFormation template to deploy the worker machines -that you need for your {product-title} cluster. 
- -.CloudFormation template for worker machines -[%collapsible] -==== -[source,yaml] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/aws/cloudformation/06_cluster_worker_node.yaml[] ----- -==== diff --git a/modules/installation-complete-user-infra.adoc b/modules/installation-complete-user-infra.adoc deleted file mode 100644 index a4345b049e48..000000000000 --- a/modules/installation-complete-user-infra.adoc +++ /dev/null @@ -1,198 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc - -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:restricted: -endif::[] -ifdef::openshift-origin[] -:restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:ibm-z: -:restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:ibm-z-kvm: -:restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-power"] -:ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:ibm-power: -:restricted: -endif::[] -:_content-type: PROCEDURE -[id="installation-complete-user-infra_{context}"] -= Completing installation on user-provisioned infrastructure - -After you complete the Operator configuration, you can finish installing the -cluster on infrastructure that you provide. - -.Prerequisites - -* Your control plane has initialized. -* You have completed the initial Operator configuration. - -.Procedure - -. 
Confirm that all the cluster components are online with the following command: -+ -[source,terminal] ----- -$ watch -n5 oc get clusteroperators ----- -+ -.Example output -[source,terminal,subs="attributes+"] ----- -NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE -authentication {product-version}.0 True False False 19m -baremetal {product-version}.0 True False False 37m -cloud-credential {product-version}.0 True False False 40m -cluster-autoscaler {product-version}.0 True False False 37m -config-operator {product-version}.0 True False False 38m -console {product-version}.0 True False False 26m -csi-snapshot-controller {product-version}.0 True False False 37m -dns {product-version}.0 True False False 37m -etcd {product-version}.0 True False False 36m -image-registry {product-version}.0 True False False 31m -ingress {product-version}.0 True False False 30m -insights {product-version}.0 True False False 31m -kube-apiserver {product-version}.0 True False False 26m -kube-controller-manager {product-version}.0 True False False 36m -kube-scheduler {product-version}.0 True False False 36m -kube-storage-version-migrator {product-version}.0 True False False 37m -machine-api {product-version}.0 True False False 29m -machine-approver {product-version}.0 True False False 37m -machine-config {product-version}.0 True False False 36m -marketplace {product-version}.0 True False False 37m -monitoring {product-version}.0 True False False 29m -network {product-version}.0 True False False 38m -node-tuning {product-version}.0 True False False 37m -openshift-apiserver {product-version}.0 True False False 32m -openshift-controller-manager {product-version}.0 True False False 30m -openshift-samples {product-version}.0 True False False 32m -operator-lifecycle-manager {product-version}.0 True False False 37m -operator-lifecycle-manager-catalog {product-version}.0 True False False 37m -operator-lifecycle-manager-packageserver {product-version}.0 True False False 32m -service-ca {product-version}.0 True False False 38m -storage {product-version}.0 True False False 37m ----- -+ -Alternatively, the following command notifies you when all of the clusters are available. It also retrieves and displays credentials: -+ -[source,terminal] ----- -$ ./openshift-install --dir wait-for install-complete <1> ----- -<1> For ``, specify the path to the directory that you -stored the installation files in. -+ -.Example output -[source,terminal] ----- -INFO Waiting up to 30m0s for the cluster to initialize... ----- -+ -The command succeeds when the Cluster Version Operator finishes deploying the -{product-title} cluster from Kubernetes API server. -+ -[IMPORTANT] -==== -* The Ignition config files that the installation program generates contain certificates that expire after 24 hours, which are then renewed at that time. If the cluster is shut down before renewing the certificates and the cluster is later restarted after the 24 hours have elapsed, the cluster automatically recovers the expired certificates. The exception is that you must manually approve the pending `node-bootstrapper` certificate signing requests (CSRs) to recover kubelet certificates. See the documentation for _Recovering from expired control plane certificates_ for more information. - -* It is recommended that you use Ignition config files within 12 hours after they are generated because the 24-hour certificate rotates from 16 to 22 hours after the cluster is installed. 
By using the Ignition config files within 12 hours, you can avoid installation failure if the certificate update runs during installation. -==== - -. Confirm that the Kubernetes API server is communicating with the pods. -.. To view a list of all pods, use the following command: -+ -[source,terminal] ----- -$ oc get pods --all-namespaces ----- -+ -.Example output -[source,terminal] ----- -NAMESPACE NAME READY STATUS RESTARTS AGE -openshift-apiserver-operator openshift-apiserver-operator-85cb746d55-zqhs8 1/1 Running 1 9m -openshift-apiserver apiserver-67b9g 1/1 Running 0 3m -openshift-apiserver apiserver-ljcmx 1/1 Running 0 1m -openshift-apiserver apiserver-z25h4 1/1 Running 0 2m -openshift-authentication-operator authentication-operator-69d5d8bf84-vh2n8 1/1 Running 0 5m -... ----- - -.. View the logs for a pod that is listed in the output of the previous command -by using the following command: -+ -[source,terminal] ----- -$ oc logs <pod_name> -n <namespace> <1> ----- -<1> Specify the pod name and namespace, as shown in the output of the previous -command. -+ -If the pod logs display, the Kubernetes API server can communicate with the -cluster machines. - -ifndef::ibm-power[] -. For an installation with Fibre Channel Protocol (FCP), additional steps are required to enable multipathing. Do not enable multipathing during installation. -endif::ibm-power[] -ifdef::ibm-power[] -. Additional steps are required to enable multipathing. Do not enable multipathing during installation. -endif::ibm-power[] -+ -See "Enabling multipathing with kernel arguments on {op-system}" in the _Post-installation machine configuration tasks_ documentation for more information. - -ifdef::restricted[] -. Register your cluster on the link:https://console.redhat.com/openshift/register[Cluster registration] page.
-endif::restricted[] - -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:!restricted: -endif::[] -ifdef::openshift-origin[] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:!ibm-z: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-power"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:!ibm-power: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:!ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:!ibm-z-kvm: -:!restricted: -endif::[] diff --git a/modules/installation-configuration-parameters.adoc b/modules/installation-configuration-parameters.adoc deleted file mode 100644 index 7b33d1edc908..000000000000 --- a/modules/installation-configuration-parameters.adoc +++ /dev/null @@ -1,2350 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_alibaba/installing-alibaba-default.adoc -// * installing/installing_aws/installing-alibaba-customizations.adoc -// * installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_alibaba_installing-alibaba-vpc.adoc -// * installing/installing_aws/installing-aws-china.adoc -// * installing/installing_aws/installing-aws-customizations.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_aws/installing-aws-private.adoc -// * installing/installing_aws/installing-aws-secret-region.adoc -// * installing/installing_aws/installing-aws-vpc.adoc -// * installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc -// * installing/installing_aws/installing-aws-outposts-remote-workers.adoc -// * installing/installing_azure/installing-azure-customizations.adoc -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-network-customizations.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_azure/installing-azure-vnet.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc -// * installing/installing_ibm_cloud_public/intalling-ibm-cloud-private.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-customizations.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc -// * installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc -// * installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * 
installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-installer-restricted.adoc -// * installing/installing_openstack/installing-openstack-installer-sr-iov.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user-sr-iov-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user-sr-iov.adoc -// * installing/installing_openstack/installing-openstack-user.adoc -// * installing/installing_vmc/installing-restricted-networks-vmc.adoc -// * installing/installing_vmc/installing-vmc-customizations.adoc -// * installing/installing_vmc/installing-vmc-network-customizations.adoc -// * installing/installing_vmc/installing-vmc-user-infra.adoc -// * installing/installing_vmc/installing-vmc-network-customizations-user-infra.adoc -// * installing/installing_vmc/installing-restricted-networks-vmc-user-infra.adoc -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-customizations.adoc -// * installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc -// * installing/installing-restricted-networks-nutanix-installer-provisioned.adoc -// * installing/installing_vsphere/installation-config-parameters-vsphere.adoc -// * installing/installing_gcp/installation-config-parameters-gcp.adoc - -ifeval::["{context}" == "installing-alibaba-customizations"] -:alibabacloud: -endif::[] -ifeval::["{context}" == "installing-alibaba-vpc"] -:alibabacloud: -endif::[] -ifeval::["{context}" == "installing-aws-customizations"] -:aws: -endif::[] -//Starting in 4.10, aws on arm64 is only supported for installation on custom, network custom, private clusters and VPC . This attribute excludes arm64 content from installing on gov regions. When government regions are supported on arm64, change `aws-govcloud` to `aws`. -ifeval::["{context}" == "installing-aws-government-region"] -:aws-govcloud: -endif::[] -//Starting in 4.10, aws on arm64 is only supported for installation on custom, network custom, private clusters and VPC. This attribute excludes arm64 content from installing on secret regions. When secret regions are supported on arm64, change `aws-secret` to `aws`. 
-ifeval::["{context}" == "installing-aws-secret-region"] -:aws-secret: -endif::[] -ifeval::["{context}" == "installing-aws-network-customizations"] -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-private"] -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-vpc"] -:aws: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-outposts-remote-workers"] -:aws: -endif::[] -ifeval::["{context}" == "installing-azure-customizations"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-government-region"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-network-customizations"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-private"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-vnet"] -:azure: -endif::[] -ifeval::["{context}" == "installing-bare-metal"] -:bare: -endif::[] -ifeval::["{context}" == "installing-bare-metal-network-customizations"] -:bare: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:bare: -endif::[] -ifeval::["{context}" == "installing-aws-customizations"] -:aws: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-customizations"] -:ibm-cloud: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-network-customizations"] -:ibm-cloud: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-vpc"] -:ibm-cloud: -:ibm-cloud-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-private"] -:ibm-cloud: -:ibm-cloud-vpc: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-custom"] -:osp: -:osp-custom: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-kuryr"] -:osp: -:osp-kuryr: -endif::[] -ifeval::["{context}" == "installing-openstack-user"] -:osp: -:osp-custom: -endif::[] -ifeval::["{context}" == "installing-openstack-user-kuryr"] -:osp: -:osp-kuryr: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov"] -:osp: -:osp-custom: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov-kuryr"] -:osp: -:osp-kuryr: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-customizations"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-network-customizations"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-vmc-customizations"] -:vmc: -endif::[] -ifeval::["{context}" == "installing-vmc-network-customizations"] -:vmc: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vmc"] -:vmc: -endif::[] -ifeval::["{context}" == "installing-vmc-user-infra"] -:vmc: -endif::[] -ifeval::["{context}" == "installing-vmc-network-customizations-user-infra"] -:vmc: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vmc-user-infra"] -:vmc: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:osp: -:osp-custom: -endif::[] -ifeval::["{context}" == "installing-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-power"] -:ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:ibm-power: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-customizations"] -:ibm-power-vs: -endif::[] 
-ifeval::["{context}" == "installing-ibm-power-vs-private-cluster"] -:ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-ibm-powervs-vpc"] -:ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-default"] -:ash: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-network-customizations"] -:ash: -endif::[] -ifeval::["{context}" == "installing-nutanix-installer-provisioned"] -:nutanix: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-nutanix-installer-provisioned"] -:nutanix: -endif::[] -ifeval::["{context}" == "installation-config-parameters-vsphere"] -:vsphere: -endif::[] -ifeval::["{context}" == "installation-config-parameters-gcp"] -:gcp: -endif::[] - - -:_content-type: CONCEPT -[id="installation-configuration-parameters_{context}"] -ifndef::vsphere,gcp[] -= Installation configuration parameters -endif::vsphere,gcp[] - -// Managing headings is required as part of the effort for https://issues.redhat.com/browse/OSDOCS-6493. -// This accommodates the existing IA of the installation assemblies, while the improvement is implemented. -// As part of the updates for the last provider, the conditions can be removed and the following heading can be used. -ifdef::vsphere,gcp[] -= Available installation configuration parameters for {platform} -endif::vsphere,gcp[] - -// If install-config.yaml is generated by openshift-install -// The addition of providers beyond bare,ibm-power,ibm-z,ash is necessary as part of the effort for https://issues.redhat.com/browse/OSDOCS-6493 -// This accommodates the existing IA of the installation assemblies, while the improvement is implemented. -// As part of the updates for the last provider, content between lines 277-292 can be completely removed. -ifndef::bare,ibm-power,ibm-z,ash,vsphere,gcp[] -Before you deploy an {product-title} cluster, you provide parameter values to describe your account on the cloud platform that hosts your cluster and optionally customize your cluster's platform. When you create the `install-config.yaml` installation configuration file, you provide values for the required parameters through the command line. If you customize your cluster, you can modify the `install-config.yaml` file to provide more details about the platform. -endif::bare,ibm-power,ibm-z,ash,vsphere,gcp[] - -// If the user manually creates install-config.yaml -ifdef::bare,ibm-power,ibm-power-vs,ibm-z,ash[] -Before you deploy an {product-title} cluster, you provide a customized `install-config.yaml` installation configuration file that describes the details for your environment. -endif::bare,ibm-power,ibm-power-vs,ibm-z,ash[] - -// A condition for this note is required as part of the effort for https://issues.redhat.com/browse/OSDOCS-6493. -// This accommodates the existing content for installation assemblies, while the improvement is implemented. -// As part of the updates for the last provider, this note can be removed from the module -ifndef::vsphere,gcp[] -[NOTE] -==== -After installation, you cannot modify these parameters in the `install-config.yaml` file. -==== -endif::vsphere,gcp[] - -// This condition is required as part of the effort for https://issues.redhat.com/browse/OSDOCS-6493. -// As part of the update for each provider, this content applies to the net new provider-specific installation configuration parameter assembly. 
-// As part of the updates for the last provider, the conditions can be completely removed. -ifdef::vsphere,gcp[] -The following tables specify the required, optional, and {platform}-specific installation configuration parameters that you can set as part of the installation process. - -[NOTE] -==== -After installation, you cannot modify these parameters in the `install-config.yaml` file. -==== -endif::vsphere,gcp[] - -[id="installation-configuration-parameters-required_{context}"] -== Required configuration parameters - -Required installation configuration parameters are described in the following table: - -.Required parameters -[cols=".^2,.^3,.^5a",options="header"] -|==== -|Parameter|Description|Values - -|`apiVersion` -|The API version for the `install-config.yaml` content. The current version is `v1`. The installation program may also support older API versions. -|String - -|`baseDomain` -|The base domain of your cloud provider. The base domain is used to create routes to your {product-title} cluster components. The full DNS name for your cluster is a combination of the `baseDomain` and `metadata.name` parameter values that uses the `.` format. -|A fully-qualified domain or subdomain name, such as `example.com`. - -|`metadata` -|Kubernetes resource `ObjectMeta`, from which only the `name` parameter is consumed. -|Object - -|`metadata.name` -|The name of the cluster. DNS records for the cluster are all subdomains of `{{.metadata.name}}.{{.baseDomain}}`. -ifndef::bare,nutanix,vsphere[] -|String of lowercase letters, hyphens (`-`), and periods (`.`), such as `dev`. -endif::bare,nutanix,vsphere[] -ifdef::bare,nutanix,vsphere[] -|String of lowercase letters and hyphens (`-`), such as `dev`. -endif::bare,nutanix,vsphere[] -ifdef::osp[] -The string must be 14 characters or fewer long. -endif::osp[] - -|`platform` -|The configuration for the specific platform upon which to perform the installation: `alibabacloud`, `aws`, `baremetal`, `azure`, `gcp`, `ibmcloud`, `nutanix`, `openstack`, `powervs`, `vsphere`, or `{}`. For additional information about `platform.` parameters, consult the table for your specific platform that follows. -|Object - -ifndef::openshift-origin[] -|`pullSecret` -|Get a {cluster-manager-url-pull} to authenticate downloading container images for {product-title} components from services such as Quay.io. -| -[source,json] ----- -{ - "auths":{ - "cloud.openshift.com":{ - "auth":"b3Blb=", - "email":"you@example.com" - }, - "quay.io":{ - "auth":"b3Blb=", - "email":"you@example.com" - } - } -} ----- -endif::[] - -|==== - -[id="installation-configuration-parameters-network_{context}"] -== Network configuration parameters - -You can customize your installation configuration based on the requirements of your existing network infrastructure. For example, you can expand the IP address block for the cluster network or provide different IP address blocks than the defaults. - -// OSDOCS-1640 - IPv4/IPv6 dual-stack bare metal only -// But only for installer-provisioned -// https://bugzilla.redhat.com/show_bug.cgi?id=2020416 -// Once BM UPI supports dual-stack, uncomment all the following conditionals and blocks - -ifndef::bare,vsphere[] -Only IPv4 addresses are supported. -endif::[] - -ifdef::bare,vsphere[] -* If you use the {openshift-networking} OVN-Kubernetes network plugin, both IPv4 and IPv6 address families are supported. - -* If you use the {openshift-networking} OpenShift SDN network plugin, only the IPv4 address family is supported. 
- -ifdef::ibm-cloud[] -[NOTE] -==== -IBM Cloud VPC does not support IPv6 address families. -==== -endif::ibm-cloud[] - -ifdef::vsphere[] -[NOTE] -==== -On VMware vSphere, dual-stack networking must specify IPv4 as the primary address family. - -The following additional limitations apply to dual-stack networking: - -* Nodes report only their IPv6 IP address in `node.status.addresses` -* Nodes with only a single NIC are supported -* Pods configured for host networking report only their IPv6 addresses in `pod.status.IP` -==== -endif::vsphere[] - -If you configure your cluster to use both IP address families, review the following requirements: - -* Both IP families must use the same network interface for the default gateway. - -* Both IP families must have the default gateway. - -* You must specify IPv4 and IPv6 addresses in the same order for all network configuration parameters. For example, in the following configuration IPv4 addresses are listed before IPv6 addresses. - -[source,yaml] ----- -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - - cidr: fd00:10:128::/56 - hostPrefix: 64 - serviceNetwork: - - 172.30.0.0/16 - - fd00:172:16::/112 ----- -endif::[] - -[NOTE] -==== -Globalnet is not supported with {rh-storage-first} disaster recovery solutions. For regional disaster recovery scenarios, ensure that you use a nonoverlapping range of private IP addresses for the cluster and service networks in each cluster. -==== - -.Network parameters -[cols=".^2,.^3a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`networking` -|The configuration for the cluster network. -|Object - -[NOTE] -==== -You cannot modify parameters specified by the `networking` object after installation. -==== - -|`networking.networkType` -|The {openshift-networking} network plugin to install. -| -ifdef::openshift-origin[] -Either `OpenShiftSDN` or `OVNKubernetes`. The default value is `OVNKubernetes`. -endif::openshift-origin[] -ifndef::openshift-origin[] -ifndef::ibm-power-vs[] -Either `OpenShiftSDN` or `OVNKubernetes`. `OpenShiftSDN` is a CNI plugin for all-Linux networks. `OVNKubernetes` is a CNI plugin for Linux networks and hybrid networks that contain both Linux and Windows servers. The default value is `OVNKubernetes`. -endif::ibm-power-vs[] -ifdef::ibm-power-vs[] -The default value is `OVNKubernetes`. -endif::ibm-power-vs[] -endif::openshift-origin[] - -|`networking.clusterNetwork` -| -The IP address blocks for pods. - -The default value is `10.128.0.0/14` with a host prefix of `/23`. - -If you specify multiple IP address blocks, the blocks must not overlap. -|An array of objects. For example: - -[source,yaml] ----- -ifndef::bare[] -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 -endif::bare[] -ifdef::bare[] -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - - cidr: fd01::/48 - hostPrefix: 64 -endif::bare[] ----- - -|`networking.clusterNetwork.cidr` -| -Required if you use `networking.clusterNetwork`. An IP address block. - -ifndef::bare[] -An IPv4 network. -endif::bare[] - -ifdef::bare[] -If you use the OpenShift SDN network plugin, specify an IPv4 network. If you use the OVN-Kubernetes network plugin, you can specify IPv4 and IPv6 networks. -endif::bare[] -| -An IP address block in Classless Inter-Domain Routing (CIDR) notation. -The prefix length for an IPv4 block is between `0` and `32`. -ifdef::bare[] -The prefix length for an IPv6 block is between `0` and `128`. For example, `10.128.0.0/14` or `fd01::/48`. 
-endif::bare[] - -|`networking.clusterNetwork.hostPrefix` -|The subnet prefix length to assign to each individual node. For example, if `hostPrefix` is set to `23` then each node is assigned a `/23` subnet out of the given `cidr`. A `hostPrefix` value of `23` provides 510 (2^(32 - 23) - 2) pod IP addresses. -| -A subnet prefix. - -ifndef::bare[] -The default value is `23`. -endif::bare[] - -ifdef::bare[] -For an IPv4 network the default value is `23`. -For an IPv6 network the default value is `64`. The default value is also the minimum value for IPv6. -endif::bare[] - -|`networking.serviceNetwork` -| -The IP address block for services. The default value is `172.30.0.0/16`. - -The OpenShift SDN and OVN-Kubernetes network plugins support only a single IP address block for the service network. - -ifdef::bare[] -If you use the OVN-Kubernetes network plugin, you can specify an IP address block for both of the IPv4 and IPv6 address families. -endif::bare[] - -| -An array with an IP address block in CIDR format. For example: - -[source,yaml] ----- -ifndef::bare[] -networking: - serviceNetwork: - - 172.30.0.0/16 -endif::bare[] -ifdef::bare[] -networking: - serviceNetwork: - - 172.30.0.0/16 - - fd02::/112 -endif::bare[] ----- - -|`networking.machineNetwork` -| -The IP address blocks for machines. - -ifndef::ibm-power-vs[] -If you specify multiple IP address blocks, the blocks must not overlap. -endif::ibm-power-vs[] - -ifdef::ibm-z,ibm-power[] -If you specify multiple IP kernel arguments, the `machineNetwork.cidr` value must be the CIDR of the primary network. -endif::ibm-z,ibm-power[] -|An array of objects. For example: - -[source,yaml] ----- -networking: - machineNetwork: - - cidr: 10.0.0.0/16 ----- - -|`networking.machineNetwork.cidr` -| -Required if you use `networking.machineNetwork`. An IP address block. The default value is `10.0.0.0/16` for all platforms other than libvirt and {ibmpowerProductName} Virtual Server. For libvirt, the default value is `192.168.126.0/24`. For {ibmpowerProductName} Virtual Server, the default value is `192.168.0.0/24`. -ifdef::ibm-cloud-vpc[] -The CIDR must contain the subnets defined in `platform.ibmcloud.controlPlaneSubnets` and `platform.ibmcloud.computeSubnets`. -endif::ibm-cloud-vpc[] -| -An IP network block in CIDR notation. - -ifndef::bare,ibm-power-vs[] -For example, `10.0.0.0/16`. -endif::bare,ibm-power-vs[] -ifdef::bare[] -For example, `10.0.0.0/16` or `fd00::/48`. -endif::bare[] -ifdef::ibm-power-vs[] -For example, `192.168.0.0/24`. -endif::ibm-power-vs[] - -[NOTE] -==== -Set the `networking.machineNetwork` to match the CIDR that the preferred NIC resides in. -==== - -|==== - -[id="installation-configuration-parameters-optional_{context}"] -== Optional configuration parameters - -Optional installation configuration parameters are described in the following table: - -.Optional parameters -[cols=".^2,.^3a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`additionalTrustBundle` -|A PEM-encoded X.509 certificate bundle that is added to the nodes' trusted certificate store. This trust bundle may also be used when a proxy has been configured. -|String - -|`capabilities` -|Controls the installation of optional core cluster components. You can reduce the footprint of your {product-title} cluster by disabling optional components. For more information, see the "Cluster capabilities" page in _Installing_. -|String array - -|`capabilities.baselineCapabilitySet` -|Selects an initial set of optional capabilities to enable. 
Valid values are `None`, `v4.11`, `v4.12`, and `vCurrent`. The default value is `vCurrent`. -|String - -|`capabilities.additionalEnabledCapabilities` -|Extends the set of optional capabilities beyond what you specify in `baselineCapabilitySet`. You may specify multiple capabilities in this parameter. -|String array - -|`cpuPartitioningMode` -|Enables workload partitioning, which isolates {product-title} services, cluster management workloads, and infrastructure pods to run on a reserved set of CPUs. Workload partitioning can only be enabled during installation and cannot be disabled after installation. While this field enables workload partitioning, it does not configure workloads to use specific CPUs. For more information, see the _Workload partitioning_ page in the _Scalability and Performance_ section. -|`None` or `AllNodes`. `None` is the default value. - -|`compute` -|The configuration for the machines that comprise the compute nodes. -|Array of `MachinePool` objects. - -ifndef::openshift-origin[] - -ifndef::aws,bare,ibm-power,ibm-z,azure,ibm-power-vs[] -|`compute.architecture` -|Determines the instruction set architecture of the machines in the pool. Currently, clusters with varied architectures are not supported. All pools must specify the same architecture. Valid values are `amd64` (the default). -|String -endif::aws,bare,ibm-power,ibm-z,azure,ibm-power-vs[] - -ifdef::aws,bare,azure[] -|`compute.architecture` -|Determines the instruction set architecture of the machines in the pool. Currently, clusters with varied architectures are not supported. All pools must specify the same architecture. Valid values are `amd64` and `arm64`. See _Supported installation methods for different platforms_ in _Installing_ documentation for information about instance availability. -|String -endif::aws,bare,azure[] - -ifdef::ibm-z[] -|`compute.architecture` -|Determines the instruction set architecture of the machines in the pool. Currently, heterogeneous clusters are not supported, so all pools must specify the same architecture. Valid values are `s390x` (the default). -|String -endif::ibm-z[] - -ifdef::ibm-power,ibm-power-vs[] -|`compute.architecture` -|Determines the instruction set architecture of the machines in the pool. Currently, heterogeneous clusters are not supported, so all pools must specify the same architecture. Valid values are `ppc64le` (the default). -|String -endif::ibm-power,ibm-power-vs[] -endif::openshift-origin[] - -ifdef::openshift-origin[] -|`compute.architecture` -|Determines the instruction set architecture of the machines in the pool. Currently, clusters with varied architectures are not supported. All pools must specify the same architecture. Valid values are `amd64` (the default). -ifdef::aws[] -See _Supported installation methods for different platforms_ in _Installing_ documentation for information about instance availability. -endif::aws[] -|String -endif::openshift-origin[] - -|`compute.hyperthreading` -|Whether to enable or disable simultaneous multithreading, or `hyperthreading`, on compute machines. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning -accounts for the dramatically decreased machine performance. -==== -|`Enabled` or `Disabled` - -|`compute.name` -|Required if you use `compute`. The name of the machine pool. -|`worker` - -|`compute.platform` -|Required if you use `compute`.
Use this parameter to specify the cloud provider to host the worker machines. This parameter value must match the `controlPlane.platform` parameter value. -|`alibabacloud`, `aws`, `azure`, `gcp`, `ibmcloud`, `nutanix`, `openstack`, `powervs`, `vsphere`, or `{}` - -|`compute.replicas` -|The number of compute machines, which are also known as worker machines, to provision. -|A positive integer greater than or equal to `2`. The default value is `3`. - -|`featureSet` -|Enables the cluster for a feature set. A feature set is a collection of {product-title} features that are not enabled by default. For more information about enabling a feature set during installation, see "Enabling features using feature gates". -|String. The name of the feature set to enable, such as `TechPreviewNoUpgrade`. - -|`controlPlane` -|The configuration for the machines that comprise the control plane. -|Array of `MachinePool` objects. - -ifndef::openshift-origin[] -ifndef::aws,bare,ibm-z,ibm-power,azure,ibm-power-vs[] -|`controlPlane.architecture` -|Determines the instruction set architecture of the machines in the pool. Currently, clusters with varied architectures are not supported. All pools must specify the same architecture. Valid values are `amd64` (the default). -|String -endif::aws,bare,ibm-z,ibm-power,azure,ibm-power-vs[] - -ifdef::aws,bare,azure[] -|`controlPlane.architecture` -|Determines the instruction set architecture of the machines in the pool. Currently, clusters with varied architectures are not supported. All pools must specify the same architecture. Valid values are `amd64` and `arm64`. See _Supported installation methods for different platforms_ in _Installing_ documentation for information about instance availability. -|String -endif::aws,bare,azure[] - -ifdef::ibm-z[] -|`controlPlane.architecture` -|Determines the instruction set architecture of the machines in the pool. Currently, heterogeneous clusters are not supported, so all pools must specify the same architecture. Valid values are `s390x` (the default). -|String -endif::ibm-z[] - -ifdef::ibm-power,ibm-power-vs[] -|`controlPlane.architecture` -|Determines the instruction set architecture of the machines in the pool. Currently, heterogeneous clusters are not supported, so all pools must specify the same architecture. Valid values are `ppc64le` (the default). -|String -endif::ibm-power,ibm-power-vs[] -endif::openshift-origin[] - -ifdef::openshift-origin[] -|`controlPlane.architecture` -|Determines the instruction set architecture of the machines in the pool. Currently, clusters with varied architectures are not supported. All pools must specify the same architecture. Valid values are `amd64`. -ifdef::aws[] -See _Supported installation methods for different platforms_ in _Installing_ documentation for information about instance availability. -endif::aws[] -|String -endif::openshift-origin[] - -|`controlPlane.hyperthreading` -|Whether to enable or disable simultaneous multithreading, or `hyperthreading`, on control plane machines. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning -accounts for the dramatically decreased machine performance. -==== -|`Enabled` or `Disabled` - -|`controlPlane.name` -|Required if you use `controlPlane`. The name of the machine pool. -|`master` - -|`controlPlane.platform` -|Required if you use `controlPlane`. 
Use this parameter to specify the cloud provider that hosts the control plane machines. This parameter value must match the `compute.platform` parameter value. -|`alibabacloud`, `aws`, `azure`, `gcp`, `ibmcloud`, `nutanix`, `openstack`, `powervs`, `vsphere`, or `{}` - -|`controlPlane.replicas` -|The number of control plane machines to provision. -|The only supported value is `3`, which is the default value. - -|`credentialsMode` -|The Cloud Credential Operator (CCO) mode. If no mode is specified, the CCO dynamically tries to determine the capabilities of the provided credentials, with a preference for mint mode on the platforms where multiple modes are supported. -ifdef::gcp[If you are installing on GCP into a shared virtual private cloud (VPC), `credentialsMode` must be set to `Passthrough` or `Manual`.] -[NOTE] -==== -Not all CCO modes are supported for all cloud providers. For more information about CCO modes, see the _Cloud Credential Operator_ entry in the _Cluster Operators reference_ content. -==== -[NOTE] -==== -If your AWS account has service control policies (SCP) enabled, you must configure the `credentialsMode` parameter to `Mint`, `Passthrough` or `Manual`. -==== -|`Mint`, `Passthrough`, `Manual` or an empty string (`""`). -ifndef::openshift-origin,ibm-power-vs[] -|`fips` -|Enable or disable FIPS mode. The default is `false` (disabled). If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on `x86_64`, `ppc64le`, and `s390x` architectures. -==== -[NOTE] -==== -If you are using Azure File storage, you cannot enable FIPS mode. -==== -|`false` or `true` -endif::openshift-origin,ibm-power-vs[] -|`imageContentSources` -|Sources and repositories for the release-image content. -|Array of objects. Includes a `source` and, optionally, `mirrors`, as described in the following rows of this table. - -|`imageContentSources.source` -|Required if you use `imageContentSources`. Specify the repository that users refer to, for example, in image pull specifications. -|String - -|`imageContentSources.mirrors` -|Specify one or more repositories that may also contain the same images. -|Array of strings - -ifndef::openshift-origin[] -ifdef::aws[] -|`platform.aws.lbType` -|Required to set the NLB load balancer type in AWS. Valid values are `Classic` or `NLB`. If no value is specified, the installation program defaults to `Classic`. The installation program sets the value provided here in the ingress cluster configuration object. If you do not specify a load balancer type for other Ingress Controllers, they use the type set in this parameter. -|`Classic` or `NLB`. The default value is `Classic`. -endif::aws[] -endif::openshift-origin[] - -|`publish` -|How to publish or expose the user-facing endpoints of your cluster, such as the Kubernetes API, OpenShift routes. -| -ifdef::aws,aws-govcloud,aws-secret,azure,gcp,ibm-cloud[] -`Internal` or `External`. To deploy a private cluster, which cannot be accessed from the internet, set `publish` to `Internal`. The default value is `External`. -endif::[] -ifndef::aws,aws-govcloud,aws-secret,azure,gcp,ibm-cloud[] -`Internal` or `External`. The default value is `External`. - -Setting this field to `Internal` is not supported on non-cloud platforms. 
-ifndef::ibm-power-vs[] -ifeval::[{product-version} <= 4.7] -[IMPORTANT] -==== -If the value of the field is set to `Internal`, the cluster will become non-functional. For more information, refer to link:https://bugzilla.redhat.com/show_bug.cgi?id=1953035[BZ#1953035]. -==== -endif::[] -endif::ibm-power-vs[] -endif::[] - -|`sshKey` -| The SSH key to authenticate access to your cluster machines. -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -a|For example, `sshKey: ssh-ed25519 AAAA..`. - -|==== - -ifdef::aws,aws-govcloud,aws-secret[] -[id="installation-configuration-parameters-optional-aws_{context}"] -== Optional AWS configuration parameters - -Optional AWS configuration parameters are described in the following table: - -.Optional AWS parameters -[cols=".^2,.^3,.^5a",options="header"] -|==== -|Parameter|Description|Values - -|`compute.platform.aws.amiID` -|The AWS AMI used to boot compute machines for the cluster. This is required for regions that require a custom {op-system} AMI. -|Any published or custom {op-system} AMI that belongs to the set AWS region. See _{op-system} AMIs for AWS infrastructure_ for available AMI IDs. - -|`compute.platform.aws.iamRole` -|A pre-existing AWS IAM role applied to the compute machine pool instance profiles. You can use these fields to match naming schemes and include predefined permissions boundaries for your IAM roles. If undefined, the installation program creates a new IAM role. -|The name of a valid AWS IAM role. - -|`compute.platform.aws.rootVolume.iops` -|The Input/Output Operations Per Second (IOPS) that is reserved for the root volume. -|Integer, for example `4000`. - -|`compute.platform.aws.rootVolume.size` -|The size in GiB of the root volume. -|Integer, for example `500`. - -|`compute.platform.aws.rootVolume.type` -|The type of the root volume. -|Valid link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html[AWS EBS volume type], -such as `io1`. - -|`compute.platform.aws.rootVolume.kmsKeyARN` -|The Amazon Resource Name (key ARN) of a KMS key. This is required to encrypt operating system volumes of worker nodes with a specific KMS key. -|Valid link:https://docs.aws.amazon.com/kms/latest/developerguide/find-cmk-id-arn.html[key ID or the key ARN]. - -|`compute.platform.aws.type` -|The EC2 instance type for the compute machines. -|Valid AWS instance type, such as `m4.2xlarge`. See the *Supported AWS machine types* table that follows. -//add an xref when possible. - -|`compute.platform.aws.zones` -|The availability zones where the installation program creates machines for the compute machine pool. If you provide your own VPC, you must provide a subnet in that availability zone. -|A list of valid AWS availability zones, such as `us-east-1c`, in a -link:https://yaml.org/spec/1.2/spec.html#sequence//[YAML sequence]. - -|`compute.aws.region` -|The AWS region that the installation program creates compute resources in. -|Any valid link:https://docs.aws.amazon.com/general/latest/gr/rande.html[AWS region], such as `us-east-1`. You can use the AWS CLI to access the regions available based on your selected instance type. 
For example: -[source,terminal] ----- -aws ec2 describe-instance-type-offerings --filters Name=instance-type,Values=c7g.xlarge ----- -ifndef::openshift-origin[] -[IMPORTANT] -==== -When running on ARM based AWS instances, ensure that you enter a region where AWS Graviton processors are available. See link:https://aws.amazon.com/ec2/graviton/#Global_availability[Global availability] map in the AWS documentation. Currently, AWS Graviton3 processors are only available in some regions. -==== -endif::openshift-origin[] - - -|`controlPlane.platform.aws.amiID` -|The AWS AMI used to boot control plane machines for the cluster. This is required for regions that require a custom {op-system} AMI. -|Any published or custom {op-system} AMI that belongs to the set AWS region. See _{op-system} AMIs for AWS infrastructure_ for available AMI IDs. - -|`controlPlane.platform.aws.iamRole` -|A pre-existing AWS IAM role applied to the control plane machine pool instance profiles. You can use these fields to match naming schemes and include predefined permissions boundaries for your IAM roles. If undefined, the installation program creates a new IAM role. -|The name of a valid AWS IAM role. - -|`controlPlane.platform.aws.rootVolume.kmsKeyARN` -|The Amazon Resource Name (key ARN) of a KMS key. This is required to encrypt operating system volumes of control plane nodes with a specific KMS key. -|Valid link:https://docs.aws.amazon.com/kms/latest/developerguide/find-cmk-id-arn.html[key ID and the key ARN]. - -|`controlPlane.platform.aws.type` -|The EC2 instance type for the control plane machines. -|Valid AWS instance type, such as `m6i.xlarge`. See the *Supported AWS machine types* table that follows. -//add an xref when possible - -|`controlPlane.platform.aws.zones` -|The availability zones where the installation program creates machines for the -control plane machine pool. -|A list of valid AWS availability zones, such as `us-east-1c`, in a link:https://yaml.org/spec/1.2/spec.html#sequence//[YAML sequence]. - -|`controlPlane.aws.region` -|The AWS region that the installation program creates control plane resources in. -|Valid link:https://docs.aws.amazon.com/general/latest/gr/rande.html[AWS region], such as `us-east-1`. - -|`platform.aws.amiID` -|The AWS AMI used to boot all machines for the cluster. If set, the AMI must -belong to the same region as the cluster. This is required for regions that require a custom {op-system} AMI. -|Any published or custom {op-system} AMI that belongs to the set AWS region. See _{op-system} AMIs for AWS infrastructure_ for available AMI IDs. - -|`platform.aws.hostedZone` -|An existing Route 53 private hosted zone for the cluster. You can only use a pre-existing hosted zone when also supplying your own VPC. The hosted zone must already be associated with the user-provided VPC before installation. Also, the domain of the hosted zone must be the cluster domain or a parent of the cluster domain. If undefined, the installation program creates a new hosted zone. -|String, for example `Z3URY6TWQ91KVV`. - -|`platform.aws.serviceEndpoints.name` -|The AWS service endpoint name. Custom endpoints are only required for cases -where alternative AWS endpoints, like FIPS, must be used. Custom API endpoints -can be specified for EC2, S3, IAM, Elastic Load Balancing, Tagging, Route 53, -and STS AWS services. -|Valid link:https://docs.aws.amazon.com/general/latest/gr/rande.html[AWS service endpoint] name. - -|`platform.aws.serviceEndpoints.url` -|The AWS service endpoint URL. 
The URL must use the `https` protocol and the -host must trust the certificate. -|Valid link:https://docs.aws.amazon.com/general/latest/gr/rande.html[AWS service endpoint] URL. - -|`platform.aws.userTags` -|A map of keys and values that the installation program adds as tags to all resources that it creates. -|Any valid YAML map, such as key value pairs in the `: ` format. For more information about AWS tags, see link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html[Tagging Your Amazon EC2 Resources] in the AWS documentation. - -[NOTE] -==== -You can add up to 25 user defined tags during installation. The remaining 25 tags are reserved for {product-title}. -==== - -|`platform.aws.propagateUserTags` -| A flag that directs in-cluster Operators to include the specified user tags in the tags of the AWS resources that the Operators create. -| Boolean values, for example `true` or `false`. - - -|`platform.aws.subnets` -|If you provide the VPC instead of allowing the installation program to create the VPC for you, specify the subnet for the cluster to use. The subnet must be part of the same `machineNetwork[].cidr` ranges that you specify. - -For a standard cluster, specify a public and a private subnet for each availability zone. - -For a private cluster, specify a private subnet for each availability zone. - -For clusters that use AWS Local Zones, you must add AWS Local Zone subnets to this list to ensure edge machine pool creation. -|Valid subnet IDs. - -|==== -endif::aws,aws-govcloud,aws-secret[] - -ifdef::osp[] -[id="installation-configuration-parameters-additional-osp_{context}"] -== Additional {rh-openstack-first} configuration parameters - -Additional {rh-openstack} configuration parameters are described in the following table: - -.Additional {rh-openstack} parameters -[cols=".^2m,.^3a,^5a",options="header"] -|==== -|Parameter|Description|Values - -|`compute.platform.openstack.rootVolume.size` -|For compute machines, the size in gigabytes of the root volume. If you do not set this value, machines use ephemeral storage. -|Integer, for example `30`. - -|`compute.platform.openstack.rootVolume.type` -|For compute machines, the root volume's type. -|String, for example `performance`. - -|`controlPlane.platform.openstack.rootVolume.size` -|For control plane machines, the size in gigabytes of the root volume. If you do not set this value, machines use ephemeral storage. -|Integer, for example `30`. - -|`controlPlane.platform.openstack.rootVolume.type` -|For control plane machines, the root volume's type. -|String, for example `performance`. - -|`platform.openstack.cloud` -|The name of the {rh-openstack} cloud to use from the list of clouds in the -`clouds.yaml` file. -|String, for example `MyCloud`. - -|`platform.openstack.externalNetwork` -|The {rh-openstack} external network name to be used for installation. -|String, for example `external`. - -|`platform.openstack.computeFlavor` -|The {rh-openstack} flavor to use for control plane and compute machines. - -This property is deprecated. To use a flavor as the default for all machine pools, add it as the value of the `type` key in the `platform.openstack.defaultMachinePlatform` property. You can also set a flavor value for each machine pool individually. - -|String, for example `m1.xlarge`. 
-|==== - -[id="installation-configuration-parameters-optional-osp_{context}"] -== Optional {rh-openstack} configuration parameters - -Optional {rh-openstack} configuration parameters are described in the following table: - -.Optional {rh-openstack} parameters -[%header, cols=".^2,.^3,.^5a"] -|==== -|Parameter|Description|Values - -|`compute.platform.openstack.additionalNetworkIDs` -|Additional networks that are associated with compute machines. Allowed address pairs are not created for additional networks. -|A list of one or more UUIDs as strings. For example, `fa806b2f-ac49-4bce-b9db-124bc64209bf`. - -|`compute.platform.openstack.additionalSecurityGroupIDs` -|Additional security groups that are associated with compute machines. -|A list of one or more UUIDs as strings. For example, `7ee219f3-d2e9-48a1-96c2-e7429f1b0da7`. - -|`compute.platform.openstack.zones` -|{rh-openstack} Compute (Nova) availability zones (AZs) to install machines on. If this parameter is not set, the installation program relies on the default settings for Nova that the {rh-openstack} administrator configured. - -On clusters that use Kuryr, {rh-openstack} Octavia does not support availability zones. Load balancers and, if you are using the Amphora provider driver, {product-title} services that rely on Amphora VMs, are not created according to the value of this property. -|A list of strings. For example, `["zone-1", "zone-2"]`. - -|`compute.platform.openstack.rootVolume.zones` -|For compute machines, the availability zone to install root volumes on. If you do not set a value for this parameter, the installation program selects the default availability zone. -|A list of strings, for example `["zone-1", "zone-2"]`. - -|`compute.platform.openstack.serverGroupPolicy` -|Server group policy to apply to the group that will contain the compute machines in the pool. You cannot change server group policies or affiliations after creation. Supported options include `anti-affinity`, `soft-affinity`, and `soft-anti-affinity`. The default value is `soft-anti-affinity`. - -An `affinity` policy prevents migrations and therefore affects {rh-openstack} upgrades. The `affinity` policy is not supported. - -If you use a strict `anti-affinity` policy, an additional {rh-openstack} host is required during instance migration. -|A server group policy to apply to the machine pool. For example, `soft-affinity`. - -|`controlPlane.platform.openstack.additionalNetworkIDs` -|Additional networks that are associated with control plane machines. Allowed address pairs are not created for additional networks. - -Additional networks that are attached to a control plane machine are also attached to the bootstrap node. -|A list of one or more UUIDs as strings. For example, `fa806b2f-ac49-4bce-b9db-124bc64209bf`. - -|`controlPlane.platform.openstack.additionalSecurityGroupIDs` -|Additional security groups that are associated with control plane machines. -|A list of one or more UUIDs as strings. For example, `7ee219f3-d2e9-48a1-96c2-e7429f1b0da7`. - -|`controlPlane.platform.openstack.zones` -|{rh-openstack} Compute (Nova) availability zones (AZs) to install machines on. If this parameter is not set, the installation program relies on the default settings for Nova that the {rh-openstack} administrator configured. - -On clusters that use Kuryr, {rh-openstack} Octavia does not support availability zones. 
Load balancers and, if you are using the Amphora provider driver, {product-title} services that rely on Amphora VMs, are not created according to the value of this property. -|A list of strings. For example, `["zone-1", "zone-2"]`. - -|`controlPlane.platform.openstack.rootVolume.zones` -|For control plane machines, the availability zone to install root volumes on. If you do not set this value, the installation program selects the default availability zone. -|A list of strings, for example `["zone-1", "zone-2"]`. - -|`controlPlane.platform.openstack.serverGroupPolicy` -|Server group policy to apply to the group that will contain the control plane machines in the pool. You cannot change server group policies or affiliations after creation. Supported options include `anti-affinity`, `soft-affinity`, and `soft-anti-affinity`. The default value is `soft-anti-affinity`. - -An `affinity` policy prevents migrations, and therefore affects {rh-openstack} upgrades. The `affinity` policy is not supported. - -If you use a strict `anti-affinity` policy, an additional {rh-openstack} host is required during instance migration. -|A server group policy to apply to the machine pool. For example, `soft-affinity`. - -|`platform.openstack.clusterOSImage` -|The location from which the installation program downloads the {op-system} image. - -You must set this parameter to perform an installation in a restricted network. -|An HTTP or HTTPS URL, optionally with an SHA-256 checksum. - -For example, `\http://mirror.example.com/images/rhcos-43.81.201912131630.0-openstack.x86_64.qcow2.gz?sha256=ffebbd68e8a1f2a245ca19522c16c86f67f9ac8e4e0c1f0a812b068b16f7265d`. -The value can also be the name of an existing Glance image, for example `my-rhcos`. - -|`platform.openstack.clusterOSImageProperties` -|Properties to add to the installer-uploaded ClusterOSImage in Glance. This property is ignored if `platform.openstack.clusterOSImage` is set to an existing Glance image. - -You can use this property to exceed the default persistent volume (PV) limit for {rh-openstack} of 26 PVs per node. To exceed the limit, set the `hw_scsi_model` property value to `virtio-scsi` and the `hw_disk_bus` value to `scsi`. - -You can also use this property to enable the QEMU guest agent by including the `hw_qemu_guest_agent` property with a value of `yes`. -|A list of key-value string pairs. For example, `["hw_scsi_model": "virtio-scsi", "hw_disk_bus": "scsi"]`. - -|`platform.openstack.defaultMachinePlatform` -|The default machine pool platform configuration. -| -[source,json] ----- -{ - "type": "ml.large", - "rootVolume": { - "size": 30, - "type": "performance" - } -} ----- - -|`platform.openstack.ingressFloatingIP` -|An existing floating IP address to associate with the Ingress port. To use this property, you must also define the `platform.openstack.externalNetwork` property. -|An IP address, for example `128.0.0.1`. - -|`platform.openstack.apiFloatingIP` -|An existing floating IP address to associate with the API load balancer. To use this property, you must also define the `platform.openstack.externalNetwork` property. -|An IP address, for example `128.0.0.1`. - -|`platform.openstack.externalDNS` -|IP addresses for external DNS servers that cluster instances use for DNS resolution. -|A list of IP addresses as strings. For example, `["8.8.8.8", "192.168.1.12"]`. - -|`platform.openstack.loadbalancer` -|Whether or not to use the default, internal load balancer. 
If the value is set to `UserManaged`, this default load balancer is disabled so that you can deploy a cluster that uses an external, user-managed load balancer. If the parameter is not set, or if the value is `OpenShiftManagedDefault`, the cluster uses the default load balancer. -|`UserManaged` or `OpenShiftManagedDefault`. - -|`platform.openstack.machinesSubnet` -|The UUID of a {rh-openstack} subnet that the cluster's nodes use. Nodes and virtual IP (VIP) ports are created on this subnet. - -The first item in `networking.machineNetwork` must match the value of `machinesSubnet`. - -If you deploy to a custom subnet, you cannot specify an external DNS server to the {product-title} installer. Instead, link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.0/html/command_line_interface_reference/subnet[add DNS to the subnet in {rh-openstack}]. - -|A UUID as a string. For example, `fa806b2f-ac49-4bce-b9db-124bc64209bf`. -|==== - -[id="installation-configuration-parameters-failure-domains-osp_{context}"] -== {rh-openstack} parameters for failure domains - -:FeatureName: {rh-openstack} failure domains -[IMPORTANT] -==== -[subs="attributes+"] -{FeatureName} is a Technology Preview feature only. Technology Preview features -are not supported with Red Hat production service level agreements (SLAs) and -might not be functionally complete. Red Hat does not recommend using them -in production. These features provide early access to upcoming product -features, enabling customers to test functionality and provide feedback during -the development process. - -For more information about the support scope of Red Hat Technology Preview features, see link:https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Support Scope]. -==== -// Undefine {FeatureName} attribute, so that any mistakes are easily spotted -:!FeatureName: - -{rh-openstack-first} deployments do not have a single implementation of failure domains. Instead, availability zones are defined individually for each service, such as the compute service, Nova; the networking service, Neutron; and the storage service, Cinder. - -Beginning with {product-title} 4.13, there is a unified definition of failure domains for {rh-openstack} deployments that covers all supported availability zone types. You can use failure domains to control related aspects of Nova, Neutron, and Cinder configurations from a single place. - -In {rh-openstack}, a port describes a network connection and maps to an interface inside a compute machine. A port also: - -* Is defined by a network or by one or more subnets -* Connects a machine to one or more subnets - -Failure domains group the services of your deployment by using ports. If you use failure domains, each machine connects to: - -* The `portTarget` object with the ID `control-plane` while that object exists. -* All non-control-plane `portTarget` objects within its own failure domain. -* All networks in the machine pool's `additionalNetworkIDs` list. - -To configure failure domains for a machine pool, edit availability zone and port target parameters under `controlPlane.platform.openstack.failureDomains`. - -.{rh-openstack} parameters for failure domains -[cols=".^2,.^3a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`platform.openstack.failuredomains.computeAvailabilityZone` -|An availability zone for the server. If not specified, the cluster default is used. -|The name of the availability zone. For example, `nova-1`.
- -|`platform.openstack.failuredomains.storageAvailabilityZone` -|An availability zone for the root volume. If not specified, the cluster default is used. -|The name of the availability zone. For example, `cinder-1`. - -|`platform.openstack.failuredomains.portTargets` -|A list of `portTarget` objects, each of which defines a network connection to attach to machines within a failure domain. -|A list of `portTarget` objects. - -|`platform.openstack.failuredomains.portTargets.portTarget.id` -|The ID of an individual port target. To select that port target as the first network for machines, set the value of this parameter to `control-plane`. If this parameter has a different value, it is ignored. -|`control-plane` or an arbitrary string. - -|`platform.openstack.failuredomains.portTargets.portTarget.network` -|Required. The name or ID of the network to attach to machines in the failure domain. -a|A `network` object that contains either a name or UUID. For example: - -[source,yaml] ----- -network: - id: 8db6a48e-375b-4caa-b20b-5b9a7218bfe6 ----- - -or: - -[source,yaml] ----- -network: - name: my-network-1 ----- - -|`platform.openstack.failuredomains.portTargets.portTarget.fixedIPs` -|Subnets to allocate fixed IP addresses to. These subnets must exist within the same network as the port. -|A list of `subnet` objects. -|==== - -NOTE: You cannot combine zone fields and failure domains. If you want to use failure domains, the `controlPlane.zone` and `controlPlane.rootVolume.zone` fields must be left unset. -endif::osp[] - -ifdef::azure[] -[id="installation-configuration-parameters-additional-azure_{context}"] -== Additional Azure configuration parameters - -Additional Azure configuration parameters are described in the following table: - -.Additional Azure parameters -[cols=".^2,.^3a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`compute.platform.azure.encryptionAtHost` -|Enables host-level encryption for compute machines. You can enable this encryption alongside user-managed server-side encryption. This feature encrypts temporary, ephemeral, cached and un-managed disks on the VM host. This is not a prerequisite for user-managed server-side encryption. -|`true` or `false`. The default is `false`. - -|`compute.platform.azure.osDisk.diskSizeGB` -|The Azure disk size for the VM. -|Integer that represents the size of the disk in GB. The default is `128`. - -|`compute.platform.azure.osDisk.diskType` -|Defines the type of disk. -|`standard_LRS`, `premium_LRS`, or `standardSSD_LRS`. The default is `premium_LRS`. - -|`compute.platform.azure.ultraSSDCapability` -|Enables the use of Azure ultra disks for persistent storage on compute nodes. This requires that your Azure region and zone have ultra disks available. -|`Enabled`, `Disabled`. The default is `Disabled`. - -|`compute.platform.azure.osDisk.diskEncryptionSet.resourceGroup` -|The name of the Azure resource group that contains the disk encryption set from the installation prerequisites. This resource group should be different from the resource group where you install the cluster to avoid deleting your Azure encryption key when the cluster is destroyed. This value is only necessary if you intend to install the cluster with user-managed disk encryption. -|String, for example `production_encryption_resource_group`. - -|`compute.platform.azure.osDisk.diskEncryptionSet.name` -|The name of the disk encryption set that contains the encryption key from the installation prerequisites. -|String, for example `production_disk_encryption_set`. 
- -|`compute.platform.azure.osDisk.diskEncryptionSet.subscriptionId` -|Defines the Azure subscription of the disk encryption set where the disk encryption set resides. This secondary disk encryption set is used to encrypt compute machines. -|String, in the format `00000000-0000-0000-0000-000000000000`. - -|`compute.platform.azure.vmNetworkingType` -|Enables accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, improving its networking performance. If the instance type of the compute machines supports `Accelerated` networking, the installer enables `Accelerated` networking by default; otherwise, the default networking type is `Basic`. -|`Accelerated` or `Basic`. - -|`compute.platform.azure.type` -|Defines the Azure instance type for compute machines. -|String - -|`compute.platform.azure.zones` -|The availability zones where the installation program creates compute machines. -|String list - -|`controlPlane.platform.azure.type` -|Defines the Azure instance type for control plane machines. -|String - -|`controlPlane.platform.azure.zones` -|The availability zones where the installation program creates control plane machines. -|String list - -|`platform.azure.defaultMachinePlatform.encryptionAtHost` -|Enables host-level encryption for compute machines. You can enable this encryption alongside user-managed server-side encryption. This feature encrypts temporary, ephemeral, cached, and un-managed disks on the VM host. This parameter is not a prerequisite for user-managed server-side encryption. -|`true` or `false`. The default is `false`. - -|`platform.azure.defaultMachinePlatform.osDisk.diskEncryptionSet.name` -|The name of the disk encryption set that contains the encryption key from the installation prerequisites. -|String, for example, `production_disk_encryption_set`. - -|`platform.azure.defaultMachinePlatform.osDisk.diskEncryptionSet.resourceGroup` -|The name of the Azure resource group that contains the disk encryption set from the installation prerequisites. To avoid deleting your Azure encryption key when the cluster is destroyed, this resource group must be different from the resource group where you install the cluster. This value is necessary only if you intend to install the cluster with user-managed disk encryption. -|String, for example, `production_encryption_resource_group`. - -|`platform.azure.defaultMachinePlatform.osDisk.diskEncryptionSet.subscriptionId` -|Defines the Azure subscription of the disk encryption set where the disk encryption set resides. This secondary disk encryption set is used to encrypt compute machines. -|String, in the format `00000000-0000-0000-0000-000000000000`. - -|`platform.azure.defaultMachinePlatform.osDisk.diskSizeGB` -|The Azure disk size for the VM. -|Integer that represents the size of the disk in GB. The default is `128`. - -|`platform.azure.defaultMachinePlatform.osDisk.diskType` -|Defines the type of disk. -|`premium_LRS` or `standardSSD_LRS`. The default is `premium_LRS`. - -|`platform.azure.defaultMachinePlatform.type` -|The Azure instance type for control plane and compute machines. -|The Azure instance type. - -|`platform.azure.defaultMachinePlatform.zones` -|The availability zones where the installation program creates compute and control plane machines. -|String list. - -|`controlPlane.platform.azure.encryptionAtHost` -|Enables host-level encryption for control plane machines. You can enable this encryption alongside user-managed server-side encryption.
This feature encrypts temporary, ephemeral, cached, and un-managed disks on the VM host. This is not a prerequisite for user-managed server-side encryption. -|`true` or `false`. The default is `false`. - -|`controlPlane.platform.azure.osDisk.diskEncryptionSet.resourceGroup` -|The name of the Azure resource group that contains the disk encryption set from the installation prerequisites. This resource group should be different from the resource group where you install the cluster to avoid deleting your Azure encryption key when the cluster is destroyed. This value is only necessary if you intend to install the cluster with user-managed disk encryption. -|String, for example `production_encryption_resource_group`. - -|`controlPlane.platform.azure.osDisk.diskEncryptionSet.name` -|The name of the disk encryption set that contains the encryption key from the installation prerequisites. -|String, for example `production_disk_encryption_set`. - -|`controlPlane.platform.azure.osDisk.diskEncryptionSet.subscriptionId` -|Defines the Azure subscription of the disk encryption set where the disk encryption set resides. This secondary disk encryption set is used to encrypt control plane machines. -|String, in the format `00000000-0000-0000-0000-000000000000`. - -|`controlPlane.platform.azure.osDisk.diskSizeGB` -|The Azure disk size for the VM. -|Integer that represents the size of the disk in GB. The default is `1024`. - -|`controlPlane.platform.azure.osDisk.diskType` -|Defines the type of disk. -|`premium_LRS` or `standardSSD_LRS`. The default is `premium_LRS`. - -|`controlPlane.platform.azure.ultraSSDCapability` -|Enables the use of Azure ultra disks for persistent storage on control plane machines. This requires that your Azure region and zone have ultra disks available. -|`Enabled`, `Disabled`. The default is `Disabled`. - -|`controlPlane.platform.azure.vmNetworkingType` -|Enables accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, improving its networking performance. If the instance type of the control plane machines supports `Accelerated` networking, the installer enables `Accelerated` networking by default; otherwise, the default networking type is `Basic`. -|`Accelerated` or `Basic`. - -|`platform.azure.baseDomainResourceGroupName` -|The name of the resource group that contains the DNS zone for your base domain. -|String, for example `production_cluster`. - -|`platform.azure.resourceGroupName` -| The name of an already existing resource group to install your cluster to. This resource group must be empty and only used for this specific cluster; the cluster components assume ownership of all resources in the resource group. If you limit the service principal scope of the installation program to this resource group, you must ensure all other resources used by the installation program in your environment have the necessary permissions, such as the public DNS zone and virtual network. Destroying the cluster by using the installation program deletes this resource group. -|String, for example `existing_resource_group`. - -|`platform.azure.outboundType` -|The outbound routing strategy used to connect your cluster to the internet. If -you are using user-defined routing, you must have pre-existing networking -available where the outbound routing has already been configured prior to -installing a cluster. The installation program is not responsible for -configuring user-defined routing. -|`LoadBalancer` or `UserDefinedRouting`. The default is `LoadBalancer`.
- -|`platform.azure.region` -|The name of the Azure region that hosts your cluster. -|Any valid region name, such as `centralus`. - -|`platform.azure.zone` -|List of availability zones to place machines in. For high availability, specify -at least two zones. -|List of zones, for example `["1", "2", "3"]`. - -|`platform.azure.defaultMachinePlatform.ultraSSDCapability` -|Enables the use of Azure ultra disks for persistent storage on control plane and compute machines. This requires that your Azure region and zone have ultra disks available. -|`Enabled`, `Disabled`. The default is `Disabled`. - -|`platform.azure.networkResourceGroupName` -|The name of the resource group that contains the existing VNet that you want to deploy your cluster to. This name cannot be the same as the `platform.azure.baseDomainResourceGroupName`. -|String. - -|`platform.azure.virtualNetwork` -|The name of the existing VNet that you want to deploy your cluster to. -|String. - -|`platform.azure.controlPlaneSubnet` -|The name of the existing subnet in your VNet that you want to deploy your control plane machines to. -|Valid CIDR, for example `10.0.0.0/16`. - -|`platform.azure.computeSubnet` -|The name of the existing subnet in your VNet that you want to deploy your compute machines to. -|Valid CIDR, for example `10.0.0.0/16`. - -|`platform.azure.cloudName` -|The name of the Azure cloud environment that is used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the default value `AzurePublicCloud` is used. -|Any valid cloud environment, such as `AzurePublicCloud` or `AzureUSGovernmentCloud`. - -|`platform.azure.defaultMachinePlatform.vmNetworkingType` -|Enables accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, improving its networking performance. -|`Accelerated` or `Basic`. If the instance type of the control plane and compute machines supports `Accelerated` networking, the installer enables `Accelerated` networking by default; otherwise, the default networking type is `Basic`. - -|==== - -[NOTE] -==== -You cannot customize -link:https://azure.microsoft.com/en-us/global-infrastructure/availability-zones/[Azure Availability Zones] -or -link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-using-tags[Use tags to organize your Azure resources] -with an Azure cluster. -==== -endif::azure[] - -ifdef::gcp[] -[id="installation-configuration-parameters-additional-gcp_{context}"] -== Additional Google Cloud Platform (GCP) configuration parameters - -Additional GCP configuration parameters are described in the following table: - -.Additional GCP parameters -[cols=".^1,.^6a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`platform.gcp.network` -|The name of the existing Virtual Private Cloud (VPC) where you want to deploy your cluster. If you want to deploy your cluster into a shared VPC, you must set `platform.gcp.networkProjectID` with the name of the GCP project that contains the shared VPC. -|String. - -|`platform.gcp.networkProjectID` -|Optional. The name of the GCP project that contains the shared VPC where you want to deploy your cluster. -|String. - -|`platform.gcp.projectID` -|The name of the GCP project where the installation program installs the cluster. -|String. - -|`platform.gcp.region` -|The name of the GCP region that hosts your cluster. -|Any valid region name, such as `us-central1`.
- -|`platform.gcp.controlPlaneSubnet` -|The name of the existing subnet where you want to deploy your control plane machines. -|The subnet name. - -|`platform.gcp.computeSubnet` -|The name of the existing subnet where you want to deploy your compute machines. -|The subnet name. - -|`platform.gcp.licenses` -|A list of license URLs that must be applied to the compute images. -[IMPORTANT] -==== -The `licenses` parameter is a deprecated field and nested virtualization is enabled by default. It is not recommended to use this field. -==== -|Any license available with the link:https://cloud.google.com/compute/docs/reference/rest/v1/licenses/list[license API], such as the license to enable link:https://cloud.google.com/compute/docs/instances/nested-virtualization/overview[nested virtualization]. You cannot use this parameter with a mechanism that generates pre-built images. Using a license URL forces the installation program to copy the source image before use. - -|`platform.gcp.defaultMachinePlatform.zones` -|The availability zones where the installation program creates machines. -|A list of valid link:https://cloud.google.com/compute/docs/regions-zones#available[GCP availability zones], such as `us-central1-a`, in a -link:https://yaml.org/spec/1.2/spec.html#sequence//[YAML sequence]. - -|`platform.gcp.defaultMachinePlatform.osDisk.diskSizeGB` -|The size of the disk in gigabytes (GB). -|Any size between 16 GB and 65536 GB. - -|`platform.gcp.defaultMachinePlatform.osDisk.diskType` -|The link:https://cloud.google.com/compute/docs/disks#disk-types[GCP disk type]. -|Either the default `pd-ssd` or the `pd-standard` disk type. The control plane nodes must be the `pd-ssd` disk type. Compute nodes can be either type. - -|`platform.gcp.defaultMachinePlatform.tags` -|Optional. Additional network tags to add to the control plane and compute machines. -|One or more strings, for example `network-tag1`. - -|`platform.gcp.defaultMachinePlatform.type` -|The link:https://cloud.google.com/compute/docs/machine-types[GCP machine type] for control plane and compute machines. -|The GCP machine type, for example `n1-standard-4`. - -|`platform.gcp.defaultMachinePlatform.osDisk.encryptionKey.kmsKey.name` -|The name of the customer managed encryption key to be used for machine disk encryption. -|The encryption key name. - -|`platform.gcp.defaultMachinePlatform.osDisk.encryptionKey.kmsKey.keyRing` -|The name of the Key Management Service (KMS) key ring to which the KMS key belongs. -|The KMS key ring name. - -|`platform.gcp.defaultMachinePlatform.osDisk.encryptionKey.kmsKey.location` -|The link:https://cloud.google.com/kms/docs/locations[GCP location] in which the KMS key ring exists. -|The GCP location. - -|`platform.gcp.defaultMachinePlatform.osDisk.encryptionKey.kmsKey.projectID` -|The ID of the project in which the KMS key ring exists. This value defaults to the value of the `platform.gcp.projectID` parameter if it is not set. -|The GCP project ID. - -|`platform.gcp.defaultMachinePlatform.osDisk.encryptionKey.kmsKeyServiceAccount` -|The GCP service account used for the encryption request for control plane and compute machines. If absent, the Compute Engine default service account is used. For more information about GCP service accounts, see Google's documentation on link:https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account[service accounts]. -|The GCP service account email, for example `@.iam.gserviceaccount.com`. 
- -|`platform.gcp.defaultMachinePlatform.secureBoot` -|Whether to enable Shielded VM secure boot for all machines in the cluster. Shielded VMs have additional security protocols such as secure boot, firmware and integrity monitoring, and rootkit protection. For more information on Shielded VMs, see Google's documentation on link:https://cloud.google.com/shielded-vm[Shielded VMs]. -|`Enabled` or `Disabled`. The default value is `Disabled`. - -|`platform.gcp.defaultMachinePlatform.confidentialCompute` -|Whether to use Confidential VMs for all machines in the cluster. Confidential VMs provide encryption for data during processing. For more information on Confidential computing, see Google's documentation on link:https://cloud.google.com/confidential-computing[Confidential computing]. -|`Enabled` or `Disabled`. The default value is `Disabled`. - -|`platform.gcp.defaultMachinePlatform.onHostMaintenance` -|Specifies the behavior of all VMs during a host maintenance event, such as a software or hardware update. For Confidential VMs, this parameter must be set to `Terminate`. Confidential VMs do not support live VM migration. -|`Terminate` or `Migrate`. The default value is `Migrate`. - -|`controlPlane.platform.gcp.osDisk.encryptionKey.kmsKey.name` -|The name of the customer managed encryption key to be used for control plane machine disk encryption. -|The encryption key name. - -|`controlPlane.platform.gcp.osDisk.encryptionKey.kmsKey.keyRing` -|For control plane machines, the name of the KMS key ring to which the KMS key belongs. -|The KMS key ring name. - -|`controlPlane.platform.gcp.osDisk.encryptionKey.kmsKey.location` -|For control plane machines, the GCP location in which the key ring exists. For more information about KMS locations, see Google's documentation on link:https://cloud.google.com/kms/docs/locations[Cloud KMS locations]. -|The GCP location for the key ring. - -|`controlPlane.platform.gcp.osDisk.encryptionKey.kmsKey.projectID` -|For control plane machines, the ID of the project in which the KMS key ring exists. This value defaults to the VM project ID if not set. -|The GCP project ID. - -|`controlPlane.platform.gcp.osDisk.encryptionKey.kmsKeyServiceAccount` -|The GCP service account used for the encryption request for control plane machines. If absent, the Compute Engine default service account is used. For more information about GCP service accounts, see Google's documentation on link:https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account[service accounts]. -|The GCP service account email, for example `@.iam.gserviceaccount.com`. - -|`controlPlane.platform.gcp.osDisk.diskSizeGB` -|The size of the disk in gigabytes (GB). This value applies to control plane machines. -|Any integer between 16 and 65536. - -|`controlPlane.platform.gcp.osDisk.diskType` -|The link:https://cloud.google.com/compute/docs/disks#disk-types[GCP disk type] for control plane machines. -|Control plane machines must use the `pd-ssd` disk type, which is the default. - -|`controlPlane.platform.gcp.tags` -|Optional. Additional network tags to add to the control plane machines. If set, this parameter overrides the `platform.gcp.defaultMachinePlatform.tags` parameter for control plane machines. -|One or more strings, for example `control-plane-tag1`. - -|`controlPlane.platform.gcp.type` -|The link:https://cloud.google.com/compute/docs/machine-types[GCP machine type] for control plane machines. 
If set, this parameter overrides the `platform.gcp.defaultMachinePlatform.type` parameter. -|The GCP machine type, for example `n1-standard-4`. - -|`controlPlane.platform.gcp.zones` -|The availability zones where the installation program creates control plane machines. -|A list of valid link:https://cloud.google.com/compute/docs/regions-zones#available[GCP availability zones], such as `us-central1-a`, in a -link:https://yaml.org/spec/1.2/spec.html#sequence//[YAML sequence]. - -|`controlPlane.platform.gcp.secureBoot` -|Whether to enable Shielded VM secure boot for control plane machines. Shielded VMs have additional security protocols such as secure boot, firmware and integrity monitoring, and rootkit protection. For more information on Shielded VMs, see Google's documentation on link:https://cloud.google.com/shielded-vm[Shielded VMs]. -|`Enabled` or `Disabled`. The default value is `Disabled`. - -|`controlPlane.platform.gcp.confidentialCompute` -|Whether to enable Confidential VMs for control plane machines. Confidential VMs provide encryption for data while it is being processed. For more information on Confidential VMs, see Google's documentation on link:https://cloud.google.com/confidential-computing[Confidential Computing]. -|`Enabled` or `Disabled`. The default value is `Disabled`. - -|`controlPlane.platform.gcp.onHostMaintenance` -|Specifies the behavior of control plane VMs during a host maintenance event, such as a software or hardware update. For Confidential VMs, this parameter must be set to `Terminate`. Confidential VMs do not support live VM migration. -|`Terminate` or `Migrate`. The default value is `Migrate`. - -|`compute.platform.gcp.osDisk.encryptionKey.kmsKey.name` -|The name of the customer managed encryption key to be used for compute machine disk encryption. -|The encryption key name. - -|`compute.platform.gcp.osDisk.encryptionKey.kmsKey.keyRing` -|For compute machines, the name of the KMS key ring to which the KMS key belongs. -|The KMS key ring name. - -|`compute.platform.gcp.osDisk.encryptionKey.kmsKey.location` -|For compute machines, the GCP location in which the key ring exists. For more information about KMS locations, see Google's documentation on link:https://cloud.google.com/kms/docs/locations[Cloud KMS locations]. -|The GCP location for the key ring. - -|`compute.platform.gcp.osDisk.encryptionKey.kmsKey.projectID` -|For compute machines, the ID of the project in which the KMS key ring exists. This value defaults to the VM project ID if not set. -|The GCP project ID. - -|`compute.platform.gcp.osDisk.encryptionKey.kmsKeyServiceAccount` -|The GCP service account used for the encryption request for compute machines. If this value is not set, the Compute Engine default service account is used. For more information about GCP service accounts, see Google's documentation on link:https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account[service accounts]. -|The GCP service account email, for example `@.iam.gserviceaccount.com`. - -|`compute.platform.gcp.osDisk.diskSizeGB` -|The size of the disk in gigabytes (GB). This value applies to compute machines. -|Any integer between 16 and 65536. - -|`compute.platform.gcp.osDisk.diskType` -|The link:https://cloud.google.com/compute/docs/disks#disk-types[GCP disk type] for compute machines. -|Either the default `pd-ssd` or the `pd-standard` disk type. - -|`compute.platform.gcp.tags` -|Optional. Additional network tags to add to the compute machines. 
If set, this parameter overrides the `platform.gcp.defaultMachinePlatform.tags` parameter for compute machines. -|One or more strings, for example `compute-network-tag1`. - -|`compute.platform.gcp.type` -|The link:https://cloud.google.com/compute/docs/machine-types[GCP machine type] for compute machines. If set, this parameter overrides the `platform.gcp.defaultMachinePlatform.type` parameter. -|The GCP machine type, for example `n1-standard-4`. - -|`compute.platform.gcp.zones` -|The availability zones where the installation program creates compute machines. -|A list of valid link:https://cloud.google.com/compute/docs/regions-zones#available[GCP availability zones], such as `us-central1-a`, in a -link:https://yaml.org/spec/1.2/spec.html#sequence//[YAML sequence]. - -|`compute.platform.gcp.secureBoot` -|Whether to enable Shielded VM secure boot for compute machines. Shielded VMs have additional security protocols such as secure boot, firmware and integrity monitoring, and rootkit protection. For more information on Shielded VMs, see Google's documentation on link:https://cloud.google.com/shielded-vm[Shielded VMs]. -|`Enabled` or `Disabled`. The default value is `Disabled`. - -|`compute.platform.gcp.confidentialCompute` -|Whether to enable Confidential VMs for compute machines. Confidential VMs provide encryption for data while it is being processed. For more information on Confidential VMs, see Google's documentation on link:https://cloud.google.com/confidential-computing[Confidential Computing]. -|`Enabled` or `Disabled`. The default value is `Disabled`. - -|`compute.platform.gcp.onHostMaintenance` -|Specifies the behavior of compute VMs during a host maintenance event, such as a software or hardware update. For Confidential VMs, this parameter must be set to `Terminate`. Confidential VMs do not support live VM migration. -|`Terminate` or `Migrate`. The default value is `Migrate`. - -|==== - -endif::gcp[] -ifdef::ibm-cloud[] -[id="installation-configuration-parameters-additional-ibm-cloud_{context}"] -== Additional IBM Cloud VPC configuration parameters - -Additional IBM Cloud VPC configuration parameters are described in the following table: - -.Additional IBM Cloud VPC parameters -[cols=".^1,.^6a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`platform.ibmcloud.resourceGroupName` -|The name of an existing resource group. -By default, an installer-provisioned VPC and cluster resources are placed in this resource group. When not specified, the installation program creates the resource group for the cluster. -If you are deploying the cluster into an existing VPC, the installer-provisioned cluster resources are placed in this resource group. When not specified, the installation program creates the resource group for the cluster. The VPC resources that you have provisioned must exist in a resource group that you specify using the `networkResourceGroupName` parameter. -In either case, this resource group must only be used for a single cluster installation, as the cluster components assume ownership of all of the resources in the resource group. [^1^] -|String, for example `existing_resource_group`. - -|`platform.ibmcloud.networkResourceGroupName` -|The name of an existing resource group. This resource contains the existing VPC and subnets to which the cluster will be deployed. This parameter is required when deploying the cluster to a VPC that you have provisioned. -|String, for example `existing_network_resource_group`. 
- -|`platform.ibmcloud.dedicatedHosts.profile` -|The new dedicated host to create. If you specify a value for `platform.ibmcloud.dedicatedHosts.name`, this parameter is not required. -|Valid IBM Cloud VPC dedicated host profile, such as `cx2-host-152x304`. [^2^] - -|`platform.ibmcloud.dedicatedHosts.name` -|An existing dedicated host. If you specify a value for `platform.ibmcloud.dedicatedHosts.profile`, this parameter is not required. -|String, for example `my-dedicated-host-name`. - -|`platform.ibmcloud.type` -|The instance type for all IBM Cloud VPC machines. -|Valid IBM Cloud VPC instance type, such as `bx2-8x32`. [^2^] - -|`platform.ibmcloud.vpcName` -| The name of the existing VPC that you want to deploy your cluster to. -| String. - -|`platform.ibmcloud.controlPlaneSubnets` -| The name(s) of the existing subnet(s) in your VPC that you want to deploy your control plane machines to. Specify a subnet for each availability zone. -| String array - -|`platform.ibmcloud.computeSubnets` -| The name(s) of the existing subnet(s) in your VPC that you want to deploy your compute machines to. Specify a subnet for each availability zone. Subnet IDs are not supported. -| String array - -|==== -[.small] --- -1. Whether you define an existing resource group, or if the installer creates one, determines how the resource group is treated when the cluster is uninstalled. If you define a resource group, the installer removes all of the installer-provisioned resources, but leaves the resource group alone; if a resource group is created as part of the installation, the installer removes all of the installer-provisioned resources and the resource group. -2. To determine which profile best meets your needs, see https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui[Instance Profiles] in the IBM documentation. --- -endif::ibm-cloud[] - -ifdef::ibm-power-vs[] -[id="installation-configuration-parameters-additional-ibm-cloud_{context}"] -== Additional {ibmpowerProductName} Virtual Server configuration parameters - -Additional {ibmpowerProductName} Virtual Server configuration parameters are described in the following table: - -.Additional {ibmpowerProductName} Virtual Server parameters -[cols=".^1,.^6a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`platform.powervs.userID` -|The UserID is the login for the user's IBM Cloud account. -|String. For example `existing_user_id`. - -|`platform.powervs.powervsResourceGroup` -|The PowerVSResourceGroup is the resource group in which {ibmpowerProductName} Virtual Server resources are created. If using an existing VPC, the existing VPC and subnets should be in this resource group. -|String. For example `existing_resource_group`. - -|`platform.powervs.region` -|Specifies the IBM Cloud colo region where the cluster will be created. -|String. For example `existing_region`. - -|`platform.powervs.zone` -|Specifies the IBM Cloud colo zone where the cluster will be created. -|String. For example `existing_zone`. - -|`platform.powervs.serviceInstanceID` -|The ServiceInstanceID is the ID of the Power IAAS instance created from the IBM Cloud Catalog. -|String. For example `existing_service_instance_ID`. - -|`platform.powervs.vpcRegion` -|Specifies the IBM Cloud region in which to create VPC resources. -|String. For example `existing_vpc_region`. - -|`platform.powervs.vpcSubnets` -|Specifies existing subnets (by name) where cluster resources will be created. -|String. For example `powervs_region_example_subnet`.
- -|`platform.powervs.vpcName` -|Specifies the IBM Cloud VPC name. -|String. For example `existing_vpcName`. - -|`platform.powervs.cloudConnectionName` -|The CloudConnectionName is the name of an existing PowerVS Cloud connection. -|String. For example `existing_cloudConnectionName`. - -|`platform.powervs.clusterOSImage` -|The ClusterOSImage is a pre-created {ibmpowerProductName} Virtual Server boot image that overrides the default image for cluster nodes. -|String. For example `existing_cluster_os_image`. - -|`platform.powervs.defaultMachinePlatform` -|The DefaultMachinePlatform is the default configuration used when installing on {ibmpowerProductName} Virtual Server for machine pools that do not define their own platform configuration. -|String. For example `existing_machine_platform`. - -//|`platform.ibmcloud.dedicatedHosts.profile` -//|The new dedicated host to create. If you specify a value for `platform.ibmcloud.dedicatedHosts.name`, this parameter is not required. -//|Valid IBM Cloud VPC dedicated host profile, such as `cx2-host-152x304`. [^2^] - -//|`platform.ibmcloud.dedicatedHosts.name` -//|An existing dedicated host. If you specify a value for `platform.ibmcloud.dedicatedHosts.profile`, this parameter is not required. -//|String, for example `my-dedicated-host-name`. - -//|`platform.ibmcloud.type` -//|The instance type for all IBM Cloud VPC machines. -//|Valid IBM Cloud VPC instance type, such as `bx2-8x32`. [^2^] - -|`platform.powervs.memoryGiB` -|The size of a virtual machine's memory, in GB. -|An integer number of GB that is at least 2 and no more than 64, depending on the machine type. - -|`platform.powervs.procType` -|The ProcType defines the processor sharing model for the instance. -|The valid values are Capped, Dedicated, and Shared. - -|`platform.powervs.processors` -|The Processors defines the processing units for the instance. -|The number of processors must be from 0.5 to 32 cores, in increments of 0.25. - -|`platform.powervs.sysType` -|The SysType defines the system type for the instance. -|The system type must be either `e980` or `s922`. - -|==== -[.small] --- -1. Whether you define an existing resource group, or if the installer creates one, determines how the resource group is treated when the cluster is uninstalled. If you define a resource group, the installer removes all of the installer-provisioned resources, but leaves the resource group alone; if a resource group is created as part of the installation, the installer removes all of the installer-provisioned resources and the resource group. -2. To determine which profile best meets your needs, see https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui[Instance Profiles] in the IBM documentation. --- -endif::ibm-power-vs[] - -ifdef::vsphere[] - -[id="installation-configuration-parameters-additional-vsphere_{context}"] -== Additional VMware vSphere configuration parameters - -Additional VMware vSphere configuration parameters are described in the following table: - -.Additional VMware vSphere cluster parameters -[cols=".^2,.^3a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`platform.vsphere.apiVIPs` -|Virtual IP (VIP) addresses that you configured for control plane API access. -a|Multiple IP addresses - -|`platform.vsphere.diskType` -|Optional. The disk provisioning method. This value defaults to the vSphere default storage policy if not set. -|Valid values are `thin`, `thick`, or `eagerZeroedThick`.
- -|`platform.vsphere.failureDomains` -|Establishes the relationships between a region and zone. You define a failure domain by using vCenter objects, such as a `datastore` object. A failure domain defines the vCenter location for {product-title} cluster nodes. -|String - -|`platform.vsphere.failureDomains.topology.networks` -|Lists any network in the vCenter instance that contains the virtual IP addresses and DNS records that you configured. -|String - -|`platform.vsphere.failureDomains.region` -|If you define multiple failure domains for your cluster, you must attach the tag to each vCenter datacenter. To define a region, use a tag from the `openshift-region` tag category. For a single vSphere datacenter environment, you do not need to attach a tag, but you must enter an alphanumeric value, such as `datacenter`, for the parameter. -|String - -|`platform.vsphere.failureDomains.zone` -|If you define multiple failure domains for your cluster, you must attach the tag to each vCenter cluster. To define a zone, use a tag from the `openshift-zone` tag category. For a single vSphere datacenter environment, you do not need to attach a tag, but you must enter an alphanumeric value, such as `cluster`, for the parameter. -|String - -|`platform.vsphere.failureDomains.template` -|Specify the absolute path to a pre-existing {op-system-first} image template or virtual machine. The installation program can use the image template or virtual machine to quickly install {op-system} on vSphere hosts. Consider using this parameter as an alternative to uploading an {op-system} image on vSphere hosts. The parameter is available for use only on installer-provisioned infrastructure. -|String - -|`platform.vsphere.ingressVIPs` -|Virtual IP (VIP) addresses that you configured for cluster Ingress. -|Multiple IP addresses - -|`platform.vsphere` -| Describes your account on the cloud platform that hosts your cluster. You can use the parameter to customize the platform. When providing additional configuration settings for compute and control plane machines in the machine pool, the parameter is optional. You can only specify one vCenter server for your {product-title} cluster. -|String - -|`platform.vsphere.vcenters` -|Lists any fully-qualified hostname or IP address of a vCenter server. -|String - -|`platform.vsphere.vcenters.datacenters` -|Lists and defines the datacenters where {product-title} virtual machines (VMs) operate. The list of datacenters must match the list of datacenters specified in the `failureDomains` field. -|String -|==== - - -[id="deprecated-parameters-vsphere_{context}"] -== Deprecated VMware vSphere configuration parameters - -In {product-title} 4.13, the following vSphere configuration parameters are deprecated. You can continue to use these parameters, but the installation program does not automatically specify these parameters in the `install-config.yaml` file. - -The following table lists each deprecated vSphere configuration parameter: - -.Deprecated VMware vSphere cluster parameters -[cols=".^2,.^3a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`platform.vsphere.apiVIP` -|The virtual IP (VIP) address that you configured for control plane API access. -a|An IP address, for example `128.0.0.1`. - -[NOTE] -==== -In {product-title} 4.12 and later, the `apiVIP` configuration setting is deprecated. Instead, use a `List` format to enter a value in the `apiVIPs` configuration setting. 
-==== - -|`platform.vsphere.cluster` -|The vCenter cluster to install the {product-title} cluster in. -|String - -|`platform.vsphere.datacenter` -|Defines the datacenter where {product-title} virtual machines (VMs) operate. -|String - -|`platform.vsphere.defaultDatastore` -|The name of the default datastore to use for provisioning volumes. -|String - -|`platform.vsphere.folder` -|Optional. The absolute path of an existing folder where the installation program creates the virtual machines. If you do not provide this value, the installation program creates a folder that is named with the infrastructure ID in the data center virtual machine folder. -|String, for example, `//vm//`. - -|`platform.vsphere.ingressVIP` -|Virtual IP (VIP) addresses that you configured for cluster Ingress. -a|An IP address, for example `128.0.0.1`. - -[NOTE] -==== -In {product-title} 4.12 and later, the `ingressVIP` configuration setting is deprecated. Instead, use a `List` format to enter a value in the `ingressVIPs` configuration setting. -==== - -|`platform.vsphere.network` -|The network in the vCenter instance that contains the virtual IP addresses and DNS records that you configured. -|String - -|`platform.vsphere.password` -|The password for the vCenter user name. -|String - -|`platform.vsphere.resourcePool` -|Optional. The absolute path of an existing resource pool where the installation program creates the virtual machines. If you do not specify a value, the installation program installs the resources in the root of the cluster under `//host//Resources`. -|String, for example, `//host//Resources//`. - -|`platform.vsphere.username` -|The user name to use to connect to the vCenter instance with. This user must have at least -the roles and privileges that are required for -link:https://github.com/vmware-archive/vsphere-storage-for-kubernetes/blob/master/documentation/vcp-roles.md[static or dynamic persistent volume provisioning] -in vSphere. -|String - -|`platform.vsphere.vCenter` -|The fully-qualified hostname or IP address of a vCenter server. -|String -|==== - - -[id="installation-configuration-parameters-optional-vsphere_{context}"] -== Optional VMware vSphere machine pool configuration parameters - -Optional VMware vSphere machine pool configuration parameters are described in the following table: - -.Optional VMware vSphere machine pool parameters -[cols=".^2,.^3a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`platform.vsphere.clusterOSImage` -|The location from which the installation program downloads the {op-system-first} image. Before setting a path value for this parameter, ensure that the {op-system} image's version matches the version of {op-system} that you installed on your {product-title} cluster. -|An HTTP or HTTPS URL, optionally with a SHA-256 checksum. For example, `\https://mirror.openshift.com/images/rhcos--vmware..ova`. - -|`platform.vsphere.osDisk.diskSizeGB` -|The size of the disk in gigabytes. -|Integer - -|`platform.vsphere.cpus` -|The total number of virtual processor cores to assign a virtual machine. The value of `platform.vsphere.cpus` must be a multiple of `platform.vsphere.coresPerSocket` value. -|Integer - -|`platform.vsphere.coresPerSocket` -|The number of cores per socket in a virtual machine. The number of virtual sockets on the virtual machine is `platform.vsphere.cpus`/`platform.vsphere.coresPerSocket`. The default value for control plane nodes and worker nodes is `4` and `2`, respectively. 
-|Integer - -|`platform.vsphere.memoryMB` -|The size of a virtual machine's memory in megabytes. -|Integer -|==== -endif::vsphere[] - -ifdef::ash[] -[id="installation-configuration-parameters-additional-azure-stack-hub_{context}"] -== Additional Azure Stack Hub configuration parameters - -Additional Azure configuration parameters are described in the following table: - -.Additional Azure Stack Hub parameters -[cols=".^2,.^3a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`compute.platform.azure.osDisk.diskSizeGB` -|The Azure disk size for the VM. -|Integer that represents the size of the disk in GB. The default is `128`. - -|`compute.platform.azure.osDisk.diskType` -|Defines the type of disk. -|`standard_LRS` or `premium_LRS`. The default is `premium_LRS`. - -|`compute.platform.azure.type` -|Defines the azure instance type for compute machines. -|String - -|`controlPlane.platform.azure.osDisk.diskSizeGB` -|The Azure disk size for the VM. -|Integer that represents the size of the disk in GB. The default is `1024`. - -|`controlPlane.platform.azure.osDisk.diskType` -|Defines the type of disk. -|`premium_LRS`. - -|`controlPlane.platform.azure.type` -|Defines the azure instance type for control plane machines. -|String - -|`platform.azure.defaultMachinePlatform.osDisk.diskSizeGB` -|The Azure disk size for the VM. -|Integer that represents the size of the disk in GB. The default is `128`. - -|`platform.azure.defaultMachinePlatform.osDisk.diskType` -|Defines the type of disk. -|`standard_LRS` or `premium_LRS`. The default is `premium_LRS`. - -|`platform.azure.defaultMachinePlatform.type` -|The Azure instance type for control plane and compute machines. -|The Azure instance type. - -|`platform.azure.armEndpoint` -|The URL of the Azure Resource Manager endpoint that your Azure Stack Hub operator provides. -|String - -|`platform.azure.baseDomainResourceGroupName` -|The name of the resource group that contains the DNS zone for your base domain. -|String, for example `production_cluster`. - -|`platform.azure.region` -|The name of your Azure Stack Hub local region. -|String - -|`platform.azure.resourceGroupName` -|The name of an already existing resource group to install your cluster to. This resource group must be empty and only used for this specific cluster; the cluster components assume ownership of all resources in the resource group. If you limit the service principal scope of the installation program to this resource group, you must ensure all other resources used by the installation program in your environment have the necessary permissions, such as the public DNS zone and virtual network. Destroying the cluster by using the installation program deletes this resource group. -|String, for example `existing_resource_group`. - -|`platform.azure.outboundType` -|The outbound routing strategy used to connect your cluster to the internet. If -you are using user-defined routing, you must have pre-existing networking -available where the outbound routing has already been configured prior to -installing a cluster. The installation program is not responsible for -configuring user-defined routing. -|`LoadBalancer` or `UserDefinedRouting`. The default is `LoadBalancer`. - -|`platform.azure.cloudName` -|The name of the Azure cloud environment that is used to configure the Azure SDK with the appropriate Azure API endpoints. -|`AzureStackCloud` - -|`clusterOSImage` -|The URL of a storage blob in the Azure Stack environment that contains an {op-system} VHD. 
-|String, for example, \https://vhdsa.blob.example.example.com/vhd/rhcos-410.84.202112040202-0-azurestack.x86_64.vhd
-
-|====
-endif::ash[]
-
-ifdef::alibabacloud[]
-//From: https://github.com/openshift/installer/blob/master/data/data/install.openshift.io_installconfigs.yaml#L20; https://github.com/openshift/openshift-docs/pull/40651/files#r792388476
-
-[id="installation-configuration-parameters-additional-alibaba_{context}"]
-== Additional Alibaba Cloud configuration parameters
-
-Additional Alibaba Cloud configuration parameters are described in the following table. The `alibabacloud` parameters are the configuration used when installing on Alibaba Cloud. The `defaultMachinePlatform` parameters are the default configuration used when installing on Alibaba Cloud for machine pools that do not define their own platform configuration.
-
-These parameters apply to both compute machines and control plane machines where specified.
-
-[NOTE]
-====
-If defined, the parameters `compute.platform.alibabacloud` and `controlPlane.platform.alibabacloud` will overwrite `platform.alibabacloud.defaultMachinePlatform` settings for compute machines and control plane machines respectively.
-====
-
-.Optional {alibaba} parameters
-[cols=".^2,.^3,.^5a",options="header"]
-|====
-|Parameter|Description|Values
-
-|`compute.platform.alibabacloud.imageID`
-|The imageID used to create the ECS instance. ImageID must belong to the same region as the cluster.
-|String.
-
-|`compute.platform.alibabacloud.instanceType`
-|InstanceType defines the ECS instance type. Example: `ecs.g6.large`
-|String.
-
-|`compute.platform.alibabacloud.systemDiskCategory`
-|Defines the category of the system disk. Examples: `cloud_efficiency`, `cloud_essd`
-|String.
-
-|`compute.platform.alibabacloud.systemDiskSize`
-|Defines the size of the system disk in gibibytes (GiB).
-|Integer.
-
-|`compute.platform.alibabacloud.zones`
-|The list of availability zones that can be used. Examples: `cn-hangzhou-h`, `cn-hangzhou-j`
-|String list.
-
-|`controlPlane.platform.alibabacloud.imageID`
-|The imageID used to create the ECS instance. ImageID must belong to the same region as the cluster.
-|String.
-
-|`controlPlane.platform.alibabacloud.instanceType`
-|InstanceType defines the ECS instance type. Example: `ecs.g6.xlarge`
-|String.
-
-|`controlPlane.platform.alibabacloud.systemDiskCategory`
-|Defines the category of the system disk. Examples: `cloud_efficiency`, `cloud_essd`
-|String.
-
-|`controlPlane.platform.alibabacloud.systemDiskSize`
-|Defines the size of the system disk in gibibytes (GiB).
-|Integer.
-
-|`controlPlane.platform.alibabacloud.zones`
-|The list of availability zones that can be used. Examples: `cn-hangzhou-h`, `cn-hangzhou-j`
-|String list.
-
-|`platform.alibabacloud.region`
-|Required. The Alibaba Cloud region where the cluster will be created.
-|String.
-
-|`platform.alibabacloud.resourceGroupID`
-|The ID of an already existing resource group where the cluster will be installed. If empty, the installation program will create a new resource group for the cluster.
-|String.
-
-|`platform.alibabacloud.tags`
-|Additional keys and values to apply to all Alibaba Cloud resources created for the cluster.
-|Object.
-
-|`platform.alibabacloud.vpcID`
-|The ID of an already existing VPC where the cluster should be installed. If empty, the installation program will create a new VPC for the cluster.
-|String.
-
-|`platform.alibabacloud.vswitchIDs`
-|The ID list of already existing VSwitches where cluster resources will be created.
The existing VSwitches can only be used when also using existing VPC. If empty, the installation program will create new VSwitches for the cluster. -|String list. - -|`platform.alibabacloud.defaultMachinePlatform.imageID` -|For both compute machines and control plane machines, the image ID that should be used to create ECS instance. If set, the image ID should belong to the same region as the cluster. -|String. - -|`platform.alibabacloud.defaultMachinePlatform.instanceType` -|For both compute machines and control plane machines, the ECS instance type used to create the ECS instance. Example: `ecs.g6.xlarge` -|String. - -|`platform.alibabacloud.defaultMachinePlatform.systemDiskCategory` -|For both compute machines and control plane machines, the category of the system disk. Examples: `cloud_efficiency`, `cloud_essd`. -|String, for example "", `cloud_efficiency`, `cloud_essd`. - -|`platform.alibabacloud.defaultMachinePlatform.systemDiskSize` -|For both compute machines and control plane machines, the size of the system disk in gibibytes (GiB). The minimum is `120`. -|Integer. - -|`platform.alibabacloud.defaultMachinePlatform.zones` -|For both compute machines and control plane machines, the list of availability zones that can be used. Examples: `cn-hangzhou-h`, `cn-hangzhou-j` -|String list. - -|`platform.alibabacloud.privateZoneID` -|The ID of an existing private zone into which to add DNS records for the cluster's internal API. An existing private zone can only be used when also using existing VPC. The private zone must be associated with the VPC containing the subnets. Leave the private zone unset to have the installation program create the private zone on your behalf. -|String. - -|==== - -endif::alibabacloud[] - -ifdef::nutanix[] -[id="installation-configuration-parameters-additional-vsphere_{context}"] -== Additional Nutanix configuration parameters - -Additional Nutanix configuration parameters are described in the following table: - -.Additional Nutanix cluster parameters -[cols=".^2,.^3a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`compute.platform.nutanix.categories.key` -|The name of a prism category key to apply to compute VMs. This parameter must be accompanied by the `value` parameter, and both `key` and `value` parameters must exist in Prism Central. For more information on categories, see link:https://portal.nutanix.com/page/documents/details?targetId=Prism-Central-Guide-vpc_2022_6:ssp-ssp-categories-manage-pc-c.html[Category management]. -|String - -|`compute.platform.nutanix.categories.value` -|The value of a prism category key-value pair to apply to compute VMs. This parameter must be accompanied by the `key` parameter, and both `key` and `value` parameters must exist in Prism Central. -|String - -|`compute.platform.nutanix.project.type` -|The type of identifier you use to select a project for compute VMs. Projects define logical groups of user roles for managing permissions, networks, and other parameters. For more information on projects, see link:https://portal.nutanix.com/page/documents/details?targetId=Prism-Central-Guide-vpc_2022_9:ssp-app-mgmt-project-env-c.html[Projects Overview]. -|`name` or `uuid` - -|`compute.platform.nutanix.project.name` or `compute.platform.nutanix.project.uuid` -|The name or UUID of a project with which compute VMs are associated. This parameter must be accompanied by the `type` parameter. -|String - -|`compute.platform.nutanix.bootType` -|The boot type that the compute machines use. 
You must use the `Legacy` boot type in {product-title} {product-version}. For more information on boot types, see link:https://portal.nutanix.com/page/documents/kbs/details?targetId=kA07V000000H3K9SAK[Understanding UEFI, Secure Boot, and TPM in the Virtualized Environment]. -|`Legacy`, `SecureBoot` or `UEFI`. The default is `Legacy`. - -|`controlPlane.platform.nutanix.categories.key` -|The name of a prism category key to apply to control plane VMs. This parameter must be accompanied by the `value` parameter, and both `key` and `value` parameters must exist in Prism Central. For more information on categories, see link:https://portal.nutanix.com/page/documents/details?targetId=Prism-Central-Guide-vpc_2022_6:ssp-ssp-categories-manage-pc-c.html[Category management]. -|String - -|`controlPlane.platform.nutanix.categories.value` -|The value of a prism category key-value pair to apply to control plane VMs. This parameter must be accompanied by the `key` parameter, and both `key` and `value` parameters must exist in Prism Central. -|String - -|`controlPlane.platform.nutanix.project.type` -|The type of identifier you use to select a project for control plane VMs. Projects define logical groups of user roles for managing permissions, networks, and other parameters. For more information on projects, see link:https://portal.nutanix.com/page/documents/details?targetId=Prism-Central-Guide-vpc_2022_9:ssp-app-mgmt-project-env-c.html[Projects Overview]. -|`name` or `uuid` - -|`controlPlane.platform.nutanix.project.name` or `controlPlane.platform.nutanix.project.uuid` -|The name or UUID of a project with which control plane VMs are associated. This parameter must be accompanied by the `type` parameter. -|String - -|`platform.nutanix.defaultMachinePlatform.categories.key` -|The name of a prism category key to apply to all VMs. This parameter must be accompanied by the `value` parameter, and both `key` and `value` parameters must exist in Prism Central. For more information on categories, see link:https://portal.nutanix.com/page/documents/details?targetId=Prism-Central-Guide-vpc_2022_6:ssp-ssp-categories-manage-pc-c.html[Category management]. -|String - -|`platform.nutanix.defaultMachinePlatform.categories.value` -|The value of a prism category key-value pair to apply to all VMs. This parameter must be accompanied by the `key` parameter, and both `key` and `value` parameters must exist in Prism Central. -|String - -|`platform.nutanix.defaultMachinePlatform.project.type` -|The type of identifier you use to select a project for all VMs. Projects define logical groups of user roles for managing permissions, networks, and other parameters. For more information on projects, see link:https://portal.nutanix.com/page/documents/details?targetId=Prism-Central-Guide-vpc_2022_9:ssp-app-mgmt-project-env-c.html[Projects Overview]. -|`name` or `uuid`. - -|`platform.nutanix.defaultMachinePlatform.project.name` or `platform.nutanix.defaultMachinePlatform.project.uuid` -|The name or UUID of a project with which all VMs are associated. This parameter must be accompanied by the `type` parameter. -|String - -|`platform.nutanix.defaultMachinePlatform.bootType` -|The boot type for all machines. You must use the `Legacy` boot type in {product-title} {product-version}. For more information on boot types, see link:https://portal.nutanix.com/page/documents/kbs/details?targetId=kA07V000000H3K9SAK[Understanding UEFI, Secure Boot, and TPM in the Virtualized Environment]. -|`Legacy`, `SecureBoot` or `UEFI`. The default is `Legacy`. 
-
-|`platform.nutanix.apiVIP`
-|The virtual IP (VIP) address that you configured for control plane API access.
-|IP address
-
-|`platform.nutanix.ingressVIP`
-|The virtual IP (VIP) address that you configured for cluster ingress.
-|IP address
-
-|`platform.nutanix.prismCentral.endpoint.address`
-|The Prism Central domain name or IP address.
-|String
-
-|`platform.nutanix.prismCentral.endpoint.port`
-|The port that is used to log in to Prism Central.
-|String
-
-|`platform.nutanix.prismCentral.password`
-|The password for the Prism Central user name.
-|String
-
-|`platform.nutanix.prismCentral.username`
-|The user name that is used to log in to Prism Central.
-|String
-
-|`platform.nutanix.prismElements.endpoint.address`
-|The Prism Element domain name or IP address. [^1^]
-|String
-
-|`platform.nutanix.prismElements.endpoint.port`
-|The port that is used to log in to Prism Element.
-|String
-
-|`platform.nutanix.prismElements.uuid`
-|The universally unique identifier (UUID) for Prism Element.
-|String
-
-|`platform.nutanix.subnetUUIDs`
-|The UUID of the Prism Element network that contains the virtual IP addresses and DNS records that you configured. [^2^]
-|String
-
-|`platform.nutanix.clusterOSImage`
-|Optional: By default, the installation program downloads and installs the {op-system-first} image. If Prism Central does not have internet access, you can override the default behavior by hosting the {op-system} image on any HTTP server and pointing the installation program to the image.
-|An HTTP or HTTPS URL, optionally with a SHA-256 checksum. For example, \http://example.com/images/rhcos-47.83.202103221318-0-nutanix.x86_64.qcow2
-|====
-[.small]
---
-1. The `prismElements` section holds a list of Prism Elements (clusters). A Prism Element encompasses all of the Nutanix resources, for example virtual machines and subnets, that are used to host the {product-title} cluster. Only a single Prism Element is supported.
-2. Only one subnet per {product-title} cluster is supported.
--- -endif::nutanix[] - -ifdef::bare[] -:!bare: -endif::bare[] -ifeval::["{context}" == "installing-alibaba-customizations"] -:!alibabacloud: -endif::[] -ifeval::["{context}" == "installing-alibaba-vpc"] -:!alibabacloud: -endif::[] -ifeval::["{context}" == "installing-aws-customizations"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:!aws-govcloud: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:!aws-secret: -endif::[] -ifeval::["{context}" == "installing-aws-network-customizations"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-private"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-vpc"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-outposts-remote-workers"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-azure-customizations"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-government-region"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-network-customizations"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-private"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-vnet"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-aws-customizations"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-customizations"] -:!ibm-cloud: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-network-customizations"] -:!ibm-cloud: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-vpc"] -:!ibm-cloud: -:!ibm-cloud-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-private"] -:!ibm-cloud: -:!ibm-cloud-vpc: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-custom"] -:!osp: -:!osp-custom: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-kuryr"] -:!osp: -:!osp-kuryr: -endif::[] -ifeval::["{context}" == "installing-openstack-user"] -:!osp: -:!osp-custom: -endif::[] -ifeval::["{context}" == "installing-openstack-user-kuryr"] -:!osp: -:!osp-kuryr: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov"] -:!osp: -:!osp-custom: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov-kuryr"] -:!osp: -:!osp-kuryr: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-customizations"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-network-customizations"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-vmc-customizations"] -:!vmc: -endif::[] -ifeval::["{context}" == "installing-vmc-network-customizations"] -:!vmc: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vmc"] -:!vmc: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:!osp: -:!osp-custom: -endif::[] -ifeval::["{context}" == "installing-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-power"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-customizations"] -:!ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-private-cluster"] -:!ibm-power-vs: 
-endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:!ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-ibm-powervs-vpc"] -:!ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-default"] -:!ash: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-network-customizations"] -:!ash: -endif::[] -ifeval::["{context}" == "installing-nutanix-installer-provisioned"] -:!nutanix: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-nutanix-installer-provisioned"] -:!nutanix: -endif::[] -ifeval::["{context}" == "installation-config-parameters-vsphere"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installation-config-parameters-gcp"] -:!gcp: -endif::[] -:!platform: diff --git a/modules/installation-configure-proxy.adoc b/modules/installation-configure-proxy.adoc deleted file mode 100644 index 24d775949a85..000000000000 --- a/modules/installation-configure-proxy.adoc +++ /dev/null @@ -1,302 +0,0 @@ -// Module included in the following assemblies: -// -// installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_aws/installing_aws-customizations.adoc -// * installing/installing_aws/installing_aws-network-customizations.adoc -// * installing/installing_aws/installing_aws-private.adoc -// * installing/installing_aws/installing_aws-vpc.adoc -// * installing/installing_aws/installing_aws-china.adoc -// * installing/installing_aws/installing-aws-secret-region.adoc -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_azure/installing-azure-customizations.adoc -// * installing/installing_azure/installing-azure-network-customizations.adoc -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_azure/installing-azure-vnet.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_gcp/installing-gcp-customizations.adoc -// * installing/installing_gcp/installing-gcp-network-customizations.adoc -// * installing/installing_gcp/installing-gcp-private.adoc -// * installing/installing_gcp/installing-gcp-vpc.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * 
installing/installing_openstack/installing-openstack-installer-sr-iov.adoc -// * installing/installing_openstack/installing-openstack-installer-restricted.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-customizations.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc -// * installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc -// * installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * networking/configuring-a-custom-pki.adoc -// * installing/installing-nutanix-installer-provisioned.adoc -// * installing/installing-restricted-networks-nutanix-installer-provisioned.adoc - -ifeval::["{context}" == "installing-aws-china-region"] -:aws: -:aws-china: -endif::[] -ifeval::["{context}" == "installing-aws-customizations"] -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-network-customizations"] -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-private"] -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-vpc"] -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-user-infra"] -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:aws: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:aws: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws"] -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:aws: -endif::[] -ifeval::["{context}" == "installing-bare-metal"] -:bare-metal: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:bare-metal: -endif::[] -ifeval::["{context}" == "installing-vsphere"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-network-customizations"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-customizations"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-network-customizations"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-kuryr"] -:kuryr: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:kuryr: -endif::[] - -:_content-type: PROCEDURE -[id="installation-configure-proxy_{context}"] -= Configuring the cluster-wide proxy during 
installation - -Production environments can deny direct access to the internet and instead have -an HTTP or HTTPS proxy available. You can configure a new {product-title} -cluster to use a proxy by configuring the proxy settings in the -`install-config.yaml` file. - -ifdef::bare-metal[] -[NOTE] -==== -For bare metal installations, if you do not assign node IP addresses from the -range that is specified in the `networking.machineNetwork[].cidr` field in the -`install-config.yaml` file, you must include them in the `proxy.noProxy` field. -==== -endif::bare-metal[] - -ifdef::kuryr[] -[NOTE] -==== -Kuryr installations default to HTTP proxies. -==== -endif::kuryr[] - -.Prerequisites - -ifdef::kuryr[] - -* For Kuryr installations on restricted networks that use the `Proxy` object, the proxy must be able to reply to the router that the cluster uses. To add a static route for the proxy configuration, from a command line as the root user, enter: -+ -[source,terminal] ----- -$ ip route add via ----- - -* The restricted subnet must have a gateway that is defined and available to be linked to the `Router` resource that Kuryr creates. - -endif::kuryr[] -* You have an existing `install-config.yaml` file. -// TODO: xref (../../installing/install_config/configuring-firewall.adoc#configuring-firewall) -* You reviewed the sites that your cluster requires access to and determined whether any of them need to bypass the proxy. By default, all cluster egress traffic is proxied, including calls to hosting cloud provider APIs. You added sites to the `Proxy` object's `spec.noProxy` field to bypass the proxy if necessary. -+ -[NOTE] -==== -The `Proxy` object `status.noProxy` field is populated with the values of the `networking.machineNetwork[].cidr`, `networking.clusterNetwork[].cidr`, and `networking.serviceNetwork[]` fields from your installation configuration. - -For installations on Amazon Web Services (AWS), Google Cloud Platform (GCP), Microsoft Azure, and {rh-openstack-first}, the `Proxy` object `status.noProxy` field is also populated with the instance metadata endpoint (`169.254.169.254`). -==== - -.Procedure - -. Edit your `install-config.yaml` file and add the proxy settings. For example: -+ -[source,yaml] ----- -apiVersion: v1 -baseDomain: my.domain.com -proxy: - httpProxy: http://:@: <1> - httpsProxy: https://:@: <2> -ifndef::aws[] - noProxy: example.com <3> -endif::aws[] -ifdef::aws[] - noProxy: ec2..amazonaws.com,elasticloadbalancing..amazonaws.com,s3..amazonaws.com <3> -endif::aws[] -additionalTrustBundle: | <4> - -----BEGIN CERTIFICATE----- - - -----END CERTIFICATE----- -additionalTrustBundlePolicy: <5> ----- -<1> A proxy URL to use for creating HTTP connections outside the cluster. The -URL scheme must be `http`. -<2> A proxy URL to use for creating HTTPS connections outside the cluster. -<3> A comma-separated list of destination domain names, IP addresses, or other network CIDRs to exclude from proxying. Preface a domain with `.` to match subdomains only. For example, `.y.com` matches `x.y.com`, but not `y.com`. Use `*` to bypass the proxy for all destinations. -ifdef::aws[] -If you have added the Amazon `EC2`,`Elastic Load Balancing`, and `S3` VPC endpoints to your VPC, you must add these endpoints to the `noProxy` field. -endif::aws[] -ifdef::vsphere[] -You must include vCenter's IP address and the IP range that you use for its machines. 
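-For example, with a hypothetical vCenter address of `192.168.0.10` and a machine network of `192.168.0.0/16`, you would include both `192.168.0.10` and `192.168.0.0/16` in the `noProxy` list.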
-endif::vsphere[] -<4> If provided, the installation program generates a config map that is named `user-ca-bundle` in -the `openshift-config` namespace to hold the additional CA -certificates. If you provide `additionalTrustBundle` and at least one proxy setting, the `Proxy` object is configured to reference the `user-ca-bundle` config map in the `trustedCA` field. The Cluster Network -Operator then creates a `trusted-ca-bundle` config map that merges the contents specified for the `trustedCA` parameter -with the {op-system} trust bundle. The `additionalTrustBundle` field is required unless -the proxy's identity certificate is signed by an authority from the {op-system} trust -bundle. -<5> Optional: The policy to determine the configuration of the `Proxy` object to reference the `user-ca-bundle` config map in the `trustedCA` field. The allowed values are `Proxyonly` and `Always`. Use `Proxyonly` to reference the `user-ca-bundle` config map only when `http/https` proxy is configured. Use `Always` to always reference the `user-ca-bundle` config map. The default value is `Proxyonly`. -+ -[NOTE] -==== -The installation program does not support the proxy `readinessEndpoints` field. -==== -+ -[NOTE] -==== -If the installer times out, restart and then complete the deployment by using the `wait-for` command of the installer. For example: - -[source,terminal] ----- -$ ./openshift-install wait-for install-complete --log-level debug ----- -==== - -. Save the file and reference it when installing {product-title}. - -The installation program creates a cluster-wide proxy that is named `cluster` that uses the proxy -settings in the provided `install-config.yaml` file. If no proxy settings are -provided, a `cluster` `Proxy` object is still created, but it will have a nil -`spec`. - -[NOTE] -==== -Only the `Proxy` object named `cluster` is supported, and no additional -proxies can be created. 
-==== - -ifeval::["{context}" == "installing-aws-china-region"] -:!aws: -:!aws-china: -endif::[] -ifeval::["{context}" == "installing-aws-customizations"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-network-customizations"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-private"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-vpc"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-user-infra"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-bare-metal"] -:!bare-metal: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:!bare-metal: -endif::[] -ifeval::["{context}" == "installing-vsphere"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-network-customizations"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-customizations"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-network-customizations"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-kuryr"] -:!kuryr: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:!kuryr: -endif::[] diff --git a/modules/installation-create-ingress-dns-records.adoc b/modules/installation-create-ingress-dns-records.adoc deleted file mode 100644 index 552e814f512e..000000000000 --- a/modules/installation-create-ingress-dns-records.adoc +++ /dev/null @@ -1,142 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -:_content-type: PROCEDURE -[id="installation-create-ingress-dns-records_{context}"] -= Creating the Ingress DNS Records - -If you removed the DNS Zone configuration, manually create DNS records that point to the Ingress load balancer. -You can create either a wildcard record or specific records. While the following procedure uses A records, you can use other record types that you require, such as CNAME or alias. - -.Prerequisites - -* You deployed an {product-title} cluster on Amazon Web Services (AWS) that uses infrastructure that you provisioned. -* You installed the OpenShift CLI (`oc`). -* You installed the `jq` package. -* You downloaded the AWS CLI and installed it on your computer. See -link:https://docs.aws.amazon.com/cli/latest/userguide/install-bundle.html[Install the AWS CLI Using the Bundled Installer (Linux, macOS, or Unix)]. - -.Procedure - -. Determine the routes to create. -** To create a wildcard record, use `*.apps..`, where `` is your cluster name, and `` is the Route 53 base domain for your {product-title} cluster. 
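-For example, with a hypothetical cluster name of `mycluster` and a base domain of `example.com`, the wildcard record is `*.apps.mycluster.example.com`.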
-** To create specific records, you must create a record for each route that your cluster uses, as shown in the output of the following command: -+ -[source,terminal] ----- -$ oc get --all-namespaces -o jsonpath='{range .items[*]}{range .status.ingress[*]}{.host}{"\n"}{end}{end}' routes ----- -+ -.Example output -[source,terminal] ----- -oauth-openshift.apps.. -console-openshift-console.apps.. -downloads-openshift-console.apps.. -alertmanager-main-openshift-monitoring.apps.. -prometheus-k8s-openshift-monitoring.apps.. ----- - -. Retrieve the Ingress Operator load balancer status and note the value of the external IP address that it uses, which is shown in the `EXTERNAL-IP` column: -+ -[source,terminal] ----- -$ oc -n openshift-ingress get service router-default ----- -+ -.Example output -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -router-default LoadBalancer 172.30.62.215 ab3...28.us-east-2.elb.amazonaws.com 80:31499/TCP,443:30693/TCP 5m ----- - -. Locate the hosted zone ID for the load balancer: -+ -[source,terminal] ----- -$ aws elb describe-load-balancers | jq -r '.LoadBalancerDescriptions[] | select(.DNSName == "").CanonicalHostedZoneNameID' <1> ----- -<1> For ``, specify the value of the external IP address of the Ingress Operator load balancer that you obtained. -+ -.Example output -[source,terminal] ----- -Z3AADJGX6KTTL2 ----- - -+ -The output of this command is the load balancer hosted zone ID. - -. Obtain the public hosted zone ID for your cluster's domain: -+ -[source,terminal] ----- -$ aws route53 list-hosted-zones-by-name \ - --dns-name "" \ <1> - --query 'HostedZones[? Config.PrivateZone != `true` && Name == `.`].Id' <1> - --output text ----- -<1> For ``, specify the Route 53 base domain for your {product-title} cluster. -+ -.Example output -[source,terminal] ----- -/hostedzone/Z3URY6TWQ91KVV ----- -+ -The public hosted zone ID for your domain is shown in the command output. In this example, it is `Z3URY6TWQ91KVV`. - -. Add the alias records to your private zone: -+ -[source,terminal] ----- -$ aws route53 change-resource-record-sets --hosted-zone-id "" --change-batch '{ <1> -> "Changes": [ -> { -> "Action": "CREATE", -> "ResourceRecordSet": { -> "Name": "\\052.apps.", <2> -> "Type": "A", -> "AliasTarget":{ -> "HostedZoneId": "", <3> -> "DNSName": ".", <4> -> "EvaluateTargetHealth": false -> } -> } -> } -> ] -> }' ----- -<1> For ``, specify the value from the output of the CloudFormation template for DNS and load balancing. -<2> For ``, specify the domain or subdomain that you use with your {product-title} cluster. -<3> For ``, specify the public hosted zone ID for the load balancer that you obtained. -<4> For ``, specify the value of the external IP address of the Ingress Operator load balancer. Ensure that you include the trailing period (`.`) in this parameter value. - -. Add the records to your public zone: -+ -[source,terminal] ----- -$ aws route53 change-resource-record-sets --hosted-zone-id """ --change-batch '{ <1> -> "Changes": [ -> { -> "Action": "CREATE", -> "ResourceRecordSet": { -> "Name": "\\052.apps.", <2> -> "Type": "A", -> "AliasTarget":{ -> "HostedZoneId": "", <3> -> "DNSName": ".", <4> -> "EvaluateTargetHealth": false -> } -> } -> } -> ] -> }' ----- -<1> For ``, specify the public hosted zone for your domain. -<2> For ``, specify the domain or subdomain that you use with your {product-title} cluster. -<3> For ``, specify the public hosted zone ID for the load balancer that you obtained. 
-<4> For ``, specify the value of the external IP address of the Ingress Operator load balancer. Ensure that you include the trailing period (`.`) in this parameter value. diff --git a/modules/installation-creating-aws-bootstrap.adoc b/modules/installation-creating-aws-bootstrap.adoc deleted file mode 100644 index d6765d2a70af..000000000000 --- a/modules/installation-creating-aws-bootstrap.adoc +++ /dev/null @@ -1,217 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -:_content-type: PROCEDURE -[id="installation-creating-aws-bootstrap_{context}"] -= Creating the bootstrap node in AWS - -You must create the bootstrap node in Amazon Web Services (AWS) to use during {product-title} cluster initialization. You do this by: - -* Providing a location to serve the `bootstrap.ign` Ignition config file to your cluster. This file is located in your installation directory. The provided CloudFormation Template assumes that the Ignition config files for your cluster are served from an S3 bucket. If you choose to serve the files from another location, you must modify the templates. -* Using the provided CloudFormation template and a custom parameter file to create a stack of AWS resources. The stack represents the bootstrap node that your {product-title} installation requires. - -[NOTE] -==== -If you do not use the provided CloudFormation template to create your bootstrap -node, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. -==== - -.Prerequisites - -* You configured an AWS account. -* You added your AWS keys and region to your local AWS profile by running `aws configure`. -* You generated the Ignition config files for your cluster. -* You created and configured a VPC and associated subnets in AWS. -* You created and configured DNS, load balancers, and listeners in AWS. -* You created the security groups and roles required for your cluster in AWS. - -.Procedure - -. Create the bucket by running the following command: -+ -[source,terminal] ----- -$ aws s3 mb s3://-infra <1> ----- -<1> `-infra` is the bucket name. When creating the `install-config.yaml` file, replace `` with the name specified for the cluster. -+ -You must use a presigned URL for your S3 bucket, instead of the `s3://` schema, if you are: -** Deploying to a region that has endpoints that differ from the AWS SDK. -** Deploying a proxy. -** Providing your own custom endpoints. - -. Upload the `bootstrap.ign` Ignition config file to the bucket by running the following command: -+ -[source,terminal] ----- -$ aws s3 cp /bootstrap.ign s3://-infra/bootstrap.ign <1> ----- -<1> For ``, specify the path to the directory that you stored the installation files in. - -. Verify that the file uploaded by running the following command: -+ -[source,terminal] ----- -$ aws s3 ls s3://-infra/ ----- -+ -.Example output -[source,terminal] ----- -2019-04-03 16:15:16 314878 bootstrap.ign ----- -+ -[NOTE] -==== -The bootstrap Ignition config file does contain secrets, like X.509 keys. The following steps provide basic security for the S3 bucket. To provide additional security, you can enable an S3 bucket policy to allow only certain users, such as the OpenShift IAM user, to access objects that the bucket contains. 
You can avoid S3 entirely and serve your bootstrap Ignition config file from any address that the bootstrap machine can reach. -==== - -. Create a JSON file that contains the parameter values that the template requires: -+ -[source,json] ----- -[ - { - "ParameterKey": "InfrastructureName", <1> - "ParameterValue": "mycluster-" <2> - }, - { - "ParameterKey": "RhcosAmi", <3> - "ParameterValue": "ami-" <4> - }, - { - "ParameterKey": "AllowedBootstrapSshCidr", <5> - "ParameterValue": "0.0.0.0/0" <6> - }, - { - "ParameterKey": "PublicSubnet", <7> - "ParameterValue": "subnet-" <8> - }, - { - "ParameterKey": "MasterSecurityGroupId", <9> - "ParameterValue": "sg-" <10> - }, - { - "ParameterKey": "VpcId", <11> - "ParameterValue": "vpc-" <12> - }, - { - "ParameterKey": "BootstrapIgnitionLocation", <13> - "ParameterValue": "s3:///bootstrap.ign" <14> - }, - { - "ParameterKey": "AutoRegisterELB", <15> - "ParameterValue": "yes" <16> - }, - { - "ParameterKey": "RegisterNlbIpTargetsLambdaArn", <17> - "ParameterValue": "arn:aws:lambda:::function:-RegisterNlbIpTargets-" <18> - }, - { - "ParameterKey": "ExternalApiTargetGroupArn", <19> - "ParameterValue": "arn:aws:elasticloadbalancing:::targetgroup/-Exter-" <20> - }, - { - "ParameterKey": "InternalApiTargetGroupArn", <21> - "ParameterValue": "arn:aws:elasticloadbalancing:::targetgroup/-Inter-" <22> - }, - { - "ParameterKey": "InternalServiceTargetGroupArn", <23> - "ParameterValue": "arn:aws:elasticloadbalancing:::targetgroup/-Inter-" <24> - } -] - ----- -<1> The name for your cluster infrastructure that is encoded in your Ignition -config files for the cluster. -<2> Specify the infrastructure name that you extracted from the Ignition config -file metadata, which has the format `-`. -<3> Current {op-system-first} AMI to use for the bootstrap node based on your selected architecture. -<4> Specify a valid `AWS::EC2::Image::Id` value. -<5> CIDR block to allow SSH access to the bootstrap node. -<6> Specify a CIDR block in the format `x.x.x.x/16-24`. -<7> The public subnet that is associated with your VPC to launch the bootstrap -node into. -<8> Specify the `PublicSubnetIds` value from the output of the CloudFormation -template for the VPC. -<9> The master security group ID (for registering temporary rules) -<10> Specify the `MasterSecurityGroupId` value from the output of the -CloudFormation template for the security group and roles. -<11> The VPC created resources will belong to. -<12> Specify the `VpcId` value from the output of the CloudFormation template -for the VPC. -<13> Location to fetch bootstrap Ignition config file from. -<14> Specify the S3 bucket and file name in the form -`s3:///bootstrap.ign`. -<15> Whether or not to register a network load balancer (NLB). -<16> Specify `yes` or `no`. If you specify `yes`, you must provide a Lambda -Amazon Resource Name (ARN) value. -<17> The ARN for NLB IP target registration lambda group. -<18> Specify the `RegisterNlbIpTargetsLambda` value from the output of the -CloudFormation template for DNS and load balancing. Use `arn:aws-us-gov` if -deploying the cluster to an AWS GovCloud region. -<19> The ARN for external API load balancer target group. -<20> Specify the `ExternalApiTargetGroupArn` value from the output of the -CloudFormation template for DNS and load balancing. Use `arn:aws-us-gov` if -deploying the cluster to an AWS GovCloud region. -<21> The ARN for internal API load balancer target group. 
-<22> Specify the `InternalApiTargetGroupArn` value from the output of the
-CloudFormation template for DNS and load balancing. Use `arn:aws-us-gov` if
-deploying the cluster to an AWS GovCloud region.
-<23> The ARN for internal service load balancer target group.
-<24> Specify the `InternalServiceTargetGroupArn` value from the output of the
-CloudFormation template for DNS and load balancing. Use `arn:aws-us-gov` if
-deploying the cluster to an AWS GovCloud region.
-
-. Copy the template from the *CloudFormation template for the bootstrap machine*
-section of this topic and save it as a YAML file on your computer. This template
-describes the bootstrap machine that your cluster requires.
-
-. Optional: If you are deploying the cluster with a proxy, you must update the Ignition config in the template to add the `ignition.config.proxy` fields. Additionally, if you have added the Amazon EC2, Elastic Load Balancing, and S3 VPC endpoints to your VPC, you must add these endpoints to the `noProxy` field.
-
-. Launch the CloudFormation template to create a stack of AWS resources that represent the bootstrap node:
-+
-[IMPORTANT]
-====
-You must enter the command on a single line.
-====
-+
-[source,terminal]
-----
-$ aws cloudformation create-stack --stack-name <1>
-     --template-body file://